Merge pull request #102 from terminalforlife/master

Code, Formatting Tweaks, Some Fixes, Headers, etc
pull/104/head
Igor Chubin 4 years ago committed by GitHub
commit a6abb2e889
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,15 +1,14 @@
# airport
#
# Wireless network configuration utility.
# Show current wireless status information:
# Show current wireless status information.
airport -I
# Sniff wireless traffic on channel 1:
# Sniff wireless traffic on channel 1.
airport sniff 1
# Scan for available wireless networks:
# Scan for available wireless networks.
airport -s
# Disassociate from current airport network:
sudo airport -z
# Disassociate from current airport network.
airport -z

@ -1,23 +1,24 @@
# apk is the tool used to install, upgrade, or delete software on a running system
# of Alpine Linux package management
# apk
# Package management software for use in Alpine Linux
# Install a package
apk add $package
# Install a package, or upgrade an existing one.
apk add [PKG]
# Remove a package
apk del $package
apk del [PKG]
# Update repos
# Update index of available packages.
apk update
# Upgrade all packages
# Upgrade all installed packages.
apk upgrade
# Find a package
apk search $package
# Find package, using glob pattern matching.
apk search [PKG]
# To list all installed packages, use:
# List all installed packages.
apk info
# To determine which package a file belongs to:
apk info --who-owns /sbin/lbu
# Determine to which package a file belongs, akin to `dpkg -S [PATH]` in
# Debian- and Ubuntu-based distributions of Linux.
apk info --who-owns [PATH]

@ -1,9 +1,12 @@
# Search for package PKG. Both package names and their descriptions are searched
# for a REGEX match; to avoid this behavior, you may use the `-n` flag, which will
# only look for a match in the package name.
# apt-cache
# Query the APT cache
# Search for package PKG. Both package names and their descriptions are
# searched for a REGEX match; to avoid this behavior, you may use the `-n`
# flag, which will only look for a match in the package name.
apt-cache search 'PKG'
# Regarding the above, although multiple package names may not be specified, it's
# possible to use ERE to easily and quickly get around this limitation. Here, all
# 3 packages (PKG1, PKG2, and PKG3) will be sought.
# Regarding the above, although multiple package names may not be specified, -
# it's possible to use ERE to easily and quickly get around this limitation.
# Here, all 3 packages (PKG1, PKG2, and PKG3) will be sought.
apt-cache search '(PKG1|PKG2|PKG3)'

@ -1,17 +1,20 @@
# Update the local database of available packages, as discovered from package index
# file found in their sources. This does not actually update your installed
# software! For that, keep reading.
# apt-get
# Command-line interface to APT package management
# Update the local database of available packages, as discovered from package
# index file found in their sources. This does not actually update your
# installed software! For that, keep reading.
apt-get update
# Upgrade installed packages, but there may be exceptions, such as important kernel
# packages. Also, packages will not be removed, like if they're deprecated, with
# this method.
# Upgrade installed packages, but there may be exceptions, such as important
# kernel packages. Also, packages will not be removed, like if they're
# deprecated, with this method.
apt-get upgrade
# Unlike the above, this will upgrade all of the installed packages, and perform
# other actions required for a successful and thorough upgrade. This will also
# allow for upgrading to the next minor release of your distributions, such as from
# Ubuntu '16.04.1' to '16.04.2'.
# Unlike the above, this will upgrade all of the installed packages, and
# perform other actions required for a successful and thorough upgrade. This
# will also allow for upgrading to the next minor release of your
# distributions, such as from Ubuntu '16.04.1' to '16.04.2'.
apt-get dist-upgrade
# Clean out (completely) the following locations of saved DEB files:
@ -26,22 +29,23 @@ apt-get clean -s
# View the changelog for the firefox package. Useful prior to or after upgrade.
apt-get changelog firefox
# Download PKG (one or more) without actually installing or extracting them. A good
# use for this might be to upgrade an offline system, by downloading the packages
# on a system using an Internet-able machine. Files are downloaded into the CWD.
# Download PKG (one or more) without actually installing or extracting them. A
# good use for this might be to upgrade an offline system, by downloading the
# packages on a system using an Internet-able machine. Files are downloaded
# into the CWD.
apt-get download PKG
# Install PKG (one or more), bringing in dependencies and, provided settings allow
# it, install recommended and/or suggested packages.
# Install PKG (one or more), bringing in dependencies and, provided settings
# allow it, install recommended and/or suggested packages.
apt-get install PKG
# At times, dependencies won't be installed, yet you still need them; the following
# command will often fix this, and is usually suggested to the user.
# At times, dependencies won't be installed, yet you still need them; the
# following command will often fix this, and is usually suggested to the user.
apt-get -f install
# Remove PKG, while also purging system-wide configuration files for it.
apt-get purge PKG
# Alternative syntax:
# Alternative syntax to the above.
apt-get remove --purge PKG
# Often used to first update the local database of packages, then, only if
@ -50,4 +54,5 @@ apt-get update && apt-get dist-upgrade
# Download specified package (firefox, in this example) and all packages marked
# thereby as important or dependencies. Files are downloaded into the CWD.
apt-get download firefox `apt-cache --important depends firefox | awk '{if(NR>1){printf("%s ", $2)}}'`
apt-get download firefox `apt-cache --important depends firefox |
awk '{if(NR>1){printf("%s ", $2)}}'`

@ -1,2 +1,5 @@
# List trusted GPG keys for APT.
# apt-key
# Command-line APT key management utility
# List APT keys marked as trusted.
apt-key list

@ -1,3 +1,6 @@
# aspell
# Interactive spell-checker for use in a terminal
# Spell check a single file.
aspell check [FILE]

@ -1,49 +1,82 @@
# sum integers from a file or STDIN, one integer per line:
printf '1\n2\n3\n' | awk '{ sum += $1} END {print sum}'
# awk
# Pattern scanning and processing language
# using specific character as separator to sum integers from a file or STDIN
# Sum integers from a file or STDIN, one integer per line.
printf '1\n2\n3\n' | awk '{sum += $1} END {print sum}'
# Using specific character as separator to sum integers from a file or STDIN.
printf '1:2:3' | awk -F ":" '{print $1+$2+$3}'
# print a multiplication table
seq 9 | sed 'H;g' | awk -v RS='' '{for(i=1;i<=NF;i++)printf("%dx%d=%d%s", i, NR, i*NR, i==NR?"\n":"\t")}'
# Print a multiplication table.
awk -v RS='' '
{
for(i=1;i<=NF;i++){
printf("%dx%d=%d%s", i, NR, i*NR, i==NR?"\n":"\t")
}
}
' <<< "$(seq 9 | sed 'H;g')"
# Specify output separator character
# Specify output separator character.
printf '1 2 3' | awk 'BEGIN {OFS=":"}; {print $1,$2,$3}'
# search for a paragraph containing string
# Search paragraph for the given REGEX match.
awk -v RS='' '/42B/' file
# display only first column from multi-column text
echo "first-column second-column third-column" | awk '{print $1}'
# Display only first field in text taken from STDIN.
echo 'Field_1 Field_2 Field_3' | awk '{print $1}'
# Note that in this case, you're far better off using cut(1).
# Use AWK solo; without the need for something via STDIN.
awk BEGIN'{printf("Example text.\n")}'
awk 'BEGIN {print("Example text.")}'
# Accessing environment variables from within AWK.
awk 'BEGIN{print ENVIRON["LS_COLORS"]}'
# Access environment variables from within AWK.
awk 'BEGIN {print ENVIRON["LS_COLORS"]}'
# One method to count the number of lines; in this case, read from STDIN.
free | awk '{i++} END{print i}'
# Count number of lines taken from STDIN.
free | awk '{L++} END {print(L)}'
# Cleaner, more efficient approach to the above.
free | awk 'END {print(NR)}'
# Output unique list of available sections under which to create a DEB package.
awk '!A[$1]++{print($1)}' <<< "$(dpkg-query --show -f='${Section}\n')"
awk '!A[$1]++ {print($1)}' <<< "$(dpkg-query --show -f='${Section}\n')"
# Using process substitution (`<()` is NOT command substitution), with AWK and
# its associative array variables, we can print just column 2 for lines whose
# first column is equal to what's between the double-quotes.
awk '{NR!=1&&A[$1]=$2} END{print(A["Mem:"])}' <(free -h)
awk '{NR != 1 && A[$1]=$2} END {print(A["Mem:"])}' <(free -h)
# While below is an easier and simpler solution to the above, it's not at all
# the same, and in other cases, the above is definitely preferable.
awk '/^Mem:/{print($2)}' <(free -h)
awk '/^Mem:/ {print($2)}' <(free -h)
# Output list of unique uppercase-only, sigil-omitted variables used in [FILE].
awk '{for(F=0; F<NF; F++){if($F~/^\$[A-Z_]+$/){A[$F]++}}} END{for(I in A){X=substr(I, 2, length(I)); printf("%s\n", X)}}' [FILE]
awk '
{
for(F=0; F<NF; F++){
if($F~/^\$[A-Z_]+$/){
A[$F]++
}
}
}
END {
for(I in A){
X=substr(I, 2, length(I))
printf("%s\n", X)
}
}
' [FILE]
# Output only lines from FILE between PATTERN_1 and PATTERN_2. Good for logs.
awk '/PATTERN_1/,/PATTERN_2/{print}' [FILE]
awk '/PATTERN_1/,/PATTERN_2/ {print}' [FILE]
# Pretty-print a table of an overview of the non-system users on the system.
awk -SPF ':' 'BEGIN {printf("%-17s %-4s %-4s %-s\n", "NAME", "UID", "GID", "SHELL")} $3>=1000 && $1!="nobody" {printf("%-17s %-d %-d %-s\n", $1, $3, $4, $7)}' /etc/passwd
awk -F ':' '
BEGIN {
printf("%-17s %-4s %-4s %-s\n", "NAME", "UID", "GID", "SHELL")
}
$3 >= 1000 && $1 != "nobody" {
printf("%-17s %-d %-d %-s\n", $1, $3, $4, $7)
}
' /etc/passwd
# Display the total amount of MiB of RAM available in the machine. This is also
# a painful but useful workaround to get the units comma-separated, as would be

@ -1,38 +1,43 @@
# install aws cli
# aws
# Command-line interface for Amazon Web Services
# Install AWS CLI, using a Python package installer. This is available in the
# Ubuntu 16.04 repositories, so is probably available in other distributions'
# repositories, therefore I'd recommend using that unless otherwise required.
pip install awscli
# configure AWS CLI
# Configure AWS CLI.
aws configure
# describe instances in the current account
# Describe instances in the current account.
aws ec2 describe-instances --instance-ids i-01234567
# list public IP addresses of instances
# List public IP addresses of instances.
aws ec2 describe-instances \
--query "Reservations[*].Instances[*].PublicIpAddress" \
--output=text
# start instance with the specified id
# Start instance with the specified ID.
aws ec2 start-instances --instance-ids i-12345678c
# copy directory to S3
# Copy directory to S3.
aws s3 cp ${directory} s3://${bucket}/${directory} --recursive
# sync directory with S3
# Sync directory with S3.
aws s3 sync ${directory} s3://${bucket}/${directory} --exclude *.tmp
# list s3 buckets
# List S3 buckets.
aws s3 ls
# remove s3 bucket
# Remove S3 bucket.
aws s3 rb --force s3://${bucket_name}
# get bucket logging
# Get bucket logging.
aws s3api get-bucket-logging --bucket ${bucket_name}
# AWS cloudformation list stacks
# AWS cloudformation list stacks.
aws cloudformation list-stacks \
--stack-status-filter [ CREATE_COMPLETE | UPDATE_COMPLETE | etc.. ]
# other useful commands:
# Follow the below link for some other useful commands.
# https://github.com/toddm92/aws/wiki/AWS-CLI-Cheat-Sheet

@ -62,11 +62,11 @@ az disk list --output table
# Copy blob
az storage blob copy start \
--source-uri 'https://md-ldh5nknx2rkz.blob.core.windows.net/jzwuuuzzapn0/abcd?sv=2017-04-17&sr=b&si=68041718-6828-4f5e-9e6e-a1b719975062&sig=XXX' \
--account-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX== \
--account-name destaccount \
--destination-container vms \
--destination-blob DESTINATION-blob.vhd
--source-uri 'https://md-ldh5nknx2rkz.blob.core.windows.net/jzwuuuzzapn0/abcd?sv=2017-04-17&sr=b&si=68041718-6828-4f5e-9e6e-a1b719975062&sig=XXX' \
--account-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX== \
--account-name destaccount \
--destination-container vms \
--destination-blob DESTINATION-blob.vhd
# List virtual networks
az network vnet list --output table

@ -6,23 +6,23 @@ done
# For example:
for CurDay in Monday Tuesday Wednesday Thursday Friday Saturday Sunday
do
printf "%s\n" "$CurDay"
printf "%s\n" "$CurDay"
done
# To implement a case statement:
case $1 in
0)
echo "Found a '0'." ;;
echo "Found a '0'." ;;
1)
echo "Found a '1'." ;;
echo "Found a '1'." ;;
2)
echo "Found a '2'." ;;
echo "Found a '2'." ;;
3*)
echo "Something beginning with '3' found." ;;
'')
echo "Nothing (null) found." ;;
*)
echo "Anything else found." ;;
echo "Something beginning with '3' found." ;;
'')
echo "Nothing (null) found." ;;
*)
echo "Anything else found." ;;
esac
# Turn on built-in Bash debugging output:
@ -67,8 +67,8 @@ command -v ${program} >/dev/null 2>&1 || error "${program} not installed"
# However, that is a solution commonly found in a script using the Bourne shell, so
# in this case, an alternative, Bash-like, and more accurate version could be:
if ! type -fP bash > /dev/null 2>&1; then
printf "ERROR: Dependency 'bash' not met." >&2
exit 1
printf "ERROR: Dependency 'bash' not met." >&2
exit 1
fi
# Send both STDOUT and STDERR from COMMAND to FILE. The `2>&1` must go at the end.

@ -47,12 +47,13 @@ UUID=<blkid#> /opt defaults 0 0 # 00 disable disk checking and metadata dumping
mount -a
df -h /opt
# Extending a physical disk (vm)
1. Unmount drive - comment out /etc/fstab entry for /dev/sdc
2. Increase drive physical space
3. parted /dev/sdc - 'p' #print partitions on /dev/sdc
4. fdisk /dev/sdc - 'p' - 'd' - 'n' (defaults) - w # delete old partition, create new partition
5. reboot
6. e2fsck -f /dev/sdc1
7. resize2fs /dev/sdc1
8. mount /dev/sdc1 - uncomment fstab
# Extending a physical disk (vm). Step 3 prints partitions on `/dev/sdc`. Step
# 4 deletes the old partition, then creates a new one.
1. Unmount drive. Comment out `/etc/fstab` entry for `/dev/sdc`.
2. Increase drive physical space
3. parted /dev/sdc - 'p'
4. fdisk /dev/sdc - 'p' - 'd' - 'n' (defaults) - w
5. reboot
6. e2fsck -f /dev/sdc1
7. resize2fs /dev/sdc1
8. mount /dev/sdc1 - uncomment fstab

@ -1,61 +1,115 @@
# Find files by case-insensitive extension (ex: .jpg, .JPG, .jpG):
# find
# Search for files in a directory hierarchy
# Find files by case-insensitive extension, such as `.jpg`, `.JPG`, & `.jpG`.
# By default, find(1) uses glob pathname pattern matching. To avoid shell
# interpretation, the glob either must be expanded or the string quoted.
find . -iname '*.jpg'
# Find directories:
# Find directories.
find . -type d
# Find files:
# Find files. Specifically files; not directories, links, FIFOs, etc.
find . -type f
# Find files by octal permission:
# Find files set to the provided octal mode (permissions).
find . -type f -perm 777
# Find files with setuid bit set:
# Find files with setuid bit set, keeping to the same filesystem.
find . -xdev \( -perm -4000 \) -type f -print0 | xargs -0 ls -l
# To find files with extension '.txt' and remove them:
find ./path/ -name '*.txt' -exec rm '{}' \;
# Find files with extension '.txt' and look for a string into them:
# The above is a useful demonstration of some pitfalls into which a user can
# fall, where the below is the above but corrected. Here is why:
#
# * The `.` (current working directory) is assumed when no path is provided.
# * Group syntax (parentheses) was used, but nothing was actually grouped.
# * A lot of people have their ls(1) command aliased in many ways, -
# potentially causing problems with the output and how xargs(1) handles it.
# By escaping the command, we temporarily override any aliases and even
# functions by the same name.
# * At least in my experience, the prior xargs(1) is not as reliable.
# * The `-print0` and `xargs -0` is great, but unnecessary (except when?).
#
# However, it might be more preferred to simply use find(1)'s own `-printf`
# flag, in order to avoid the need for xargs(1) and ls(1), which should be many
# times faster, and allows for more specificity.
find -perm -4000 -type f -print0 | xargs -I '{}' -0 \ls -l '{}'
# Find and remove files with case-sensitive extension of `.txt`.
find [PATH] -name '*.txt' -exec rm '{}' \;
# The above is much more efficiently written as shown below, as find(1) has its
# own built-in delete function, not to mention a single rm(1) process was
# previously executed for each file processed, which is comparatively slow.
find [PATH] -name '*.txt' -delete
# Find files with extension '.txt' and look for a string into them.
find ./path/ -name '*.txt' | xargs grep 'string'
# Find files with size bigger than 5 Mb and sort them by size:
# Find files with size bigger than 5 Mb and sort them by size.
find . -size +5M -type f -print0 | xargs -0 ls -Ssh | sort -z
# Find files bigger than 20 MB and list them:
# Find files bigger than 20 MB and list them.
find . -type f -size +20000k -exec ls -lh {} \; | awk '{ print $9 ": " $5 }'
# Find files modified more than 7 days ago and list file information
# Alternative, faster approach* to the above.
#
# Why it's faster:
#
# * No need for an external process, like ls(1).
# * The use of `;` with the `-exec` flag executes an ls(1) process for each
# file found, which is comparatively very slow.
# * The `printf` feature is built in and special to awk(1).
#
# That said, awk(1) or gawk(1) is doing a little more here, in order to get
# somewhat of a human-readable file size, but its impact is likely negligible.
find -type f -size +20000k -printf '%s %P\n' |
awk "{printf(\"%'dM %s\n\", \$1 / (1024 * 1024), \$2)}"
# Find files modified more than 7 days ago and list file information.
find . -type f -mtime +7d -ls
# Find symlinks owned by a user and list file information
find . -type l --user=username -ls
# Find symlinks owned by the given user, then list file information.
find -type l -user [NAME] -ls
# The following may be the syntax used on a Mac, however this is not valid on
# Linux, or at least version 4.7.0. All flags in GNU find(1) are one `-` only.
find . -type l --user=[NAME] -ls
# Search for and delete empty directories
# Search for and delete empty directories.
find . -type d -empty -exec rmdir {} \;
# A far more efficient approach to the above. If no path is provided, then the
# current working directory (CWD) is assumed, making the `.` superfluous.
find -type d -empty -delete
# Search for directories named build at a max depth of 2 directories
# Search for directories named `build` at a maximum depth of 2 directories.
# This means that find will not recursively search beyond two levels.
find . -maxdepth 2 -name build -type d
# Search all files that are not in a .git directory
find . ! -iwholename '*.git*' -type f
# Search all files which are not in a `.git` directory. Depending on the shell
# used, the bang (`!`) may need to be escaped, to avoid shell interpretation.
# Alternatively, although non-POSIX, the `-not` flag can be used.
find . \! -iwholename '*.git*' -type f
# Find all files that have the same node (hard link) as MY_FILE_HERE
find . -type f -samefile MY_FILE_HERE 2>/dev/null
# Find all files that have the same inode (indicating hard link) as FILE. All
# output going to STDERR (typically error messages) will also be redirected to
# `/dev/null`, a special pseudo-file where data is sent to die.
find . -type f -samefile [FILE] 2>/dev/null
# Find all files in the current directory and modify their permissions
# Find all files in the current directory and modify their permissions.
find . -type f -exec chmod 644 {} \;
# Find files with extension '.txt.' and edit all of them with vim
# vim is started only once for all files
# Find files with extension `.txt` and edit all of them with vim(1).
#
# The use of `+` (escaped to avoid shell interpretation) with `-exec` means
# that only one process (in this case, `vim`) per `exec`ution is used. If `;`
# is instead used (would also need escaping), then one `vim` process would be
# used per file.
find . -iname '*.txt' -exec vim {} \+
# Find all files with extension '.png' and rename them by changing extension to
# '.jpg' (base name is preserved)
# Find files with extension `.png`, then rename their extension to `.jpg`. It's
# highly important that `\;` is used here, instead of `\+`, otherwise it'd make
# a right mess of the files, due to the way in which mv(1) works.
find . -type f -iname '*.png' -exec bash -c 'mv "$0" "${0%.*}.jpg"' {} \;
# Use logic and grouping to delete extension-specific files.
find \( -iname "*.jpg" -or -iname "*.sfv" -or -iname "*.xspf" \) -type f -delete
# List all executable files, by basename, found within PATH.
# List executable files, by basename, found within PATH.
find ${PATH//:/ } -type f -executable -printf "%P\n"

@ -19,5 +19,5 @@ go install
# See also:
# Go language cheat sheets at /go/
# list of pages: /go/:list
# learn go: /go/:learn
# learn go: /go/:learn
# search in pages: /go/~keyword

@ -1,18 +1,19 @@
# js
#
# JavaScript, often abbreviated as "JS", is a high-level, dynamic, untyped, interpreted run-time language.
# It has been standardized in the ECMAScript language specification.
# JavaScript, often abbreviated as "JS", is a high-level, dynamic, untyped, -
# interpreted run-time language. It has been standardized in the ECMAScript
# language specification.
#
# js is a JavaScript shell, part of SpiderMonkey
# to install spidermonkey: (in Debian) apt-get install libmozjs-24-bin
# `js` is a JavaScript shell, part of SpiderMonkey. To install spidermonkey in
# Debian systems, execute: apt-get install libmozjs-24-bin
# Run the shell in interactive mode
# Launch an interactive JavaScript session.
js
# Run the JavaScript code in the file hello.js
# Run the JavaScript code in the file `hello.js`.
js hello.js
# Run hello.js then drop into the interactive shell
# Run the `hello.js` file, then drop into the interactive shell.
js -f hello.js -i
# See also:

@ -1,9 +1,11 @@
# Convert documents to PDF
# libreoffice
# Office productivity suite
# Convert documents to PDF.
libreoffice --headless --convert-to pdf *.pptx
# Save them to a different directory?
# Save PDFs to a different directory?
libreoffice --headless --convert-to pdf *.docx --outdir ~/docs/
# Convert files nested inside folders?
# This uses sharkdp/fd, you could use GNU find, xargs etc.
# Convert files nested inside folders? Uses sharkdp/fd, IE: find(1) & xargs(1).
fd -e doc -e docx -x libreoffice --headless --convert-to pdf --outdir {//} {}

@ -1,17 +1,25 @@
# lua
# A powerful, light-weight embeddable programming language.
# Simple, extensible, embeddable programming language
# You may not have access to the `lua` command, but instead to something like
# `lua5.3` or one of a different version. The following information assumes you
# have the `lua` command, as-is, but alternative commands may also work the
# same or similarly, version allowing.
# Start an interactive Lua shell:
lua
# Execute a Lua script:
lua script_name.lua --optional-argument
# Execute a Lua script.
lua script_name.lua
# Execute a Lua script, with arguments (IE: flags) given to the script itself.
lua script_name.lua [ARGS]
# Execute a Lua expression:
lua -e 'print( "Hello World" )'
# Execute a Lua expression(s), as though executed from within a script.
lua -e 'print("Hello World")'
# All options are handled in order, except -i. For instance, an invocation like
# first set a to 1, then print the value of a (1), and finally run the file script.lua
# All options are handled in order, except `-i`. For instance, an invocation
# like the one below will first set a to 1, then print the value of a (1), and
# finally run the file `script.lua`.
lua -e'a=1' -e 'print(a)' script.lua
# See also:

@ -1,10 +1,13 @@
# Dump $url, do not show links urls
lynx -dump -nolist $url
# lynx
# Classic non-graphical (CLI) web browser
# Use lynx to run repeating website actions
# For creating your keystroke file, use:
# lynx -cmd_log yourfile
lynx -accept_all_cookies -cmd_script=/your/keystroke-file
# Dump [URL]; do not show link URLs.
lynx -dump -nolist [URL]
# convert html to text
# Creating your keystroke file for use with lynx(1).
lynx -cmd_log [FILE]
# Use above keystroke file to run repeating website actions.
lynx -accept_all_cookies -cmd_script=[FILE]
# Convert HTML from STDIN to text, then dump it to STDOUT.
lynx -force_html -stdin -dump -nolist

@ -1,80 +1,78 @@
# Send a mail with a subject, cc, and an attachment
# mutt
# Text-based mailreader supporting MIME, GPG, PGP and threading
# Send E-Mail with subject, cc, and attachment.
mutt -s "Site Backup" -a backup.tar -c user@example.com user2@example.com
# Send a mail with a blind carbon copy (bcc)
# Send E-Mail with blind carbon copy (bcc).
mutt -s "hello" -b user2@example.com user@example.com
#
# Select (tag) messages: shift+t (T)
#
= (string)
~ (expression)
#
~b expr (message with expr in body)
[b]body
[B]whole message
[f]rom
[c]c'ed
[C]c'ed or to
[h]eader
#
~m from-to (range)
~d from-to (date range)
#
~N (new messages)
~U (unread messages)
~R (read messages)
~A (all messages)
~D (deleted messages)
~p (message to you)
~P (message from you)
~= (duplicated messages)
~$ (unref messages)
#
# Logical operators:
# = (string)
# ~ (expression)
#
AND = ~P ~N (new messages from you)
OR = ~P | ~N (new messages of messages from you)
() = (~= ~$) | ~N
# ~b expr (message with expr in body)
# [b]body
# [B]whole message
# [f]rom
# [c]c'ed
# [C]c'ed or to
# [h]eader
#
#
# DATES
# ~m from-to (range)
# ~d from-to (date range)
#
DD/MM/YY
~d 20/1/95-31/10/95 (from 20/01/95 to 31/10/95)
~d -31/10/95 (before 31/10/95)
~d 20/1/95- (after 20/01/95)
~d 20/1/95 (only 20/01/95)
~d 20/1/95*2w (two weeks around 20/01/95)
[w]eek
[d]ay
[m]onth
[y]ear
can use *(around), +(after) or -(before)
#
~d <1m (messages newer than 1 month)
~d >1y (messages older than 1 year)
~d =1d (exact one day)
# ~N (new messages)
# ~U (unread messages)
# ~R (read messages)
# ~A (all messages)
# ~D (deleted messages)
# ~p (message to you)
# ~P (message from you)
# ~= (duplicated messages)
# ~$ (unref messages)
#
# Keys
# Logical operators:
#
shift+d (D) : delete messages using pattern
shift+t (T) : select messages using pattern
shift+u (U) : undelete messages using pattern
ctrl+t : un-selected messages using pattern
# AND = ~P ~N (new messages from you)
# OR = ~P | ~N (new messages of messages from you)
# () = (~= ~$) | ~N
#
# DATES
#
# DD/MM/YY
# ~d 20/1/95-31/10/95 (from 20/01/95 to 31/10/95)
# ~d -31/10/95 (before 31/10/95)
# ~d 20/1/95- (after 20/01/95)
# ~d 20/1/95 (only 20/01/95)
# ~d 20/1/95*2w (two weeks around 20/01/95)
# ^
# [w]eek
# [d]ay
# [m]onth
# [y]ear
#
# Can use `*(around)`, `+(after)` or `-(before)`.
#
d : delete message
N : mark as new
C : copy message to another folder(mailbox)
o, O : change sort, reverse sort
ctrl+ D, U : Deleted, Undelete a thread
ctrl+R : mark current thread as read
esc+t : select thread
esc+ v, V : collapse thread, collapse all threads
w,W : add, remove flags
# ~d <1m (messages newer than 1 month)
# ~d >1y (messages older than 1 year)
# ~d =1d (exact one day)
# Keyboard shortcuts.
#
T ~U ;WN ctrl+t .
mark unread msgs, remove its new flag, unselect them
# shift+d (D) : Delete messages using pattern
# shift+t (T) : Select messages using pattern
# shift+u (U) : Un-delete messages using pattern
# ctrl+t : De-select messages using pattern
# d : Delete message
# N : Mark as new
# C : Copy message to another folder(mailbox)
# o, O : Change sort, reverse sort
# ctrl+ D, U : Deleted, Undelete a thread
# ctrl+R : Mark current thread as read
# esc+t : Select thread
# esc+ v, V : Collapse thread, collapse all threads
# w,W : Add, remove flags
# T ~U ;WN ctrl+t . : Mark unread msgs, remove its new flag, de-select them

@ -1,7 +1,10 @@
# Number lines given to `nl` via STDIN.
# nl
# Number lines of files
# Number lines given to nl(1) via STDIN.
printf "Here\nis\nsome\nexample\ntext." | nl
# Number (all) lines given to `nl` via provided file(s).
# Number (all) lines given to nl(1) via provided file(s).
nl -b a /path/to/file
# The above can also be achieved with `cat`, which is perhaps more portable:
# The above can also be achieved with cat(1), which is perhaps more portable:
cat -n /path/to/file

@ -0,0 +1,4 @@
# nodejs
# Server-side JavaScript
# Duplicate sheet -- please refer instead to: cheat.sh/js

@ -15,8 +15,8 @@ find -name \*.pdf -type f -print0 | xargs -0rn10 rm
find -name \*.pdf | xargs -I{} rm -rf '{}'
# Will show every .pdf like:
# &toto.pdf=
# &titi.pdf=
# &toto.pdf=
# &titi.pdf=
# -n1 => One file by one file. ( -n2 => 2 files by 2 files )
find -name \*.pdf | xargs -I{} -n1 echo '&{}='
# The above is, however, much faster and easier without xargs:

@ -1,14 +1,26 @@
# Show keycodes used by Xorg
# start xev and show only the relevant parts:
xev | awk -F'[ )]+' '/^KeyPress/ { a[NR+2] } NR in a { printf "%-3s %s\n", $5, $8 }'
# xev
# Print contents of X events
# Alternative approach to show keycodes, with standard AWK formatting.
# Start xev(1) and show only the relevant parts.
xev | awk -F'[ )]+' '
/^KeyPress/ {
a[NR+2]
}
NR in a {
printf "%-3s %s\n", $5, $8
}
'
# Alternative approach to showing keycodes.
#
# Note that the use of `<(xev)` is process substitution, which is unavailable
# in the Bourne Shell and its standard derivatives, nor is it available in Bash
# with its own POSIX mode enabled.
awk '
/^KeyPress/ {
A[NR+2]
}
NR in A {
B=substr($7, 0, length($7) - 2)
printf("%3d %s\n", $4, B)
}
/^KeyPress/ {
A[NR+2]
}
NR in A {
B=substr($7, 0, length($7) - 2)
printf("%3d %s\n", $4, B)
}
' <(xev)

@ -1,21 +1,20 @@
# xsel
#
# X11 selection and clipboard manipulation tool.
# Command-line tool to access X clipboard and selection buffers
# Use a command's output as input of the clip[b]oard (equivalent to Ctrl + C):
# Read from STDIN and save it to the clipboard; as if `Ctrl + C`.
echo 123 | xsel -ib
# Use the contents of a file as input of the clipboard:
# A file's contents can also be provided to xsel(1x) via STDIN.
cat file | xsel -ib
# Output the clipboard's contents into the terminal (equivalent to Ctrl + V):
# Send the clipboard's contents to STDOUT; as if `Ctrl + V`.
xsel -ob
# Output the clipboard's contents into a file:
# The contents of the clipboard can be saved to a file(s). Note that the use of
# `>` means that any existing file by the same name will be overwritten. Use
# `>>` to instead append the data to that file.
xsel -ob > file
# Clear the clipboard:
# Clear the clipboard.
xsel -cb
# Output the X11 primary selection's contents into the terminal (equivalent to a mouse middle-click):
# Send X11 primary selection to STDOUT, as if clicking mouse's middle button.
xsel -op

Loading…
Cancel
Save