Month: November 2014

How to trace/profile C function calls in Linux (using gcc, strace, systemtap, sysdig, ktap, lttng)

'-finstrument-functions' gcc option that instruments function entry/exit with calls to:
  void __cyg_profile_func_enter (void *this_fn, void *call_site);
  void __cyg_profile_func_exit  (void *this_fn, void *call_site);

$ cat trace.c
#include <stdio.h>
#include <time.h>
static FILE *fp_trace;
/* GCC's constructor/destructor attributes define functions executed before/after main */
void __attribute__ ((constructor)) trace_begin (void) { fp_trace = fopen("trace.out", "w"); }
void __attribute__ ((destructor)) trace_end (void) { if(fp_trace != NULL) fclose(fp_trace); }
/* GCC's instrument-functions */
void __cyg_profile_func_enter (void *func,  void *caller) { 
    if(fp_trace != NULL) fprintf(fp_trace, "e %p %p %lu\n", func, caller, time(NULL) );
}
void __cyg_profile_func_exit (void *func, void *caller) {
    if(fp_trace != NULL) fprintf(fp_trace, "x %p %p %lu\n", func, caller, time(NULL));
}
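
(for illustration, a minimal main.c that would produce the output and trace below; it is not part of the quoted source)
$ cat main.c
#include <stdio.h>
void foo (void) { puts("foo"); }
int main (void) { foo(); return 0; }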

$ gcc -finstrument-functions -g -c -o main.o main.c
$ gcc -c -o trace.o trace.c
$ gcc main.o trace.o -o main
$ ./main
foo
$ cat trace.out
e 0x400679 0x394281c40b 1286372153
e 0x400648 0x40069a 1286372153
(use 'addr2line' to map execution address to source file and line)
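(for illustration, resolving one of the addresses above against the binary; the reported file/line is hypothetical)
$ addr2line -f -e ./main 0x400648
foo
main.c:3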

from Trace and profile function calls with GCC

strace [OPTIONS] command [args ...]
'-p pid' attach to the process pid and begin tracing
'-f' trace child processes created from fork
$ strace ls
execve("/bin/ls", ["ls"], [/* 21 vars */]) = 0
$ sudo strace -p `pidof firefox`
...

## filter
'-e trace=set,set' filter by syscall set (all, specific syscalls like open,close,read,write,stat,chmod,unlink, or classes file,process,network,signal,ipc)
'-e signal=set' filter by signal (all,SIGIO,...)
'-e read=fd'/'-e write=fd' dump all data read from/written to the given file descriptors
$ strace -e trace=open,read ls
open("/etc/ld.so.cache", O_RDONLY)      = 3

## output
'-c' report stats: count time, calls, and errors for each syscall
'-t' print absolute timestamps, '-r' print relative timestamp
'-T' show time spent in syscall
$ strace -t -e open ls /home
20:42:37 open("/etc/ld.so.cache", O_RDONLY) = 3
$ strace -r ls 
    0.000000 execve("/bin/ls", ["ls"], [/* 37 vars */]) = 0
    0.000846 brk(0)                    = 0x8418000
$ strace -c ls
% time     seconds  usecs/call     calls    errors syscall
  -nan    0.000000           0         9           read
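
# for illustration, '-T' appends the time spent in each syscall (output abbreviated/illustrative)
$ strace -T -e trace=open ls
open("/etc/ld.so.cache", O_RDONLY)      = 3 <0.000011>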

from 7 Strace Examples to Debug the Execution of a Program in Linux

## install, requires kernel 3.5+ (uprobes) or utrace support
$ sudo yum install systemtap systemtap-runtime | sudo apt-get install systemtap gcc
# also requires the traced package's -debuginfo with the same build-id, e.g. for the kernel: kernel-debuginfo kernel-debuginfo-common kernel-devel

stap [OPTIONS] FILENAME [ARGS]
'-L probe' lists probe-able functions and script-level local variables
$ stap -L 'process("asterisk").function("*@manager.c")' | sort

'-e SCRIPT' runs script, where script is 
  probe PROBEPOINT [, PROBEPOINT] { [STMT ...] }
'-c CMD' start the probes, run CMD, and exit when CMD finishes
'-x PID' allows scripts to be written that filter on a specific process
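
# for illustration, combining '-e' and '-c' to probe a tapset alias only while a command runs (sketch, not from the source)
$ stap -e 'probe syscall.open { printf("%s %s\n", execname(), filename) }' -c 'ls /etc'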

## kernel and user-space probes
syscall.system_call /* e.g.: syscall.close, syscall.close.return, syscall.*.return */
kernel.function("func[@file]")  /* .call or .return */
kernel.statement("func@file:linenumber")
kernel.trace("tracepoint") /* statically marked with tracepoints */
module("module").function("function") /* similar to kernel.function */
begin,end
timer.ms(milliseconds), timer.us(microseconds), ...
process("PATH"|PID).function("func[@file]") /* similar to kernel.function */ 
process("PATH"|PID).statement("func@file:linenumber") /* similar to kernel.statement */ 
process("PATH"|PID).mark("marker") /* static markers */
process.begin, process.end, process.thread.begin, process.thread.end

## pretty printing variables, see https://sourceware.org/systemtap/langref/
$var refers to an in-scope variable, supports $var->field and $var[N]
$$vars prints all local scope variables, $$vars$ expands structures, '$$vars$$' recursively
$$locals, $$params, $$return subsets of $$vars containing only local variables, function arguments and return value
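
# for illustration, printing all parameters of a probed function with '$$parms' (sketch, not from the source)
$ stap -e 'probe kernel.function("do_sys_open") { printf("%s\n", $$parms) }' -c 'cat /dev/null'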

## tapsets: scripts of pre-written probes

## examples
#! /usr/bin/env stap
# helloworld.stp
probe begin {
    printf ("hello world\n")
}

# func_time_stats.stp, function time stats
global start, intervals
probe $1 { start[tid()] = gettimeofday_us() }
probe $1.return {
    t = gettimeofday_us()
    old_t = start[tid()]
    if (old_t) intervals <<< t - old_t
    delete start[tid()]
}
probe end {
    printf("intervals min:%dus avg:%dus max:%dus count:%d\n",
        @min(intervals), @avg(intervals), @max(intervals), @count(intervals))
}

# para_callgraph.stp, print a call graph of $1; the optional $2 limits tracing to threads currently inside $2
function trace(entry_p, extra) {
    %( $# > 1 %? if (tid() in trace) %)
    printf("%s%s%s %s\n", thread_indent(entry_p), (entry_p>0?"->":"<-"), ppfunc(), extra)
}
%( $# > 1 %?
global trace
probe $2.call   { trace[tid()] = 1 }
probe $2.return { delete trace[tid()] }
%)
probe $1.call   { trace(1, $$parms) }
probe $1.return { trace(-1, $$return) }

# regex.stp, prints opened files by name
probe syscall.open {
    if (filename =~ %($# == 0 %?
            "(\\.tar\\.(gz|bz2|xz)|\\.zip|\\.tgz|\\.tbz)$"
            %: @1 %) ) {
        printf ("opened %s\n", filename)
    }
}

# last_100_frees.stp, print recent free calls
global bt%[100]
probe process("/lib*/libc.so.*").function("free") {
    if (@defined($mem)) {
        bt[execname(), tid(), $mem, sprint_ubacktrace()] <<< 1
    }
}
probe end {
    foreach ([e, t, m, b] in bt) printf("%s[%d] free(%p)\n%s\n", e, t, m, b)
}

# socket-trace.stp, call graph of kernel functions in net/socket.c
probe kernel.function("*@net/socket.c").call {
    printf ("%s -> %s\n", thread_indent(1), ppfunc())
}
probe kernel.function("*@net/socket.c").return {
    printf ("%s <- %s\n", thread_indent(-1), ppfunc())
}

# tcp_connections.stp, traces incoming TCP connections
probe begin {
    printf("%6s %16s %6s %6s %16s\n", "UID", "CMD", "PID", "PORT", "IP_SOURCE")
}
probe kernel.function("tcp_accept").return?, kernel.function("inet_csk_accept").return? {
    sock = $return
    if (sock != 0)
        printf("%6d %16s %6d %6d %16s\n", uid(), execname(), pid(), inet_get_local_port(sock), inet_get_ip_source(sock))
}

# tcpdumplike.stp, dump received UDP/TCP packets
probe udp.recvmsg /* ,udp.sendmsg */ {
    printf(" %15s %15s  %5d  %5d  UDP\n", saddr, daddr, sport, dport)
}
probe tcp.receive {
    printf(" %15s %15s  %5d  %5d  %d  %d  %d  %d  %d  %d\n", 
        saddr, daddr, sport, dport, urg, ack, psh, rst, syn, fin)
}

# sigmon.stp, tracks a given signal sent to a target process, usage: stap -x 31994 sigmon.stp SIGKILL
probe signal.send {
    if (sig_name == @1 && sig_pid == target())
        printf("%-8d %-16s %-5d %-16s %-6d %-16s\n", 
            pid(), execname(), sig_pid, pid_name, sig, sig_name)
}

# functioncallcount.stp, usage: stap functioncallcount.stp "*@mm/*.c"
global called
probe kernel.function(@1).call {
    called[ppfunc()] <<< 1
}
probe end {
  foreach (fn in called-)  # sort by call count (in decreasing order)
    printf("%s %d\n", fn, @count(called[fn]))
}

## systemtap initscript service, see https://sourceware.org/systemtap/man/systemtap.8.html
$ sudo yum install systemtap-initscript
$ cp script1.stp /etc/systemtap/script.d/
$ cat /etc/systemtap/conf.d/group1
script1_OPT="-o /var/log/group1.out -DRELAY_HOST=group1"
$ service systemtap restart

## '-g' guru mode, allows embedding C within %{ ... %} and makes target variables writable
    function <name>:<type> ( <arg>:<type>, ... ) %{ <C statements> %}
function add_one (val) %{
    STAP_RETVALUE = STAP_ARG_val + 1;
%}

## static probes, see https://sourceware.org/systemtap/wiki/AddingUserSpaceProbingToApps
$ sudo yum install systemtap-sdt-devel
/* match STAP_PROBE(bar, baz ) */
probe process("foo").provider("bar").mark("baz")

## client-server probes, see http://jfsaucier.wordpress.com/tutorials-and-howto/systemtap-server-and-client/

from tutorial, guide/guide@rhel, langref, tapsets and examples

  • ktap is similar to systemtap but is based on bytecode, so it doesn't depend on GCC, doesn't require compiling a kernel module for each script, is safe to use in production environments, and fulfills the embedded ecosystem's tracing needs.
## install
$ git clone http://github.com/ktap/ktap.git
$ cd ktap ; make ; sudo make load
$ ./ktap samples/helloworld.kp

## trace all syscalls in system
$ cat syscalls.kp
#!/usr/bin/env ktap
trace syscalls:* { print(cpu(), pid(), execname(), argevent) }

## enable all tracepoints
$ ktap -e "trace *:* { print(argstr) }"

## syscall tracing on target process
$ ktap -e "trace syscalls:* { print(argstr) }" -- ls

## ftrace, http://en.wikipedia.org/wiki/Ftrace
$ ktap -e "trace ftrace:function { print(argstr) }"

## kprobe tracing, http://lwn.net/Articles/132196/
$ cat kprobe.kp
trace probe:do_sys_open dfd=%di fname=%dx flags=%cx mode=+4($stack) { print("entry:", execname, argstr) }
trace probe:do_sys_open%return fd=$retval { print("exit:", execname, argstr) }

## uprobe tracing, http://lwn.net/Articles/499190/
$ cat uprobe.kp
trace probe:/lib/libc.so.6:malloc { print("entry:", execname, argstr) }
trace probe:/lib/libc.so.6:malloc%return { print("exit:", execname, argstr) }

## stapsdt tracing (userspace static markers)
$ cat stapsdt.kp
# trace all static marks in libc
trace sdt:/lib64/libc.so.6:* { print(execname, argstr) }

from tutorial and ktap@github

  • sysdig has an architecture very similar to that of libpcap/tcpdump/wireshark: events are captured by a kernel module using tracepoints, while the rest (chisels) runs in user space
## installing
$ curl -s https://s3.amazonaws.com/download.draios.com/stable/install-sysdig | sudo bash

## list chisels, show chisel info
$ sysdig -cl ; sysdig -i spy_port 
# list filters, to be applied to both live and trace files
$ sysdig -l

## execute a chisel on live data
$ sysdig -c bottlenecks
# or on saved/offline trace data
$ sysdig -w trace.scap ; sysdig -r trace.scap -c bottlenecks
$ sysdig -s 4096 -z -w $(hostname).scap.gz

## monitor user activity, network io and file io
$ sysdig -r $(hostname).scap.gz -c spy_users 'user.name='
$ sysdig -s 4096 -A -c echo_fds fd.cip=192.168.0.100 -r $(hostname).scap.gz proc.name=apache2
$ sysdig -p '%user.name %proc.name %fd.name' 'evt.type=write and fd.name contains /home/' -z -w writetrace.scap.gz ; $ sysdig -r writetrace.scap.gz

from sysdig@xmodulo

  • lttng/lttng@wiki “Linux Trace Toolkit Next Generation” uses kernel modules (for Linux kernel tracing) and dynamically linked libraries (for application and library tracing), both controlled by a session daemon, which receives commands from a CLI.
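# a minimal lttng kernel-tracing session, for illustration only (package names vary by distro)
$ sudo apt-get install lttng-tools lttng-modules-dkms | sudo yum install lttng-tools
$ lttng create mysession
$ lttng enable-event --kernel sched_switch
$ lttng start ; sleep 1 ; lttng stop
$ lttng view
$ lttng destroy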

How to sign/verify Git tags and commits (using GnuPG)

  • git-tag@man is used to create, list, delete or verify a tag object signed with gnupg.
# install gnupg
$ sudo apt-get install gnupg2 | sudo yum install gnupg2

# from git-tag
'-s/--sign' make a GPG-signed tag, using the default e-mail address’s key
'-u/--local-user=<key-id>' make a GPG-signed tag, using the given key (defaults to 'user.signingkey')
'-v/--verify' verify the gpg signature of the given tag names.

# create key pair, asks for your_email@address.com; note: use rng-tools to increase entropy
$ gpg --gen-key
$ gpg --list-secret-keys | grep ^sec
# either use '-u' or
$ git config --global user.signingkey [gpg-key-id]

# create a signed tag with private key
$ git tag --sign [signed-tag-name] -m "message"

# make the public key available by storing it as a raw git object in the repo so others can import it
$ gpg --list-keys
$ gpg -a --export [gpg-key-id] | git hash-object -w --stdin
[object SHA]
# tag key with a name
$ git tag -a maintainer-pgp-pub [object SHA]
# import keys
$ git show maintainer-pgp-pub | gpg --import

# verify a tag signature
$ git tag --verify [signed-tag-name]

from Git Tools – Signing Your Work

  • git-commit@man records changes to the repository.
    As of 1.7.9 it’s possible to sign your commits with your private/secret key.
    As of 1.8.3, “git merge” and “git pull” can be told to inspect and reject a merge of commits that do not carry a trusted GPG signature, via the '--verify-signatures' option.
# from git-commit
'-S<keyid>/--gpg-sign=<keyid>' GPG-sign commit using the given key (defaults to 'user.signingkey')
# from git-log
'--show-signature' check the validity of a signed commit object by passing the signature to 'gpg --verify' and show the output
# from git-merge
'--verify-signatures' verify that the commits being merged have good and trusted GPG signatures and abort the merge in case they do not
'-S' sign the resulting merge commit itself

# sign commit
$ git config --global user.signingkey 8EE30EAB
$ git commit -m "message" -S

# show and verify signature in commit message
$ git log --show-signature 
gpg: Signature made ...
gpg: Good signature from ...

# verify and reject merge if has commits not signed
$ git merge --verify-signatures non-verify
fatal: Commit ab06180 does not have a GPG signature.

from Git Tools – Signing Your Work

How to encrypt/decrypt/sign/verify files in Linux (using GnuPG, PKZIP and 7z)

  • gnupg/gnupg@man GPL-licensed alternative to PGP, an encryption/decryption program that provides cryptographic privacy and authentication for data communication.

Using symmetric-key algorithms – use same key for encryption and decryption

# install
$ sudo apt-get install gnupg2 | sudo yum install gnupg2

'-c/--symmetric' encrypt with a symmetric cipher using a passphrase; the cipher is selected with '--cipher-algo' (default is CAST5)

# encrypting a file, asks for passphrase; generates encrypted 'filename.gpg'
$ gpg -c filename

'-d' decrypt to stdout, use '-o/--output'; if the decrypted file is signed, the signature is also verified

# decrypt file, asks for passphrase
$ gpg filename.gpg or  gpg -o filename -d filename.gpg

from Linux: HowTo Encrypt And Decrypt Files With A Password and How to create an encrypted zip file on Linux

Using public-key algorithms – use public key to encrypt or verify digital signature; and private/secret key to decrypt or sign with digital signature

# random number generator for entropy see http://www.howtoforge.com/helping-the-random-number-generator-to-gain-enough-entropy-with-rng-tools-debian-lenny
$ sudo apt-get install rng-tools | sudo yum install rng-tools
$ cat /etc/default/rng-tools
HRNGDEVICE=/dev/urandom
$ service rng-tools start

'-s/--sign' make a signature (to .sig)
'-e/--encrypt' encrypt data (to .asc or .gpg, see '-a')
'-s -e' signed and encrypted message
'-c -s' signed and symmetrically encrypted message
'-c -e' message that may be decrypted via a secret key or a passphrase
'-c -s -e' signed message that may be decrypted via a secret key or a passphrase

'-a/--armor' create ASCII armored output, .asc is generated instead of .gpg
'-u/--local-user name' secret key to use, defaults to first; useful if you have more than one secret key
'-r/--recipient name' public key of recipient; if this option or --hidden-recipient is not specified, GnuPG asks for the user-id unless --default-recipient is given

# create key pair, asks for your_email@address.com
$ gpg --gen-key
# optionally, create revocation certificate; used to invalidate key pair
$ gpg --gen-revoke your_email@address.com

# lists public/private keys
$ gpg --list-keys ; gpg --list-secret-keys
# export public keys to share with everyone
$ gpg -a --export your_email@address.com > public.key
# import others public keys
$ gpg --import public.key
# search for public keys in a keyserver; default keyserver is 'hkp://keys.gnupg.net/'
$ gpg --keyserver pgp.mit.edu --search-keys search_parameters
# send public key 'KEYID' to keyserver
$ gpg --keyserver pgp.mit.edu --send-keys KEYID
# get public key from keyserver
$ gpg --keyserver pgp.mit.edu --recv-key KEYID
# delete public/private keys
$ gpg --delete-key your_email@address.com ; gpg --delete-secret-key your_email@address.com

# encrypt with the recipient's public key and sign with your private key; use '-u your_email@address.com' if you have more than one private key
$ gpg --encrypt --sign -a -r receiver_email@address.com -o filename.asc filename
# decrypt and verify signature using private key, asks for passphrase
$ gpg --decrypt filename.asc -o filename
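# for illustration, the '-c -e' combination listed above yields a file decryptable with either the passphrase or the recipient's secret key
$ gpg -c -e -a -r receiver_email@address.com -o filename.asc filename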

# sign with your private key; generates 'filename.sig'
$ gpg --sign -a -o filename.sig filename
# verify signature and recover original file
$ gpg --decrypt -o filename filename.sig
# generate a detached signature and verify signature
$ gpg --detach-sig filename ; gpg --verify filename.sig filename

from The GNU Privacy Handbook
front-ends for GnuPG: KGPG@wiki and Seahorse@wiki/How to PGP encrypt, decrypt or digitally sign files via GnuPG GUI

# encrypt using zip; it uses the (weak) PKZIP stream cipher, see http://www.academia.edu/348210/PKZIP_Algorithm
$ zip --password MY_SECRET secure.zip files
# decrypt
$ unzip secure.zip

# encrypt using 7zip; 7z archiver supports AES-256 encryption algorithm with SHA-256 hash algorithm based key generation
$ 7za a -tzip -pMY_SECRET -mem=AES256 secure.zip files
# decrypt
$ 7za e secure.zip

# encrypt using tar + gnupg symmetric key
$ tar czvpf - files | gpg --symmetric --cipher-algo aes256 -o secure.tar.gz.gpg
# decrypt
$ gpg -d secure.tar.gz.gpg | tar xzvf - 

from How to create an encrypted zip file on Linux

How to backup/synchronize files and directories in Linux (using scp/rsync, rdiff-backup, duplicity, rsnapshot, unison, rdup, burp, dar/tar/dtrx, obnam, dd, bup, csync, dump/restore)

From backups@archlinux and file sync@wiki

  • scp@man secure copy (remote file copy program)
# install
$ sudo yum install openssh-clients | sudo apt-get install openssh-client

# remote push, scp [OPTION...] SRC... [USER@]HOST:DEST
$ scp /from user@host:/to
# remote pull, scp [OPTION...] [USER@]HOST:SRC... [DEST]
$ scp user@host:/from /to
# note: both src and dest can be remote

'-r' recursively copy entire directories
'-p' preserves modification/access times and modes
'-l limit' Limits the used bandwidth, in kbps
'-C' compression enable
'-B' batch mode (prevents asking for passwords or passphrases)
'-F ssh_config'/'-o ssh_option' use alternative ssh config file/options
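
# for illustration, combining options: recursive, preserve times/modes, compress, cap bandwidth at 1 Mbit/s
$ scp -rpC -l 1024 /from user@host:/to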

from SCP examples to transfer files/folders

  • rsync@wiki/rsync@man is a file synchronization and file transfer program for Unix.
    It can copy locally, to/from another host over any remote shell, or to/from a remote rsync daemon; it's a replacement for rcp and scp.
    It uses a delta-transfer algorithm, which reduces the amount of data sent over the network, and contacts the remote system via a remote shell (rsh, ssh) or the rsync daemon.
    It doesn't use librsync.
## install
$ sudo yum install rsync | sudo apt-get install rsync

# local, rsync [OPTION...] SRC... [DEST]
$ rsync -az /from /to
# remote push, rsync [OPTION...] SRC... [USER@]HOST:DEST
$ rsync -az /from user@host:/to
# remote pull, rsync [OPTION...] [USER@]HOST:SRC... [DEST]
$ rsync -az user@host:/from /to
# note: both src and dest can be remote

'-a' archive mode, copying recursively, preserves symbolic links, file permissions, user & group ownerships and timestamps, equals -rlptgoD
'-r' recurse into directories, but don't preserve timestamps and permissions while transferring data
'-l' copy symlinks as symlinks
'-p/-E/-A/-X/-o/-g/-t' preserve permissions/executability/ACLs/extended/owner/group/times
'-z' compress data during transfer
'--dry-run,-n' trial run, no changes

'--ignore-existing' skip updating files that already exist on receiver
'--remove-source-files' sender removes synchronized files (non-dirs)
'--delete' delete extraneous files from destination dirs
'--partial' keeps partially transferred files
'--existing' skip creating new files on receiver
$ rsync -az --delete user@host:/from /to
$ rsync -az --remove-source-files backup.tar /to

'--bwlimit=KBPS' limit I/O bandwidth; KBytes per second
'--exclude=PATTERN' exclude files matching PATTERN
'--include=PATTERN' don't exclude files matching PATTERN
$ rsync -az --include 'R*' --exclude '*' user@host:/from /to

'-b' preexisting destination files are renamed as each file is transferred or deleted
'-u' skip files newer in destination
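# for illustration, '-u' leaves files that are newer on the destination untouched
$ rsync -azu /from user@host:/to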

## run rsync daemon, using systemd; already included in latest package
$ cat /{etc,usr/lib}/systemd/system/rsyncd.socket
; socket activation to start on first connect, systemctl enable rsyncd.socket
[Unit]
Description=Rsync Server Socket
Conflicts=rsyncd.service
[Socket]
ListenStream=873
Accept=yes
[Install]
WantedBy=sockets.target 
$ cat /{etc,usr/lib}/systemd/system/rsyncd.service
; daemon to start at boot, systemctl enable rsyncd.service
[Unit]
Description=fast remote file copy program daemon
ConditionPathExists=/etc/rsyncd.conf
[Service]
EnvironmentFile=/etc/sysconfig/rsyncd
ExecStart=/usr/bin/rsync --daemon --no-detach "$OPTIONS"
[Install]
WantedBy=multi-user.target
$ rsync localhost::

from rsync cheat sheet

  • rdiff-backup/rdiff-backup@man local/remote mirror and incremental backup.
    Uses librsync (also used by rdiff@man, a diff/patch tool that works on a different delta format) instead of rsync.
    Stores the most recent backup as regular files; to revert to older versions, the diff files are applied to recreate them.
# install
$ sudo yum install rdiff-backup | sudo apt-get install rdiff-backup

# backup 'local-dir' to remote dir which will also include 'rdiff-backup-data' to restore previous states
$ rdiff-backup local-dir user@host.net::/remote-dir

# restore current version, '-r/--restore-as-of'
$ rdiff-backup -r now host.net::/remote-dir/file local-dir/file
# restore from 10 days ago
$ rdiff-backup -r 10D host.net::/remote-dir/file /tmp/file

# delete older files, '--remove-older-than'
$ rdiff-backup --remove-older-than 2W host.net::/remote-dir

# file selection include/exclude; '**' matches any path, '*' matches any path except '/'
$ rdiff-backup --exclude /tmp --exclude /mnt --exclude /proc user@host.net::/ /backup/host.net

# list number and date of partial incremental backups from given directory/file, '-l/--list-increments'
$ rdiff-backup -l out-dir/file
# list all files in directory changed since given date, '--list-changed-since'
$ rdiff-backup --list-changed-since 5D out-dir/subdir
# list all files present at given time, '--list-at-time'
$ rdiff-backup --list-at-time 5D out-dir/subdir

# compare changes in directories
$ rdiff-backup --compare in-dir user@host::out-dir
# same by as seen by date
$ rdiff-backup --compare-at-time 2W in-dir user@host::out-dir

from rdiff-backup examples

  • duplicity/duplicity@man backs directories by producing encrypted tar-format volumes and uploading them to a remote or local file server.
    It's a variation on rdiff-backup that allows backups without cooperation from the storage server.
    Uses librsync to build space efficient incremental archives.
    Supports deleted files, full Unix permissions, directories, symbolic links, fifos, etc., but not hard links.
    Supports local, scp/ssh, sftp, rsync, ftp, s3, webdav: ‘scheme://user[:password]@host[:port]/[/]path’.
# install
$ sudo apt-get install duplicity | sudo yum install duplicity (EPEL)

# backup '/from' using scp/ssh to remote; repeating command will make incremental backups
$ duplicity /from scp://user@host//usr/backup
# force full backup 'full/incremental'
$ duplicity full --exclude /tmp /from scp://user@host//usr/backup

# restore
$ duplicity scp://user@host//usr/backup restored_dir
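# for illustration, restore a single file as it was 3 days ago ('-t/--restore-time', '--file-to-restore'); paths are made up
$ duplicity -t 3D --file-to-restore home/user/file scp://user@host//usr/backup /tmp/file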

from How to create a secure incremental offsite backup in Linux with Duplicity

  • rsnapshot/rsnapshot@man remote filesystem snapshot utility.
    It can take incremental snapshots of local and remote filesystems for any number of machines.
## install
$ sudo yum install rsnapshot | sudo apt-get install rsnapshot | sudo pacman -S rsnapshot

## configure
$ cat /etc/rsnapshot.conf
...
# local filesystem path to save all snapshots
snapshot_root /mnt/backups/
...
# number of snapshots to retain by 'type'
retain hourly 6
...
# backup /etc/ to <snapshot_root>/<retain>.0/localhost/etc/ using rsync on the local filesystem
backup /etc/ localhost/
# backup /usr/local/ to <snapshot_root>/<retain>.0/localhost/usr/local/ using rsync on the local filesystem
backup /usr/local/ localhost/
# backup root@example.com:/etc/ to <snapshot_root>/<retain>.0/example.com/etc/ using rsync over ssh
backup root@example.com:/etc/ example.com/

# test
$ rsnapshot configtest
# verify by type
$ rsnapshot -t hourly

# schedule backups using cron
$ crontab -e
0 */4 * * * /usr/local/bin/rsnapshot hourly
30 23 * * * /usr/local/bin/rsnapshot daily
(or using systemd)
$ cat /{etc,usr/lib}/systemd/system/rsnapshot@.service
[Unit]
Description=rsnapshot (%I) backup
[Service]
Type=oneshot
Nice=19
IOSchedulingClass=3
ExecStart=/usr/bin/rsnapshot %I
$ cat /{etc,usr/lib}/systemd/system/rsnapshot-daily.timer
[Unit]
Description=rsnapshot daily backup
[Timer]
# 14:30 is the clock time when to start it
OnCalendar=14:30
Persistent=true
Unit=rsnapshot@daily.service
[Install]
WantedBy=timers.target
$ systemctl enable rsnapshot-daily.timer ; systemctl start rsnapshot-daily.timer

from rsnapshot@archlinux and rsnapshot@tecmint

  • unison/unison@wiki supports bi-directional file synchronization using a smart diff method + rsync.
    Allows the user to interactively choose which changes to push, pull, or merge.
## install
$ sudo yum install unison | sudo apt-get install unison | sudo pacman -S unison

# sync '/path' with a remote interactively
$ unison /path ssh://host//path

## using profiles, non-interactively
$ cat ~/.unison/default.prf
# roots of the synchronization, supports ssh://,rsh://, socket://
root = /sync_folder
root = ssh://dev@192.168.1.10//sync_folder (under the root)
# path = dir1
# regexps specifying names and paths to ignore
#ignore = Path stats/*  ## ignores /stats/*
#ignore = Name *stats   ## ignores all files/directories that end with "stats"
# no question asked, non-conflicting changes will be propagated, conflicts will be skipped.
auto = true
# source for force one-way sync
# force = /sync_folder
$ unison (run on either server)

from How to synchronize files between two servers bidirectionally

  • rdup a set of scripts/utilities to facilitate backups; it delegates encryption, compression, transfer and packaging to other utilities.
# install
$ sudo yum install rdup (RPMforge) | sudo apt-get install rdup

# prints list of files that changed, or all in case of /dev/null dump
$ rdup -N timestamp LIST DIR

# rdup mirroring, backup
$ rdup /dev/null ~/bin | rdup-up -t /shared/backup
# and restore
$ rdup /dev/null /shared/backup | rdup-up -t /tmp/restore

# rdup archiving, backup
$ rdup /dev/null ~/bin > my-archive.rdup
# and restore
$ rdup-up -t /tmp/restore < my-archive.rdup

  • dd@man convert and copy a file; can back up whole disks/partitions to an image file.
# backup a disk to a compressed image, and restore it
$ sudo dd if=/dev/sda | gzip -c > /mnt/disk1/sda.img.gz
$ gunzip -c /mnt/disk1/sda.img.gz | sudo dd of=/dev/sda

# backup/restore to/from a remote
$ sudo dd if=/dev/sda | gzip -c | ssh user@remote_host "cat > /mnt/disk1/sda.img.gz"
$ ssh user@remote_host "cat /mnt/disk1/sda.img.gz" | gunzip -c | sudo dd of=/dev/sda

from How to backup a hard disk on Linux

  • bup@github/bup@ubuntu based on the git packfile format, providing fast incremental saves and global deduplication.
    Uses a rolling checksum algorithm (similar to rsync) to split large files into chunks.
# install
$ sudo apt-get install bup
# for rhel/centos build from source, https://build.opensuse.org/package/show/home:p_conrad:branches/bup
$ yum install -y python-devel git-core python-fuse && rpmbuild -bb bup.spec

# local backup; '-x' limit to one filesystem, '--exclude' exclude paths, '-u' update the index (the default)
$ bup init ; bup index -x /etc ; bup save -n local-etc /etc

# restore local backup to ./dest; '-n' branch name
$ bup restore -C ./dest local-etc/latest/etc
# make another backup, incremental
$ bup index /etc ; bup save -n local-etc /etc
# restore previous backup
$ bup restore -C ./dest-2 local-etc/2013-11-23-11195/etc

# list previous backups
$ bup ls local-etc

# backup to remote; '-r/--remote=host:port' using ssh
$ ssh host bup init; bup index /etc ; bup save -r host: -n local-etc /etc
# restore from remote; 'restore -r' isn't supported, so
$ sudo sshfs user@host:/dest-2 ./dest-2 ; ssh host bup restore -C ./dest-2

# use split/join instead of index/save/restore; create a local backup using tar
$ tar -cvf - /etc | bup split -n local-etc -vv
# and restoring
$ bup join local-etc | tar -tf -
# create another "incremental" backup
$ tar -cvf - /etc | bup split -n local-etc -vv
# restoring the previous backup
$ bup join local-etc~1 | tar -tf -

# backup on a remote
$ tar -cvf - /etc | bup split -r SERVERNAME: -n local-etc -vv
# restoring from a remote
$ bup join -r SERVERNAME: local-etc | tar -tf -

  • csync lightweight bidirectional utility to synchronize files between two directories on a system or between multiple systems. Supports sftp and smb.
    Used by OwnCloud (http://en.wikipedia.org/wiki/OwnCloud).
# install, for fedora see http://download.opensuse.org/repositories/network:/synchronization:/files/
$ sudo add-apt-repository ppa:markhannon/ppa && sudo apt-get update && sudo apt-get install csync

csync [OPTION...] SOURCE DESTINATION
$ csync replica1 replica2
# remote 'sftp://' or 'smb://'
$ csync replica1 smb://user:passwd@host/replica2 or csync replica1 sftp://user:passwd@host/replica2

from Csync – Client only bidirectional file synchronizer and CSYNC User Guide

  • dump@man/restore@man ext2/3 filesystem backup.
    Doesn't work on subdirectories, only filesystems (and only on ext2/3).
# install
$ sudo apt-get install dump | sudo yum install dump

# exclude paths
$ chattr +d /path
$ lsattr -d /path
------d------e- /path

'-level#' dump level, '0' is full backup, otherwise incremental backup (copy all files new or modified since the last dump of a lower level) 
'-h level' honor the user nodump flag, default honor level is 1
'-u' update the file /etc/dumpdates after a successful dump
'-f file' write the backup to file; file may be /dev/st0 (a tape drive), /dev/sda1 (a disk partition), an ordinary file, or '-' (the standard output)

# full backup, into '/dev/st0' tape
$ dump -0u -f /dev/st0 /path
# incremental backup
$ dump -1u -f /dev/st0 /path

'-t' list and '-C' compare to current
'-r' restore/rebuild filesystem
'-x' extract individual files
'-f file' read the backup from file; file may be /dev/st0 (a tape drive), /dev/sda1 (a disk partition), an ordinary file, or '-' (the standard input)
$ restore -Cf /dev/st0
$ restore -rf /dev/st0
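# for illustration, extract a single file from the dump with '-x' (path is made up)
$ restore -xf /dev/st0 ./etc/passwd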

from UNIX / Linux Dump Command Exclude Directories / Files and Linux Tape Backup With mt And tar Command Howto

How to create a RAID1 for backup in Linux (using DRBD)

  • drbd/drbd@wiki is a distributed replicated storage system.
    Writes to the primary node are transferred to the lower-level block device and simultaneously propagated to the secondary node. Should the primary node fail, a cluster management process promotes the secondary node to a primary state.
# install
$ sudo apt-get install drbd8-utils drbd8-module | sudo yum install drbd84-utils kmod-drbd84 (ELRepo)
$ modprobe drbd or reboot
# make sure all hosts have same time
$ sudo apt-get install ntp ntpdate | sudo yum install ntp ntpdate

# partition /dev/sdb, on each host
$ fdisk /dev/sdb

# configure
$ cat /etc/drbd.conf
global { usage-count no; }
common { syncer { rate 100M; } }
resource r0 {
        protocol C;
        startup {
                wfc-timeout  15;
                degr-wfc-timeout 60;
        }
        net {
                cram-hmac-alg sha1;
                shared-secret "secret";
        }
        on drbd01 {
                device /dev/drbd0;
                disk /dev/sdb1;
                address 192.168.0.1:7788;
                meta-disk internal;
        }
        on drbd02 {
                device /dev/drbd0;
                disk /dev/sdb1;
                address 192.168.0.2:7788;
                meta-disk internal;
        }
} 
$ scp /etc/drbd.conf drbd02:/etc/drbd.conf
# initialize the meta data storage, on each server
$ sudo drbdadm create-md r0 ; sudo service drbd start
# set drbd01 as primary
$(drbd01) sudo drbdadm -- --overwrite-data-of-peer primary all
# watch progress on secondary
$(drbd02) watch -n1 cat /proc/drbd
# mount fs
$(drbd01) sudo mkfs.ext3 /dev/drbd0 ; mkdir /mnt/data ; sudo mount /dev/drbd0 /mnt/data

# testing
$(drbd01) touch /mnt/data/test.txt
# on primary, unmount and demote-it to secondary
$(drbd01) sudo umount /mnt/data ; sudo drbdadm secondary r0
# on secondary, promote-it to primary
$(drbd02) sudo drbdadm primary r0
# remount; you should see '/mnt/data/test.txt' copied from the former primary host drbd01
$(drbd02) sudo mkdir -p /mnt/data ; sudo mount /dev/drbd0 /mnt/data ; ls /mnt/data/test.txt

# check status, see http://www.drbd.org/users-guide/ch-admin.html#s-check-status
$ drbd-overview
$ cat /proc/drbd

from drbd@ubuntu and drbd@debian

How to add signed-off-by lines by amending Git commit messages (using git-interpret-trailers)

‘Signed-off-by:’ tag indicates that the signer was involved in the development of the patch, or that he/she was in the patch’s delivery path. It’s a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.

‘Acked-by:’ is used by the maintainer of the affected code when that maintainer neither contributed to nor forwarded the patch. If a person has had the opportunity to comment on a patch, but has not provided such comments, you may optionally add a ‘Cc:’ tag to the patch.

If this patch fixes a problem reported by somebody else, consider adding a ‘Reported-by:’ tag to credit the reporter for their contribution. A ‘Tested-by:’ tag indicates that the patch has been successfully tested (in some environment) by the person named. ‘Reviewed-by:’, instead, indicates that the patch has been reviewed and found acceptable.

# amend commit with signed-off-by
$ git commit --amend --signoff
$ git log
...
Signed-off-by: Alice <alice@example.com>
# configure a 'sign' trailer with a 'Signed-off-by' key, and then add two of these trailers to a message
$ git config trailer.sign.key "Signed-off-by"
$ cat msg.txt
subject
message
$ cat msg.txt | git interpret-trailers --trailer 'sign: Alice <alice@example.com>' --trailer 'sign: Bob <bob@example.com>'
subject
message
Signed-off-by: Alice <alice@example.com>
Signed-off-by: Bob <bob@example.com>

# extract the last commit as a patch, and add a Cc and a Reviewed-by trailer to it
$ git format-patch -1
0001-foo.patch
$ git interpret-trailers --trailer 'Cc: Alice <alice@example.com>' --trailer 'Reviewed-by: Bob <bob@example.com>' 0001-foo.patch >0001-bar.patch

# configure a sign trailer with a command to automatically add a 'Signed-off-by: ' with the author information only if there is no 'Signed-off-by: ' already
$ git config trailer.sign.key "Signed-off-by: "
$ git config trailer.sign.ifmissing add
$ git config trailer.sign.ifexists doNothing
$ git config trailer.sign.command 'echo "$(git config user.name) <$(git config user.email)>"'
$ git interpret-trailers <<EOF
EOF
Signed-off-by: Bob <bob@example.com>

How to clean up old kernels in rhel/centos/ubuntu/debian

# remove all unused linux kernel headers, images and modules
$ sudo yum install yum-utils && sudo package-cleanup --oldkernels --count=2

see yum@commandlinefu

# query linux kernels, in ubuntu/debian
$ dpkg -l 'linux-*'

# remove all unused linux kernel headers, images and modules
$ dpkg -l 'linux-*' | sed '/^ii/!d;/'"$(uname -r | sed "s/\(.*\)-\([^0-9]\+\)/\1/")"'/d;s/^[^ ]* [^ ]* \([^ ]*\).*/\1/;/[0-9]/!d' | xargs sudo apt-get -y purge

# remove vs purge; purge is identical to remove except that any configuration files are deleted too
$ sudo apt-get|aptitude remove vs sudo apt-get|aptitude purge

# removes all previously downloaded .deb files from the package cache directory '/var/cache/apt/archives{,/partial}'
$ sudo apt-get|aptitude clean

# same as clean but only removes package files that can no longer be downloaded, and are largely useless
$ sudo apt-get|aptitude autoclean

# remove packages that were automatically installed to satisfy dependencies for other packages and are now no longer needed; automatically done in aptitude
$ sudo apt-get remove --autoremove | sudo apt-get autoremove

# after installing anything with apt-get install, localepurge will remove all translation files and translated man pages in languages you cannot read
$ sudo apt-get install localepurge

# remove orphaned packages
$ sudo apt-get install deborphan
$ sudo deborphan | xargs sudo apt-get -y remove --purge

from cleaning up a Ubuntu GNU/Linux system
see also apt@commandlinefu and dpkg@commandlinefu

How to tunnel SSH through HTTP proxies (using corkscrew)

  • corkscrew/corkscrew@ubuntu is a tool for tunneling SSH through HTTP proxies.
    To open the connection to the server running the SSH daemon, it uses the HTTP CONNECT method, which allows a client to connect to a server through an HTTP proxy by sending an HTTP CONNECT request to that proxy.
# install
$ sudo yum install corkscrew (Fedora Rawhide) | sudo apt-get install corkscrew

$ cat ~/.ssh/config
ProxyCommand corkscrew http-proxy.example.com 8080 %h %p

# authenticated proxy connections
$ cat ~/.ssh/config
ProxyCommand corkscrew http-proxy.example.com 8080 %h %p ~/.ssh/proxyauth
$ cat ~/.ssh/proxyauth
username:passwd

# same but using putty, see http://www.howopensource.com/2014/10/creare-secure-communication-putty/

from Tunneling SSH through HTTP proxies using HTTP Connect

How to measure network speed in Linux (using netcat)

# install
$ sudo yum install nc | sudo apt-get install netcat

$(server) nc -lk 2112 >/dev/null
$(client) dd if=/dev/zero bs=16000 count=625 | nc -v server 2112
Connection to server 2112 port [tcp/idonix-metanet] succeeded!
625+0 records in
625+0 records out
10000000 bytes (10 MB) copied, 0.121985 s, 82.0 MB/s

from Linux Network Speed Test