#!/bin/bash

###########################################################
# PUPPET MANAGED #
# Do not edit this file on a server node unless you #
# are willing to have your changes overwritten by #
# Puppet. If you really want to change the contents #
# of this file, change it in the puppet subversion #
# repository and check it out on the ops server. #
###########################################################

#
# This script is used to backup multiple mysql instances all
# running on the same machine, with data directories in the
# same LVM partition. A full backup will copy ALL data on the
# mysql LVM partition. An incremental backup will rsync only
# mysql binary logs for each mysql instance into the most
# recently created backup.
#
# This allows for point-in-time restores of any database.
# To restore:
#   - Copy the data directory of the mysql instance to the restore machine.
#   - Start mysql with this data directory.
#   - Replay the binary logs in the binlog directory, starting
#     at the position and binlog file listed in <instance>/master_status.txt.
#     You may also filter the binary logs in order to leave out some
#     SQL statements (a sketch follows below).
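#
# For illustration only, a minimal point-in-time replay sketch using
# mysqlbinlog (hypothetical paths, binlog names and start position; read the
# real values from the saved master_status.txt of the instance being restored):
#
#   /usr/local/mysql/bin/mysqlbinlog --start-position=107 \
#       bin.000042 bin.000043 | /usr/local/mysql/bin/mysql -S /mysql/<instance>/mysql.sock
#
# Options such as --database=<db> or --stop-datetime="YYYY-MM-DD HH:MM:SS"
# can be used to filter the binary logs, as noted above.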


# must run this script as root.
if [ "$(id -u)" != "0" ]; then
    echo "$0 must be run as root" 1>&2
    exit 1
fi


# Constants and Global Variables
pidfile="/var/run/dbbackup.pid"
timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
backup_directory=/backup/dbbackup
current_backup_directory="$backup_directory/current" # a symlink to the most recently created full backup
incomplete_directory="$backup_directory/incomplete"
archive_directory="$backup_directory/archive" # store compressed old backups here
new_backup_directory="$incomplete_directory/new_$timestamp"
mysql_directory="/mysql"
lvm_snapshot_size="50GB"
lvm_volume_path="/dev/cs/mysql"
lvm_snapshot_name="mysql_snapshot_$timestamp"
lvm_snapshot_volume_path="/dev/cs/$lvm_snapshot_name"
lvm_snapshot_mount_directory="/mnt/$lvm_snapshot_name"
slaves_stopped=0 # true while any mysql instance slaves are stopped
tables_locked=0 # true while any mysql instance tables are locked
stale_backup_days=30 # delete backups older than this many days
report_to_email_addresses="farley@couchsurfing.org steve.phillips@couchsurfing.com" # space separated list of emails to send reports to
minimum_archive_filesize=161061273600 # 150GB - No dbbackup archive should be smaller than this. If a newly created archive is smaller than this, the archive will be considered a failure
dev_host="<%= dev_server %>"
# mysql is currently being managed by supervisor
# 'mysql:' is the name of the supervisor group
mysql_start_command="/usr/bin/supervisorctl start mysql:"
mysql_stop_command="/usr/bin/supervisorctl stop mysql:"
mysql="/usr/local/mysql/bin/mysql" # path to mysql client binary


# List of mysql instance slaves on dbbackup.
# These are the directory names inside of the /mysql directory.
mysql_instances=(dbcore dbgroup dbarchive dbmessage1 dbmessage2 dbmessage3 dbmessage4 dbspool ops)


function usage {
    echo "Usage:
    $0 full|incremental|archive|delete|restore|status
      full        - Creates a full backup of each mysql instance's data directory from an LVM snapshot.
      incremental - Copies new binlogs from each mysql instance.
      archive     - Looks in $backup_directory for uncompressed backups and compresses and archives them.
      delete      - Looks in $backup_directory for any compressed backups older than $stale_backup_days days and deletes them.
      restore     - This does only a very specific restore. It will copy from each $current_backup_directory/<instance>/data
                    directory into $mysql_directory/<instance>/data. This is useful for doing automated restores of
                    the staging database.
      status      - Prints out backup status information.
    "
}
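
# If this script is driven from cron, a schedule along these lines could be
# used (hypothetical times and install path, not taken from this file):
#
#   # m   h   dom mon dow   command
#   0     3   *   *   0     /usr/local/bin/dbbackup.sh full
#   0     *   *   *   *     /usr/local/bin/dbbackup.sh incremental
#   30    6   *   *   *     /usr/local/bin/dbbackup.sh archive
#   45    6   *   *   *     /usr/local/bin/dbbackup.sh delete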


# Creates an LVM snapshot of the /mysql directory and
# creates backups of the mysql instance data files.
function backup_full {
    /bin/mkdir -pv $new_backup_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not create directory '$new_backup_directory'"

    # - Stop all slaves, flush binary logs, purge all old logs
    log "Stopping all mysql slaves"
    mysql_multi_command 'STOP SLAVE;'
    slaves_stopped=1

    log "Obtaining read lock on all mysql instances..."
    mysql_multi_command 'FLUSH TABLES WITH READ LOCK;'
    tables_locked=1

    log "Flushing all mysql binary logs"
    mysql_multi_command 'FLUSH LOGS;'

    # foreach instance, purge all old logs:
    # get the newest binlog and purge everything up to (but not including) it
    for instance in "${mysql_instances[@]}"; do
        newest_binlog=$("${mysql}" -S /mysql/$instance/mysql.sock -e "SHOW BINARY LOGS;" | tail -n 1 | awk '{print $1}')
        # purge all old binary logs for this instance
        log "Purging binary logs on instance $instance up to $newest_binlog"
        mysql_command $instance "PURGE BINARY LOGS TO '$newest_binlog'"
    done


    # - Save master status of each mysql instance into files.
    log "Saving mysql master and slave statuses into files."
    save_multi_mysql_statuses

    # - Create LVM snapshot of /mysql directory
    log "Creating LVM snapshot of $lvm_volume_path named $lvm_snapshot_name"
    /usr/sbin/lvcreate -L${lvm_snapshot_size} -s -n $lvm_snapshot_name $lvm_volume_path 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not create LVM snapshot of $lvm_volume_path named $lvm_snapshot_volume_path"


    log "Unlocking all mysql instances..."
    mysql_multi_command 'UNLOCK TABLES;'
    tables_locked=0

    # - Start all slaves
    log "Starting all mysql slaves"
    mysql_multi_command 'START SLAVE;'
    slaves_stopped=0


    # - Mount LVM snapshot
    log "Mounting LVM snapshot $lvm_snapshot_volume_path at $lvm_snapshot_mount_directory"
    /bin/mkdir -pv $lvm_snapshot_mount_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not create directory $lvm_snapshot_mount_directory"
    /bin/mount -t ext3 -v $lvm_snapshot_volume_path $lvm_snapshot_mount_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not mount $lvm_snapshot_volume_path at $lvm_snapshot_mount_directory"

    # Rsync each mysql instance's data folder into
    # $new_backup_directory/$instance/data
    # We don't need to save the binary logs here because this is a full data backup.
    # Incremental backups taken later will consist only of binary logs, allowing for
    # incremental restores.
    for instance in "${mysql_instances[@]}"; do
        sudo -u mysql /bin/mkdir -pv "$new_backup_directory/$instance" || die "Could not mkdir $new_backup_directory/$instance"
        log "Copying files from $lvm_snapshot_mount_directory/$instance/data to $new_backup_directory/$instance/"
        /bin/nice --adjustment=10 /usr/bin/rsync -avWP --exclude="lost+found" --exclude="*.pid" --exclude="mysql.sock" "$lvm_snapshot_mount_directory/$instance/data" "$new_backup_directory/$instance/" 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not copy data from $lvm_snapshot_mount_directory/$instance/data to $new_backup_directory/$instance/"

        # Verify that the copy was successful
        log "Verifying backup via quick checksum..."
        checksum_original=$(checksum_quick $lvm_snapshot_mount_directory/$instance/data)
        checksum_backup=$(checksum_quick $new_backup_directory/$instance/data)

        if [ "${checksum_original}" != "${checksum_backup}" ]; then
            die "Error when backing up $instance data. Original ($checksum_original) and backup ($checksum_backup) checksums do not match."
        fi

        log "$instance data directory checksums match."
    done

    # - Unmount LVM snapshot
    log "Unmounting $lvm_snapshot_mount_directory"
    /bin/umount -v $lvm_snapshot_mount_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not umount $lvm_snapshot_mount_directory"
    /bin/rm -rfv $lvm_snapshot_mount_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not remove $lvm_snapshot_mount_directory" # arguably no need to die here; a failure would just leave an extra empty directory around

    # - Delete LVM snapshot
    log "Deleting LVM snapshot $lvm_snapshot_volume_path"
    /usr/sbin/lvremove -f $lvm_snapshot_volume_path 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not delete LVM snapshot $lvm_snapshot_volume_path"

    # Remove the 'new_' from the beginning of the new backup directory name and
    # point the current backup symlink at the new backup directory
    (/bin/mv -v $new_backup_directory $backup_directory/$timestamp && /bin/ln -sfnv $backup_directory/$timestamp $current_backup_directory) || die "Could not point the current symlink at $backup_directory/$timestamp"
}



# Rsyncs each mysql instance's binlogs to
# the backup directory
function backup_incremental {
    # Make sure $current_backup_directory exists
    if [ ! -d "$current_backup_directory" ]; then
        die "Cannot do incremental backup. Current backup directory does not exist. You probably need to run your first full backup."
    fi

    # Rsync each mysql instance's binlog folder into
    # $current_backup_directory/$instance/binlog
    for instance in "${mysql_instances[@]}"; do
        incremental_binlog_backup_directory="$current_backup_directory/$instance/binlog"
        /bin/mkdir -pv $incremental_binlog_backup_directory || die "Could not create directory $incremental_binlog_backup_directory"


        # Stop this slave.
        # We need to do this to get accurate information about this slave's status
        # and position for the binlog we are about to save. This information
        # can be used to restore a backup to this binlog, and then start
        # it as a slave of this instance's master.
        log "Stopping $instance slave."
        mysql_command $instance 'stop slave;';
        slaves_stopped=1

        # Get the location that this instance has currently executed binlogs to.
        # This is the most recent position that our instance has executed, as
        # well as the latest binlog file that we are about to back up.
        # This information will be included alongside the current slave
        # status, so that when restoring this instance to this position,
        # the slave status can be used to recreate a slave and point it at
        # this dbbackup instance's current master.
        current_position=$("${mysql}" -S /mysql/$instance/mysql.sock -e 'show master status;' | sed -n '2p' | awk -F "\t" '{print $1 ":" $2}') || die "Could not get current instance binlog position."

        # Flush the binary logs so that mysql will force start a new one
        log "Flushing $instance binary logs"
        mysql_command $instance 'flush logs;'

        # Start this slave back up
        log "Starting $instance slave."
        mysql_command $instance 'start slave;';
        slaves_stopped=0


        # Append a 'CHANGE MASTER TO' statement to the $incremental_slave_status_file.
        # This file will include statements that can be used to create a slave from
        # this incremental backup by replaying the backed up binary logs to this point
        # and then running the corresponding CHANGE MASTER TO statement.
        incremental_slave_status_file="$current_backup_directory/$instance/incremental_slave_status.txt"
        log "Appending current $instance slave status as a change master SQL command into $incremental_slave_status_file."
        change_master_command=$("${mysql}" -S /mysql/$instance/mysql.sock -e "show slave status;" | sed -n '2p' | awk -F "\t" '{print "CHANGE MASTER TO master_host=\"" $2 "\", master_user=\"" $3 "\", master_password=\"XXXXXXXXX\", master_log_file=\"" $10 "\", master_log_pos=\"" $22 "\";" }') || die "Could not get incremental slave status from instance $instance"
        date=$(date '+%Y-%m-%d %H:%M:%S')
        change_master_command="/* dbbackup $instance instance at ${current_position} (${date}) */ ${change_master_command}"
        echo "$change_master_command" >> "$incremental_slave_status_file" || die "Could not append $instance slave status as a change master SQL command to $incremental_slave_status_file"
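
        # For illustration, an appended entry looks roughly like this
        # (hypothetical values; the password is always masked in the file):
        #   /* dbbackup dbcore instance at bin.000041:10734 (2017-08-22 04:30:00) */ CHANGE MASTER TO master_host="10.0.0.12", master_user="repl", master_password="XXXXXXXXX", master_log_file="bin.000117", master_log_pos="88211";
        # To rebuild a slave from the incremental backup, replay the backed up
        # binlogs up to the noted file:position, then run the matching CHANGE
        # MASTER TO statement with the real replication password.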


        # Get the newest binlog (the one created when the logs were flushed above) and exclude it from being rsynced
        newest_binlog=$("${mysql}" -S /mysql/$instance/mysql.sock -e "SHOW BINARY LOGS;" | tail -n 1 | awk '{print $1}')

        # Rsync binlogs to the binlog backup directory.
        # We ONLY want the binlog files. We don't want any
        # relay logs or info or index files. We also don't want the
        # binary log that was recently created by flushing the logs.
        log "Copying $instance binlogs to $incremental_binlog_backup_directory"
        /bin/nice --adjustment=10 /usr/bin/rsync -avWP --exclude="relay-*" --exclude="bin.index" --exclude="$newest_binlog" "$mysql_directory/$instance/binlog/" "$incremental_binlog_backup_directory/" 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not copy binlogs from $mysql_directory/$instance/binlog/ to $incremental_binlog_backup_directory/"

        # purge all old binary logs for this instance
        log "Purging binary logs on instance $instance up to $newest_binlog"
        mysql_command $instance "PURGE BINARY LOGS TO '$newest_binlog'"
    done
}


function archive_old_backups {
    /usr/bin/test -d $incomplete_directory || /bin/mkdir -p $incomplete_directory || die "Could not create directory $incomplete_directory"
    /usr/bin/test -d $archive_directory || /bin/mkdir -p $archive_directory || die "Could not create directory $archive_directory"

    result=0
    # get list of old uncompressed backups
    old_backup_directories=$(/bin/ls -d $backup_directory/20* | grep -v 'tar' | grep -v $(/usr/bin/readlink $current_backup_directory)) || die "Could not find any old backups in $backup_directory that need compressing."
    for old_backup_directory in $old_backup_directories; do
        log "Archiving $old_backup_directory..."
        archive_backup $old_backup_directory
        if [ $? -ne 0 ]; then
            log "Failed archive of $old_backup_directory"
            result=1
        fi
    done

    return $result
}


# We want to keep individual .tar.gz files of each data and binlog
# directory. This will allow us faster decompression times when we
# need to restore data from a certain mysql instance backup (see the
# sketch after this list).
# This function:
#   - loops through each mysql instance in the backup
#   - creates a temporary archive directory inside of the $incomplete_directory
#   - creates .tar.gz files of the data and binlog directories inside the $incomplete_archive_directory
#   - after the compression is completed, the data and binlog directories are removed from the original location (to save space)
#   - copies master and slave status files to the instance directory in the $incomplete_archive_directory
#   - once done looping through instances, a .tar file is made of the $incomplete_archive_directory
#   - this .tar file is moved into the $archive_directory
#   - the original $old_backup_directory and the $incomplete_archive_directory are then removed permanently
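#
# For illustration, pulling a single instance's data back out of a finished
# archive might look like this (hypothetical archive name and paths):
#
#   /bin/tar -C /tmp -xvf /backup/dbbackup/archive/2017-08-20_03.00.00.tar 2017-08-20_03.00.00/dbcore/data.tar.gz
#   /bin/tar -C /mysql/dbcore -xzf /tmp/2017-08-20_03.00.00/dbcore/data.tar.gz
#
# Only the one data.tar.gz has to be extracted from the outer .tar and
# decompressed, which is why the per-directory .tar.gz layout is used.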
function archive_backup {
    retval=0

    # the original directory to compress and archive
    old_backup_directory=$1
    old_backup_filename=$(/bin/basename $old_backup_directory)

    # This is the directory in which archives will be kept while they are being compressed
    incomplete_archive_directory="$incomplete_directory/$old_backup_filename"
    # Once the individual data/ and binlog/ directories are tar-ed and compressed,
    # they will all be tar-ed into this single file
    incomplete_archive_file="${incomplete_directory}/$(/bin/basename $old_backup_directory).tar"
    # Once the $incomplete_archive_file has been created, it will be moved to
    # $final_archive_file.
    final_archive_file=$archive_directory/$(/bin/basename $incomplete_archive_file)

    # if $final_archive_file already exists, do not attempt to archive this.
    if [ -e $final_archive_file ]; then
        log "Not attempting to archive $old_backup_directory. An archive of this backup already exists at $final_archive_file."
        return 0
    fi

    # Create a temporary incomplete directory to create the archive in
    /bin/mkdir -pv $incomplete_archive_directory || die "Could not create directory $incomplete_archive_directory"


    # Loop through each mysql instance and compress each binlog and data directory
    for instance in "${mysql_instances[@]}"; do
        if [ ! -d "${old_backup_directory}/${instance}" ]; then
            echo "$instance directory does not exist in $old_backup_directory, not attempting to archive."
            continue;
        fi

        /bin/mkdir -pv $incomplete_archive_directory/$instance || die "Could not create directory $incomplete_archive_directory/$instance"

        # Compress both data and binlog directories
        subdirectories_to_compress=(data binlog)
        for subdirectory_name in "${subdirectories_to_compress[@]}"; do
            directory_to_compress="$old_backup_directory/$instance/$subdirectory_name"
            compressed_filename="$incomplete_archive_directory/$instance/$subdirectory_name.tar.gz"
            cmd="/bin/tar -C $old_backup_directory/$instance -czf $compressed_filename $subdirectory_name"
            log "Compressing $directory_to_compress at $compressed_filename"
            log "$cmd"
            $cmd 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not create archive of $directory_to_compress"
            # Need to save space. Once the archive of this directory is complete, remove the original.
            # This line is commented out since jumbo is BIIIG. Uncomment it if you need to save space while compressing backups.
            # /bin/rm -rfv $directory_to_compress 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not remove $directory_to_compress after compressing it"
        done

        # copy master and slave status text files
        log "Including $instance master and slave status files in archive"
        /bin/cp -v $old_backup_directory/$instance/{master,slave}_status.txt $incomplete_archive_directory/$instance/ 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not copy master and slave status files to archive."
    done

    # Now that we've compressed all of the individual instance data and binlog directories,
    # create a .tar file of the whole backup
    cmd="/bin/tar -C $incomplete_directory -cvf $incomplete_archive_file $old_backup_filename"
    log "$cmd"
    $cmd 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not create archive of $old_backup_directory at $incomplete_archive_file"

    # Double check to make sure the archive we just created is big enough.
    # If it's not, give up on this backup (and don't delete the original $old_backup_directory).
    incomplete_archive_filesize=$(/usr/bin/stat -c%s ${incomplete_archive_file})
    if [ "${incomplete_archive_filesize}" -lt "${minimum_archive_filesize}" ]; then
        log "Archiving $old_backup_filename at $incomplete_archive_file failed. Filesize ($incomplete_archive_filesize bytes) is less than minimum archive filesize ($minimum_archive_filesize bytes)."
        return 1
    fi

    # Archiving is complete!
    # Move the incomplete archive file to the archive directory.
    /bin/mv -v $incomplete_archive_file $final_archive_file 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not move $incomplete_archive_file into $archive_directory"

    # Delete the incomplete archive directory and the old backup directory
    /bin/rm -rfv $incomplete_archive_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not remove $incomplete_archive_directory after creating archive $final_archive_file."
    /bin/rm -rfv $old_backup_directory 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not remove $old_backup_directory after creating archive $final_archive_file."

    log "Finished creating backup archive $final_archive_file."
    return $retval
}


# Deletes any compressed backups older than $stale_backup_days
function delete_stale_backups {
    log "Deleting compressed backups older than $stale_backup_days days."
    cd $archive_directory
    # Bah, date parsing and calculation sucks in bash. Gotta use something else...ruby it is!
    # This command gets the list of files in $archive_directory that match 20*.tar,
    # converts the date-time part of the filename to a ruby DateTime object, and then
    # prints out each file that was created more than $stale_backup_days days ago.
    stale_backups=$(/usr/bin/ruby -e "require 'date'; Dir::chdir('$archive_directory'); puts Dir::glob(\"20*.tar\").select { |f| DateTime::parse(f.delete('.tar.gz')) < (DateTime::now - $stale_backup_days) }.join(' ')") || die "Failed trying to find backups to delete."
    log "Deleting $stale_backups..."
    /bin/rm -fv $stale_backups 2>&1 | log
}


# Restores mysql instance data directories from the current backup
# to $mysql_directory/<instance>/data. This is useful for restoring
# the staging database.
function restore_full {
    log "Beginning full restore from $current_backup_directory..."
    # Stop all mysql instances
    log "Stopping all mysql instances..."
    ${mysql_stop_command} 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not stop all mysql instances."
    # give mysql some time to stop
    sleep 5

    # For each mysql instance, delete the data directory
    # (so that we can be sure there is enough disk space to
    # copy the new data directory), then copy the data directory
    # to its restored location.
    for instance in "${mysql_instances[@]}"; do
        restore_instance $instance || die "Could not restore instance $instance."
    done

    log "Starting all mysql instances..."
    ${mysql_start_command} 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not start all mysql instances."
    log "Done restoring $current_backup_directory/<instance>/data directories to $mysql_directory instance data directories."
}

# Copies from $current_backup_directory/<instance>/data to $mysql_directory/<instance>/data
# $1 instance
function restore_instance {
    instance=$1

    log "Restoring $instance mysql instance from $current_backup_directory/$instance/data to $mysql_directory/$instance/data..."

    log "Deleting mysql instance $instance's data and binlog files."
    # delete the old data directory
    test -d $mysql_directory/$instance/data && /bin/rm -rfv $mysql_directory/$instance/data || die "Could not remove old data directory $mysql_directory/$instance/data"
    # remove all old binlogs
    test -d $mysql_directory/$instance/binlog && /bin/rm -fv $mysql_directory/$instance/binlog/* || die "Could not remove binlogs from $mysql_directory/$instance/binlog/*"

    # copy the new data directory (excluding master.info; we don't want the restored instance to try to start up a slave)
    /usr/bin/rsync -avWP --exclude='master.info' $current_backup_directory/$instance/data $mysql_directory/$instance/ 2>&1 | log; [ ${PIPESTATUS[0]} -eq 0 ] || die "Could not copy data from $current_backup_directory/$instance/data to $mysql_directory/$instance/data"
    return $?
}

# Loop through each running instance and grant permissions for
# alpha CS Code environments to connect to each instance from
# the dev server. This gets run automatically after a restore_full
# so that dev (and the alpha site) may connect to the staging db.
function grant_alpha_permissions {
    log "Granting alpha MySQL permissions from $dev_host..."

    # grant permissions for couchs_write using staging password
    sql="GRANT DELETE, EXECUTE, INSERT, SELECT, UPDATE ON *.* TO 'couchs_write'@'$dev_host' IDENTIFIED BY PASSWORD '*F1BFF585E567CE876BA7D0D3CCE334AA15528CD9';"
    mysql_multi_command "${sql}"

    # grant permissions for readonly using staging password
    sql="GRANT EXECUTE, PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'readonly'@'$dev_host' IDENTIFIED BY PASSWORD '*F1BFF585E567CE876BA7D0D3CCE334AA15528CD9';"
    mysql_multi_command "${sql}"
}


# Prints out a status message about the latest archived backup
function status_archive {
    # check up on the latest archived backup
    latest_archive_filename=$(ls -t $archive_directory | sed -n '1p')
    if [ -z "$latest_archive_filename" ]; then
        echo "Zero archived backups."
    else
        latest_archive_file="$archive_directory/$latest_archive_filename"
        latest_archive_filesize=$(/usr/bin/stat -c%s $latest_archive_file)
        latest_archive_file_mtime=$(/usr/bin/stat -c%Y $latest_archive_file)
        latest_archive_file_date=$(timestamp_to_date $latest_archive_file_mtime)
        echo "Latest archived backup: $latest_archive_file"
        echo "    Size: $latest_archive_filesize ($(format_filesize $latest_archive_filesize))"
        echo "    mtime: $latest_archive_file_mtime ($latest_archive_file_date)"
    fi
}

# Prints out a status message about the current backup
function status_current {
    # check up on the current backup
    if [ ! -d "$current_backup_directory" ]; then
        echo "Current backup directory does not exist. You probably need to run your first full backup."
        exit 1;
    fi

    latest_backup_directory=$(/usr/bin/readlink $current_backup_directory)
    latest_backup_size=$(/usr/bin/du -b --max-depth=0 $latest_backup_directory | cut -f 1)

    echo ""
    echo "Current backup status:"
    echo "    $latest_backup_directory"
    echo "    Size: $latest_backup_size ($(format_filesize $latest_backup_size))"
    echo ""
}

# Prints out a status message about a particular instance inside the current backup
function status_instance {
    instance=$1

    if [ ! -d "$current_backup_directory/$instance" ]; then
        echo "Backup instance '$instance' does not exist. You probably need to run your first full backup."
        exit 1
    fi

    latest_backup_directory=$(/usr/bin/readlink $current_backup_directory)
    backup_instance_size=$(/usr/bin/du -b --max-depth=0 $latest_backup_directory/$instance | cut -f 1)

    echo "    $instance Size: $backup_instance_size ($(format_filesize $backup_instance_size))"
    latest_incremental_backup_binlog=$(ls -t $latest_backup_directory/$instance/binlog/bin.* | sed -n '1p')
    latest_incremental_backup_binlog_filesize=$(/usr/bin/stat -c%s $latest_incremental_backup_binlog)
    latest_incremental_backup_binlog_mtime=$(/usr/bin/stat -c%Y $latest_incremental_backup_binlog)
    latest_incremental_backup_binlog_date=$(timestamp_to_date $latest_incremental_backup_binlog_mtime)

    echo "    Latest incremental binlog: $latest_incremental_backup_binlog"
    echo "        Size: $latest_incremental_backup_binlog_filesize ($(format_filesize $latest_incremental_backup_binlog_filesize))"
    echo "        mtime: $latest_incremental_backup_binlog_mtime ($latest_incremental_backup_binlog_date)"
    echo ""
}

# Prints out backup status information.
# Usage:
#   status archive|current|instances|all|<instance_name>
function status {
    case "$1" in
        'archive' )
            status_archive
            ;;
        'current' )
            status_current
            ;;
        'instances' )
            for instance in "${mysql_instances[@]}"; do
                status_instance $instance
            done
            ;;
        * )
            if [ "$1" == 'all' ] || [ -z "$1" ]; then
                status_archive
                status_current
                # loop through each mysql instance and print out a status for each
                for instance in "${mysql_instances[@]}"; do
                    status_instance $instance
                done
            else
                status_instance $1
            fi
            ;;
    esac
}


# Compresses a directory with tar and gzip.
function compress_directory {
    directory=$1

    if [ ! -d $directory ]; then
        die "Cannot compress '$directory', it is not a directory"
    fi

    # while compressing, create the file in an incomplete directory
    /usr/bin/test -d $incomplete_directory || /bin/mkdir -p $incomplete_directory || die "Could not create directory $incomplete_directory"
    incomplete_compressed_file="$incomplete_directory/$(/bin/basename $directory).tar.gz"

    cmd="/bin/tar -czf $incomplete_compressed_file $directory"
    log $cmd
    # compress the directory, move it out of the incomplete directory and then remove the original directory.
    ($cmd && /bin/mv -v $incomplete_compressed_file $backup_directory/ && /bin/rm -rfv $directory) 2>&1 | log
    return ${PIPESTATUS[0]}
}


# Finds every file in the directory,
# truncates the output of ls -l to
# print out size and timestamp of each file,
# and then generates an md5sum from this output.
function checksum_quick {
    directory=$1
    /usr/bin/find $directory -type f | xargs /bin/ls -l --time-style="+%Y%m%d%H%M%S" | grep -v '.pid' | grep -v 'mysql.sock' | grep -v 'lost+found' | /bin/awk '{print $5 " " $6}' | /usr/bin/md5sum - | /bin/awk '{print $1}'
}


# Logs an error message,
# removes any mounts or LVM snapshots that this script created,
# and then exits 1.
function die {
    log "${1}"

    # if an LVM snapshot is mounted and we are dying, then unmount it
    /bin/mount | /bin/grep -q $lvm_snapshot_mount_directory && (/bin/umount -v $lvm_snapshot_mount_directory 2>&1 | log)
    # if an LVM snapshot exists, delete it.
    /usr/sbin/lvscan | /bin/grep -q $lvm_snapshot_volume_path && (/usr/sbin/lvremove -f $lvm_snapshot_volume_path 2>&1 | log)

    # if die was called while tables_locked == 1, then run UNLOCK TABLES on all instances
    if [ $tables_locked -eq 1 ]; then
        log "Unlocking tables on all mysql instances"
        mysql_multi_command 'UNLOCK TABLES;';
    fi

    # if die was called while slaves_stopped == 1, then run START SLAVE on all instances
    if [ $slaves_stopped -eq 1 ]; then
        log "Starting slaves on all mysql instances..."
        mysql_multi_command 'START SLAVE;'
    fi

    # if the mount directory exists, remove it
    /usr/bin/test -d $lvm_snapshot_mount_directory && (rm -rfv $lvm_snapshot_mount_directory 2>&1 | log)

    # remove the pid file (if it exists)
    /bin/rm -f $pidfile

    # send an email notifying that this script has died
    report "$0 $action failed" "$(date '+%Y-%m-%d %H:%M:%S') ${1}"
    exit 1
}


# Executes the same SQL statement on all mysql_instances.
function mysql_multi_command {
    command=$1

    for instance in "${mysql_instances[@]}"; do
        mysql_command "${instance}" "${command}"
    done
}


# Executes a command on a mysql instance
function mysql_command {
    instance=$1
    command=$2

    log "Running '$command' on $instance"
    "${mysql}" -S /mysql/$instance/mysql.sock -e "$command" 2>&1 | log
    if [ ${PIPESTATUS[0]} -ne 0 ]; then
        die "Running '$command' on mysql instance $instance failed."
    fi
}

# Saves master and slave status for each mysql instance into files
function save_multi_mysql_statuses {
    master_status_command='show master status\G'
    slave_status_command='show slave status\G'

    for instance in "${mysql_instances[@]}"; do
        /bin/mkdir -pv "$new_backup_directory/${instance}" || die "Could not create directory '$new_backup_directory/${instance}'"

        master_status_file="$new_backup_directory/${instance}/master_status.txt"
        slave_status_file="$new_backup_directory/${instance}/slave_status.txt"

        # Save master status
        log "Running '$master_status_command' on $instance"
        "${mysql}" -S /mysql/$instance/mysql.sock -e "$master_status_command" > $master_status_file || die "Could not save master status for mysql instance $instance into $master_status_file"

        # Save slave status
        log "Running '$slave_status_command' on $instance"
        "${mysql}" -S /mysql/$instance/mysql.sock -e "$slave_status_command" > $slave_status_file || die "Could not save slave status for mysql instance $instance into $slave_status_file"
    done
}

# Echoes $1 or stdin to stdout and sends the message to the scribe logger
# in the category 'dbbackup'.
function log {
    message=$1
    scribe_category='dbbackup'

    # if message was not passed in, read message from stdin
    if [ -z "${message}" ]; then
        while read data; do
            header="[$HOSTNAME] [$$] [$(date '+%Y-%m-%d %H:%M:%S')] [$scribe_category] [$0]"
            echo "$header $data"
            echo "$header $data" | /usr/bin/scribe_cat $scribe_category
        done
    # else just echo the message
    else
        header="[$HOSTNAME] [$$] [$(date '+%Y-%m-%d %H:%M:%S')] [$scribe_category] [$0]"
        echo "$header $message"
        echo "$header $message" | /usr/bin/scribe_cat $scribe_category
    fi
}


# sends a report email to $report_to_email_addresses
function report {
    subject="${1}"
    body="${2}"
    email "${report_to_email_addresses}" "${subject}" "${body}"
}

# sends an email!
function email {
    to="${1}"
    subject="${2}"
    body="${3}"
    /bin/echo "${body}" | /bin/mail -s "${subject}" "${to}"
}

# converts a unix timestamp into a human readable date
function timestamp_to_date {
    timestamp=$1
    echo $timestamp | /bin/awk '{print strftime("%Y-%m-%d %H:%M:%S",$1)}'
}


# converts a byte filesize into a human readable format
function format_filesize {
    size=${1}

    mega=$(( 1024 * 1024 ))
    giga=$(( 1024 * 1024 * 1024 ))

    # print size of file copied
    if [[ $size -le 1024 ]]; then
        printf "%d B" $size;
    elif [[ $size -le $mega ]]; then
        printf "%d kB" $(( $size / 1024 ));
    elif [[ $size -le $giga ]]; then
        printf "%d MB" $(( $size / $mega ));
    else
        printf "%d GB" $(( $size / $giga ));
    fi
}

# Catch Control-C so we can clean up properly
trap 'die "Caught SIGINT, exiting..."' SIGINT


# Parse command line for action.
action="${1}"


# make sure this script isn't already running.
if [ -f $pidfile ]; then
    pid=$(cat $pidfile)
    log "dbbackup script is already running with PID $pid. Aborting."
    exit 1
fi

# store the current PID in the pidfile (if not getting status)
if [ "$1" != 'status' ]; then
    echo $$ > $pidfile
fi

exitval=0

case "$action" in
    'full' )
        time backup_full
        ;;
    'incremental' )
        time backup_incremental
        ;;
    'delete' )
        time delete_stale_backups
        ;;
    'restore' )
        time restore_full
        sleep 5
        grant_alpha_permissions
        ;;
    'archive' )
        time archive_old_backups
        exitval=$?
        ;;
    'status' )
        status $2
        exit 0;
        ;;
    'grant_alpha_permissions' )
        time grant_alpha_permissions
        exitval=$?
        ;;
    * )
        echo "'$1' is not a valid command."
        usage
        /bin/rm $pidfile
        exit 1;
        ;;
esac

log "Done."
echo ""
echo ""

# remove the pidfile
/bin/rm $pidfile

exit $exitval