Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- [root@va01erp03 ~]# cat /u01/app/oracle/local/rman/bin/backup_database.sh
#!/bin/sh
# 7/27/17 - BMB - Changing locking mechanism to allow the script to run multiple times and skip backups that are already running but allowing backups of other databases in the list if they aren't yet being backed up.
# Uncomment the following line to do a dry run
#DEBUG=echo

PID=$$
# All lock files will be prefixed with this (one lock per SID is appended later)
LOCKFILE_BASE=/tmp/backup-database.$PID

# Must run as oracle user: rman/oraenv and the FRA paths all assume the
# oracle account, so stop immediately instead of failing half-way through.
# (The original printed the warning but had the exit commented out, which
# made this guard a no-op.)
WHOAMI=$(whoami)
if [ "$WHOAMI" != "oracle" ]; then
  echo "Must run this script as oracle user"
  exit 1
fi

# S3 Stuff - location of the AWS CLI (existence is checked before use)
S3CMD=/usr/local/bin/aws
# parse_url URL
# Split a URL of the form scheme://user:password@host/path into parts and
# assign them to shell variables ${PREFIX}SCHEME, ${PREFIX}USER,
# ${PREFIX}PASSWORD, ${PREFIX}HOST and ${PREFIX}PATH, where PREFIX
# defaults to "URL_" (this script reads URL_HOST after calling it).
# NOTE(review): this evals sed output built from "$1" -- a URL containing
# a single quote could inject shell commands; only feed it trusted,
# operator-supplied URLs.
parse_url() {
eval $(echo "$1" | sed -e "s#^\(\(.*\)://\)\?\(\([^:@]*\)\(:\(.*\)\)\?@\)\?\([^/?]*\)\(/\(.*\)\)\?#${PREFIX:-URL_}SCHEME='\2' \
${PREFIX:-URL_}USER='\4' ${PREFIX:-URL_}PASSWORD='\6' ${PREFIX:-URL_}HOST='\7' ${PREFIX:-URL_}PATH='\9'#")
}
# Print command-line usage (flags, backup types, examples) to stdout.
usage() {
cat <<EOF
Usage: $0 -t <full|archive|incremental-0|incremental-1> -s 'SIDS' [-e email] [-r /rsync/path] [-fo] [-prefix 'Some Prefix'] [-redundancy #]
-t - one of the following:
 full - full backup
 archive - archive log backup
 incremental-0 - Level 0 incremental backup (full)
 incremental-1 - Level 1 incremental backup
-s SID - required - list of SIDS, quoted and space delimited
-e email - optional - comma delimited (no spaces) list of email addresses to send failure notification to
-prefix <prefix> : Prefix to put in E-Mail subjects. Usually used by Penta to distinguish between different customers systems
-r rsync-path - optional - path to rsync backup files to. Do not include the SID in the path
-fo - optional - If set, only email failed backup notifications
-redundancy <#> : The # of full backups to store. Defaults to 3.
-aws-access-key <key> : AWS Access Key
-aws-secret-key <key> : AWS Secret Key
-s3-url : S3 URL to sync files to

Default lifecycle file in s3 directory can be overriden with a file called lifecycle-override.txt
Example: $0 -t full -s 'PROD PROD_H' -e customer@domain.com -r /someshare
Example: $0 -t archive -s 'PROD PROD_H' -e customer@domain.com
EOF
}
- # make sure the lockfile is removed when we exit and then claim it
- trap "/bin/rm -f ${LOCKFILE_BASE}*; exit" INT TERM EXIT
# Ask the current instance for its flash recovery area directory.
# Prints the lines of the sqlplus output that contain a "/" (i.e. the path),
# filtering out the column header and underline.
get_flash_recovery_area() {
  QUERY="select name from V\$RECOVERY_FILE_DEST;"
  printf '%s\n' "$QUERY" | sqlplus / as sysdba | grep /
}
# set_s3_bucket_lifecycle FILE
# Apply the lifecycle configuration FILE (a file:// URL) to the bucket in
# the global $S3BUCKET, in region $S3REGION.  Honors $DEBUG for dry runs.
# NOTE(review): one caller passes the bucket as a second argument; it is
# ignored here -- the bucket always comes from the global.
set_s3_bucket_lifecycle() {
  FILE=$1
  # Quote expansions so paths/regions with unusual characters survive intact.
  $DEBUG aws s3api put-bucket-lifecycle --bucket "$S3BUCKET" --lifecycle-configuration "$FILE" --region "$S3REGION"
}
# s3_sync LOCALPATH S3PATH
# One attempt at "aws s3 sync" of LOCALPATH into S3PATH/ using
# infrequent-access storage with server-side encryption; *.ctl control
# files are excluded.  Returns the aws CLI's exit status.
# Honors $DEBUG for dry runs; $S3REGION must already be set.
s3_sync() {
  LOCALPATH=$1
  S3PATH=$2
  # Quote expansions so paths containing spaces are passed as single args.
  $DEBUG aws s3 sync "$LOCALPATH" "${S3PATH}/" --sse --storage-class STANDARD_IA --exclude "*.ctl" --quiet --region "$S3REGION"
}
# sync_files_to_s3 LOCALPATH S3PATH
# Sync LOCALPATH to S3PATH, retrying (S3 uploads are occasionally flaky):
# up to 5 attempts total, sleeping 30 seconds between attempts.
# Returns 0 as soon as any attempt succeeds, 1 if all attempts fail.
#
# Fixes the original nested-retry chain, which returned FAILURE when the
# 4th attempt succeeded and silently discarded the result of the 5th.
sync_files_to_s3() {
  LOCALPATH=$1
  S3PATH=$2
  ATTEMPT=1
  while [ "$ATTEMPT" -le 5 ]; do
    if s3_sync "$LOCALPATH" "$S3PATH"; then
      return 0
    fi
    ATTEMPT=$((ATTEMPT + 1))
    # Pause before the next try, but not after the final failure.
    if [ "$ATTEMPT" -le 5 ]; then
      sleep 30
    fi
  done
  return 1
}
# No arguments at all -> print usage and stop.
# "$1" is quoted so the test stays well-formed for any first argument.
if [ -z "$1" ]; then
  usage
  exit
fi

# Set the RMAN directory (ORACLE_BASE is stored in ~/.oracle_base)
ORACLE_BASE=$(cat ~/.oracle_base)
RMAN_DIR=$ORACLE_BASE/local/rman

# Make sure /usr/local/bin is in the path and save it, so PATH can be
# restored after oraenv rewrites it for each SID.
PATH=$PATH:/usr/local/bin
ORIG_PATH=$PATH

# Defaults
FAILURE_ONLY=0   # 0 = always email the summary, 1 = only email failures
REDUNDANCY=3     # number of full backups to keep (passed to the RMAN script)
# Parse command-line options.  Arms for options that take a value do one
# extra shift to consume the value; the shift at the bottom of the loop
# consumes the option itself.
while [ $# -gt 0 ]
do
  case "$1" in
    -t) BACKUP_TYPE="$2"; shift;;
    -s) ORACLE_SIDS="$2"; shift;;
    -e) EMAIL="$2"; shift;;
    -prefix) EMAILPREFIX="$2"; shift;;
    -r) RSYNC_DEST="$2"; shift;;
    -s3-url) S3URL="$2"; shift;;
    -aws-access-key) export AWS_ACCESS_KEY_ID="$2"; shift;;
    -aws-secret-key) export AWS_SECRET_ACCESS_KEY="$2"; shift;;
    # -fo takes no value, so it must NOT shift here: the original's extra
    # shift made the loop silently eat whatever option followed -fo.
    -fo) FAILURE_ONLY=1;;
    -redundancy) REDUNDANCY="$2"; shift;;
    -*) usage; exit 1;;
    *) break;; # terminate while loop
  esac
  shift
done
# extract bucket name from S3 URL and look up the bucket's region
if [ -n "$S3URL" ]; then
  parse_url "$S3URL"
  S3BUCKET=$URL_HOST
  S3REGION=$(aws s3api get-bucket-location --bucket "$S3BUCKET" --output text)
  # get-bucket-location reports "None" for buckets in the standard region,
  # so map that back to the real region name.
  if [ "$S3REGION" = "None" ]; then
    S3REGION="us-east-1"
  fi
fi
# Map the requested backup type to its RMAN script, log prefix and the
# subject line used in notification emails.
case "$BACKUP_TYPE" in
  "full")
    TYPE="Full Hot"
    RMAN_SCRIPT=$RMAN_DIR/bin/full_hot.rman
    SUBJECT="PENTA FULL DATABASE BACKUP"
    LOG_PREFIX="full_hot"
    ;;
  "archive")
    TYPE="Archive log"
    RMAN_SCRIPT=$RMAN_DIR/bin/archive.rman
    SUBJECT="PENTA DATABASE LOG BACKUP"
    LOG_PREFIX="archive"
    ;;
  "incremental-0")
    RMAN_SCRIPT=$RMAN_DIR/bin/incremental_level0.rman
    TYPE="Incremental Level 0"
    SUBJECT="PENTA INCREMENTAL FULL DATABASE BACKUP"
    LOG_PREFIX="incremental_0"
    ;;
  "incremental-1")
    RMAN_SCRIPT=$RMAN_DIR/bin/incremental_level1.rman
    TYPE="Incremental Level 1"
    SUBJECT="PENTA INCREMENTAL DATABASE BACKUP"
    LOG_PREFIX="incremental_1"
    ;;
  *)
    # Message now matches the full set of types accepted above (the
    # original only mentioned full and archive).
    echo "Invalid backup type. Must be full, archive, incremental-0 or incremental-1"
    exit 255
    ;;
esac
# rsync_ready SID
# Decide whether it is safe to rsync backups for SID to $RSYNC_DEST:
# the directory $RSYNC_DEST/SID must exist, be writable, and sit on a
# remotely mounted filesystem (so we never "mirror" onto local disk).
# Returns 1 when rsync should proceed, 0 otherwise -- note this is the
# OPPOSITE of the usual shell convention; the caller tests for == 1.
# Side effects: may set the global FAILED=1, may send a notification
# email, leaves a .test marker file behind, and changes the CWD to
# $RSYNC_DEST/SID when the directory exists.
function rsync_ready {
  RSYNC_READY=0
  if [ -n "$RSYNC_DEST" ]; then
    # We must want to rsync since we were given a directory.
    if [ -d "$RSYNC_DEST/$1" ]; then
      cd "$RSYNC_DEST/$1"
      # Writability probe
      touch .test
      if [ "$?" = "0" ]; then
        # Make sure this is a remote filesystem by excluding the known
        # local filesystem types from the mount list and looking for the
        # destination among what remains.
        for FS in $(mount | egrep -v "(ext3|ext4|proc|sysfs|devpts|tmpfs|rpc_pipefs|acfs)" | awk '{print $3}'); do
          FS_FOUND=$(echo "$FS" | grep -c "$RSYNC_DEST")
          if [ "$FS_FOUND" = "1" ]; then
            RSYNC_READY=1
          fi
        done
        if [ "$RSYNC_READY" = "0" ]; then
          echo "***** $RSYNC_DEST/$1 NOT MOUNTED. Please check the share configuration. Continuing with local backup only *****"
        fi
      else
        echo "***** Cannot write to $RSYNC_DEST/$1. Please check the share configuration. Continuing with local backup only *****"
        FAILED=1
      fi
    else
      # (Rejoined the "mail" invocation that was line-wrapped in the paste.)
      echo "***** $RSYNC_DEST/$1 doesn't exist. If this is the first time backing up databases to this directory then manually create $RSYNC_DEST/PROD. Continuing with local backup only. ******" | mail -s "$EMAILPREFIX CRITICAL FAILURE DURING $SUBJECT" $EMAIL
      FAILED=1
    fi
  fi
  return $RSYNC_READY
}
# Timestamp embedded in every log/summary file name for this run.
DAY=$(date +%F-%T)

# Notify $EMAIL (when set) that the run was interrupted by a signal.
function send_failure_email {
  if [ -n "$EMAIL" ]; then # we have an email address specified
    echo "Backup Script Interrupted" | mail -s "$EMAILPREFIX CRITICAL FAILURE DURING $SUBJECT" $EMAIL
  fi
}
# This trap replaces the earlier INT/TERM trap, which used to exit; without
# an explicit exit here a Ctrl-C would email and then keep running.  Exiting
# also lets the remaining EXIT trap clean up the lock files.
trap 'send_failure_email; exit 255' SIGINT SIGTERM

# Per-run files: the summary accumulates every SID's result; the email
# body is truncated fresh for this run.
SUMMARY_EMAIL=/tmp/$$.$LOG_PREFIX.Summary.$DAY.log
EMAIL_BODY=$RMAN_DIR/log/$LOG_PREFIX.$DAY.log
cat /dev/null > "$EMAIL_BODY"
GLOBAL_FAILED=0
# Main loop: run the requested backup for each SID, then optionally push
# the results to S3 and/or an rsync destination.  Per-SID human-readable
# results accumulate in $EMAIL_BODY; raw tool output goes to $LOG_FILE.
for ORACLE_SID in $ORACLE_SIDS
do
echo "===========================================================" >> $EMAIL_BODY
echo "$TYPE backup of $ORACLE_SID started at `date`" >> $EMAIL_BODY
# Per-SID lock: if any lock file for this SID exists (from any PID), a
# backup of it is already running -- skip this SID.
# NOTE(review): the ls-then-touch sequence is not atomic; two runs started
# at the same instant could both pass this check.
LOCKFILE="${LOCKFILE_BASE}-${ORACLE_SID}"
LOCKFILE_COUNT=`ls /tmp/backup-database.*${ORACLE_SID} 2> /dev/null | wc -l`
if [ ${LOCKFILE_COUNT} -gt 0 ]; then
echo "Backup of $ORACLE_SID already running, skipping"
continue
fi
echo "Creating Lock File $LOCKFILE"
touch $LOCKFILE
LOG_FILE=$RMAN_DIR/log/$LOG_PREFIX.$ORACLE_SID.$DAY.log
cat /dev/null > $LOG_FILE
export ORACLE_SID
# Set the PATH back to the original (oraenv rewrites it per SID)
PATH=$ORIG_PATH
export ORAENV_ASK=NO
. /usr/local/bin/oraenv >> /dev/null 2>&1
FAILED=0
# Make sure the database is running on this server
RUNNING=`lsnrctl status | grep -c \"$ORACLE_SID\"`
if [ "$RUNNING" == "0" ]; then
echo "Cannot locate a running database called $ORACLE_SID" >> $EMAIL_BODY
FAILED=1
else
# Run the RMAN script for the chosen type; $REDUNDANCY is presumably
# consumed as a substitution variable by the .rman script -- TODO confirm.
$DEBUG $ORACLE_HOME/bin/rman @$RMAN_SCRIPT $REDUNDANCY >> $LOG_FILE
RETCODE=$?
if [ "$RETCODE" -gt "0" ]; then
FAILED=1
echo "$TYPE backup of $ORACLE_SID FAILED at `date` with return code $RETCODE" >> $EMAIL_BODY
echo "Error below:" >> $EMAIL_BODY
echo >> $EMAIL_BODY
tail -10 $LOG_FILE >> $EMAIL_BODY
echo >> $EMAIL_BODY
echo >> $EMAIL_BODY
else
echo "$TYPE backup of $ORACLE_SID SUCCEEDED at `date` with return code $RETCODE" >> $EMAIL_BODY
fi
# Flash recovery area path: the local root for the S3/rsync copies below.
FRA=`get_flash_recovery_area`
# Copy Backups to Amazon S3
if [ ! -z $S3URL ]; then
echo "Starting S3 Sync" >> $LOG_FILE
if [ ! -e $S3CMD ]; then
echo "AWS CLI not installed. Please install" >> $EMAIL_BODY
FAILED=1
else
# Set the lifecycle for the database
# NOTE(review): this branch runs when lifecycle.txt does NOT exist, yet it
# still greps that missing file for the SID policy -- the SID_POLICY check
# almost certainly belongs in the else branch below.  As written the grep
# fails quietly and SID_POLICY is empty.
if [ ! -e $RMAN_DIR/bin/s3/lifecycle.txt ]; then
echo "S3 Lifecycle file not found" >> $EMAIL_BODY
FAILED=1
SID_POLICY=`grep -c "<ID>$ORACLE_SID</ID>" $RMAN_DIR/bin/s3/lifecycle.txt`
if [ "$SID_POLICY" -eq 0 ]; then
echo "S3 Lifecycle for $ORACLE_SID not found in lifecycle.txt" >> $EMAIL_BODY
FAILED=1
fi
else
# Prefer the site-specific override file when present.
LIFECYCLEFILE=lifecycle.txt
if [ -e $RMAN_DIR/bin/s3/lifecycle-override.txt ]; then
LIFECYCLEFILE=lifecycle-override.txt
fi
# NOTE(review): the trailing $S3BUCKET argument is ignored --
# set_s3_bucket_lifecycle reads the bucket from the global S3BUCKET.
set_s3_bucket_lifecycle file://$RMAN_DIR/bin/s3/$LIFECYCLEFILE $S3BUCKET
#if [ "$?" -gt 0 ]; then FAILED=1; echo "Setting S3 Bucket Lifecycle Failed" >> $EMAIL_BODY"; fi
sync_files_to_s3 $FRA/$ORACLE_SID/backupset $S3URL/$ORACLE_SID/backupset
RETCODE=$?
if [ "$RETCODE" -gt 0 ]; then
FAILED=1
echo "FAILED TO COPY BACKUPS to S3!" >> $EMAIL_BODY
else
echo "S3 copy of $ORACLE_SID backupset to $S3URL/$ORACLE_SID/backupset complete" >> $EMAIL_BODY
fi
sync_files_to_s3 $FRA/$ORACLE_SID/autobackup $S3URL/$ORACLE_SID/autobackup
RETCODE=$?
if [ "$RETCODE" -gt 0 ]; then
FAILED=1
echo "FAILED TO COPY BACKUPS to S3!" >> $EMAIL_BODY
else
echo "S3 copy of $ORACLE_SID autobackup to $S3URL/$ORACLE_SID/autobackup complete" >> $EMAIL_BODY
fi
# Audit to make sure the files are really there.
S3_FILE_LIST=/tmp/S3_FILE_LIST_$ORACLE_SID
aws s3 ls --region $S3REGION --recursive $S3URL/$ORACLE_SID/ > $S3_FILE_LIST
RETCODE=$?
if [ "$RETCODE" -gt 0 ]; then
FAILED=1
echo "FAILED TO GET S3 FILE LIST!" >> $EMAIL_BODY
else
# Check the files we have
# NOTE(review): the "> /dev/null 2>&1" INSIDE the $(...) discards find's
# output, so this loop iterates zero times and the audit always "passes".
# Dropping the redirection would make the audit effective.
for file in $(find $FRA/$ORACLE_SID -name '*.bkp' > /dev/null 2>&1); do
RMAN_FILE=$(basename $file)
S3_FILE_EXISTS=$(grep -c $RMAN_FILE $S3_FILE_LIST)
if [ "$S3_FILE_EXISTS" -ne 1 ]; then
echo "$RMAN_FILE not backed up to S3" >> $EMAIL_BODY
FAILED=1
S3_SYNC_FAILED=1
fi
done
# NOTE(review): S3_SYNC_FAILED is never reset between SIDs, so one failure
# suppresses this success line for every subsequent SID in the run.
if [ -z $S3_SYNC_FAILED ]; then
echo "S3 file audit of $ORACLE_SID backups complete. All local files found in S3" >> $EMAIL_BODY
fi
fi
fi
fi
fi
# Copy Backups to $RSYNC_DEST with rsync
# NOTE(review): rsync_ready returns 1 (not 0) to signal "ready" -- hence
# the == 1 test below -- and may change the CWD when the target exists.
rsync_ready $ORACLE_SID
RSYNC_READY=$?
if [ "$RSYNC_READY" == 1 ]; then
# don't use -t, as that isn't compatible with Windows shares.
# use -c to compare by checksum since we can't keep timestamps in sync.
$DEBUG rsync -av --delete $FRA/$ORACLE_SID/backupset $RSYNC_DEST/$ORACLE_SID/ >> $LOG_FILE
if [ $? != "0" ]; then
echo "Failed to copy backups to $RSYNC_DEST" >> $EMAIL_BODY
FAILED=1
else
echo "Remote copy of $ORACLE_SID backupset backups from $FRA to $RSYNC_DEST/$ORACLE_SID SUCCEEDED" >> $EMAIL_BODY
fi
# don't use -t, as that isn't compatible with Windows shares.
# use -c to compare by checksum since we can't keep timestamps in sync.
$DEBUG rsync -av --delete $FRA/$ORACLE_SID/autobackup $RSYNC_DEST/$ORACLE_SID/ >> $LOG_FILE
if [ $? != "0" ]; then
echo "Failed to copy backups to $RSYNC_DEST" >> $EMAIL_BODY
FAILED=1
else
echo "Remote copy of $ORACLE_SID autobackup backups from $FRA to $RSYNC_DEST/$ORACLE_SID SUCCEEDED" >> $EMAIL_BODY
fi
else
echo "Skipping rsync due to error validating remote mount point" >> $EMAIL_BODY
fi
fi
echo "See Log File For Details: $LOG_FILE" >> $EMAIL_BODY
echo "===========================================================" >> $EMAIL_BODY
/bin/rm -f ${LOCKFILE}
done
# Post-run reporting.
# NOTE(review): this sits AFTER the SID loop, so $FAILED, $ORACLE_SID and
# $LOG_FILE hold values from the LAST SID only -- it looks like it was
# meant to live inside the loop.  Behavior is preserved here; moving it
# would require restructuring the loop body as well.
if [ -n "$EMAIL" ]; then # we have an email address specified
  if [ "$FAILED" = "1" ]; then # We ran into issues
    cat "$LOG_FILE" >> "$EMAIL_BODY"
    mail -s "$EMAILPREFIX $SUBJECT FAILED - $ORACLE_SID" $EMAIL < "$EMAIL_BODY"
  fi
  cat "$EMAIL_BODY" >> "$SUMMARY_EMAIL"
else # We don't have an email address. Let's print to stdout
  if [ "$FAILED" = "1" ]; then # We ran into issues, include the raw log
    cat "$LOG_FILE" >> "$EMAIL_BODY"
  fi
  cat "$EMAIL_BODY"
fi

if [ "$FAILED" = "1" ]; then
  GLOBAL_FAILED=1
fi

# E-Mail Summary
if [ -n "$EMAIL" ]; then # we have an email address specified
  if [ "$FAILURE_ONLY" = "0" ]; then # Email no matter what the result
    mail -s "$EMAILPREFIX $SUBJECT BACKUP SUMMARY" $EMAIL < "$SUMMARY_EMAIL"
  fi
fi

# Propagate overall failure to the caller (e.g. cron) as a non-zero status.
if [ "$GLOBAL_FAILED" = "1" ]; then
  exit 255
fi
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement