Retain Database Dump Tables Script

There are occasions when doing a regular dump of the database isn't enough to work an issue. You can dump the database table by table instead.

This script does that for you: it logs into the Retain database and dumps each table into its own file. The script dumpTables.sh can be obtained in two ways:

  • For Americas support engineers, you can find it in the /mysql directory of the Software Repository.
  • For everyone else, click here to download it.

Modifying the dumpTables Script

You need to edit the script to make it work for your system. Edit the values in the script to match your system's values. There are three sections to edit:

DATABASE CONNECTION AND DUMP OUTPUT LOCATION

Replace everything within square brackets, including the square brackets themselves.

Name of Retain database (i.e., retain):
DBNAMES="[name of Retain database]"
Location of the database server (e.g., localhost or IP/DNS hostname):
HOST="--host=[localhost or IP/DNS hostname of DB server]"
Database user. Leave this as is unless the MySQL superuser is something other than "root", which is possible but rarely seen:
USER="--user=root"
Password for the root MySQL user:
PASSWORD="--password=[root user password]"
Dump directory (e.g., /data/mysql-archive/dump). Whichever path is used, the mysql user needs to own the destination directory and have read/write rights:
BACKUP_DIR="[path to dump output directory]"
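
For example, a hypothetical set of edited values (the database name, host, password, and path here are placeholders, not values from any real system) might look like this:

DBNAMES="retain"
HOST="--host=localhost"
USER="--user=root"
PASSWORD="--password=MySecretPwd"
BACKUP_DIR="/data/mysql-archive/dump"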

FILE COMPRESSION OPTIONS

Enabling either or both of these options disables the easy restore. You would rarely need the tar option; however, if a customer is transferring their database dump to GWAVA's datacenter or to some other location within their environment, the GZip option reduces the size of the files and protects them during transport. You could also use a script to copy the GZip files to the new location as they are created, so they can be imported into the new system while the dump process continues to work through the remaining tables (a minimal sketch of such a copy loop follows the options below).

Valid values: yes / no
GZip individual tables:
ZIP="no"
Tar all the individual files into one single .tar file at the end of the job:
TAR="no"

PRE-EXISTING FILE CLEANUP

Cleans up files from a previous run of the dump tables script. This is a dangerous setting, as you could lose dump files that you may want to keep.

Valid values: yes / no
CLEANUP="no"

Executing the dumpTables Script

After editing the settings:

  • Save the script
  • Give the script file execute permissions (chmod +x dumpTables.sh)
  • Make MySQL the owner of the destination dump directory: chown mysql:mysql [path to dump directory]
  • Ensure that the directory has read and write rights: chmod +r+w [path to dump directory]
  • From the directory containing the script, run the script: ./dumpTables.sh
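
For example, with a hypothetical dump directory of /data/mysql-archive/dump, the full sequence from the directory containing the script would be:

chmod +x dumpTables.sh
chown mysql:mysql /data/mysql-archive/dump
chmod +r+w /data/mysql-archive/dump
./dumpTables.sh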

The instructions for importing all of the tables are saved to the file __restoreData.sql; thus, when the dump has completed, the only file you need to import is __restoreData.sql.
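
Based on how the script builds it, __restoreData.sql ends up looking roughly like the following (the bracketed table names are placeholders for the actual tables in the database; with ZIP="yes" the source lines would instead reference .sql.gz files, which is why compression disables the easy restore):

SET AUTOCOMMIT=0;
SET UNIQUE_CHECKS=0;
SET FOREIGN_KEY_CHECKS=0;
source __createDatabase.sql;
source [first table].sql;
source [second table].sql;
...
SET FOREIGN_KEY_CHECKS=1;
SET UNIQUE_CHECKS=1;
COMMIT;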

Importing the Dump

The steps for importing depend on whether you chose to tar the entire dump and/or gzip the table dump files:

  1. If you enabled the tar option: tar -xf [tar dump file]
  2. If you enabled the zip option: gzip -d [gz dump file]

For all situations:

  1. mysql -uroot -p[root pwd] < __restoreData.sql
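
Because __restoreData.sql references __createDatabase.sql and the per-table dump files by relative name, run the import from inside the dump directory itself; for example, with the same hypothetical path as above:

cd /data/mysql-archive/dump
mysql -uroot -p[root pwd] < __restoreData.sql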

Script Listing

#!/bin/bash

# Change the values below where needed. Most of the time, you'll only be changing the "database connection and dump output location" and sometimes the "file compression options" and the "pre-existing file cleanup". The "InnoDB dump options" should probably remain as written unless you really know what you are doing. The "restore script setup" should always be left untouched.

#DATABASE CONNECTION AND DUMP OUTPUT LOCATION 
#Replace everything within square brackets, including the square brackets themselves.
	## Name of Retain database (i.e., retain):
	DBNAMES="[name of Retain database]" 
	## Location of the database server (e.g., localhost or IP/DNS hostname):
	HOST="--host=[localhost or IP/DNS hostname of DB server]"
	## Leave this as is unless the MySQL superuser is something other than "root", which is possible but rarely seen:
	USER="--user=root" 
	## Password for the root MySQL user:
	PASSWORD="--password=[root user password]" 
	## Dump output directory (e.g., /data/mysql-archive/dump). Whichever path is used, the mysql user needs to own the destination directory and have r/w rights:
	BACKUP_DIR="[path to dump output directory]" 

#FILE COMPRESSION OPTIONS
#Enabling either or both of these options disables the easy restore. You would rarely need the tar option; however, if a customer is transferring their database dump to GWAVA's datacenter or to some other location within their environment, the GZip option reduces the size of the files and protects them during transport. You could use a script to copy the GZip files to the new location as they are created, so they can be imported into the new system while the dump process continues to work through the remaining tables.
	
	#-- Valid values: yes / no --#
	
	##GZip individual tables:
	ZIP="no"

	##Tar all the individual files into one single .tar file at the end of the job:
	TAR="no" 

#PRE-EXISTING FILE CLEANUP
#Cleans up files from a previous run of the dump tables script. This is a dangerous setting, as you could lose dump files that you may want to keep.
	
	#-- Valid values: yes / no --#
	
	CLEANUP="no"
	
#INNODB DUMP OPTIONS
# You can change the following InnoDB options. They are optional, but make sure you know what you're doing.

	#-- Note that the "--single-transaction" option is helpful for when Tomcat is left running.  If Tomcat is left down, then it isn't necessary.  Reference https://dev.mysql.com/doc/refman/5.5/en/mysqldump.html#option_mysqldump_single-transaction. --#
	
	OPTIONS="--default-character-set=utf8 --complete-insert --extended-insert --quick --opt --single-transaction"

#RESTORE SCRIPT SETUP
#No need to edit this section:
	RESTORESCRIPT="$BACKUP_DIR/__restoreData.sql"
	DATE=`/bin/date '+%y%m%d_%H%M%S'`


#---------------------------- BEGINNING OF SCRIPT - make no changes after this point ------------------------------------------

date2stamp () {
    date --utc --date "$1" +%s
}

stamp2date (){
    date --utc --date "1970-01-01 $1 sec" "+%Y-%m-%d %T"
}

dateDiff (){
    case $1 in
        -s)   sec=1;      shift;;
        -m)   sec=60;     shift;;
        -h)   sec=3600;   shift;;
        -d)   sec=86400;  shift;;
        *)    sec=86400;;
    esac
    dte1=$(date2stamp $1)
    dte2=$(date2stamp $2)
    diffSec=$((dte2-dte1))
    if ((diffSec < 0)); then abs=-1; else abs=1; fi
    echo $((diffSec/sec*abs))
}


#### Change to the dump output directory (the tar and gzip steps below rely on the current directory being BACKUP_DIR) ####
cd ${BACKUP_DIR}

#### Remove any files existing from a previous run of the dump tables script ####
if [ $CLEANUP == "yes" ]
then
	echo Removing old files if they exist...
	rm -f ${BACKUP_DIR}/*.sql > /dev/null 2>&1
	rm -f ${BACKUP_DIR}/*.gz > /dev/null 2>&1
	rm -f ${BACKUP_DIR}/*.tar > /dev/null 2>&1
fi


#### Begin dump ####

TOTBEGIN=$(date2stamp now)
for DB in $DBNAMES
do
    echo "=========================================="
    echo    Dumping Database: ${DB}
    echo "=========================================="
    echo 'SET AUTOCOMMIT=0;' > $RESTORESCRIPT
    echo 'SET UNIQUE_CHECKS=0;' >> $RESTORESCRIPT
    echo 'SET FOREIGN_KEY_CHECKS=0;' >> $RESTORESCRIPT

###Ernie Research This:innodb_autoinc_lock_mode = 2

    mysqldump --databases --no-data $HOST $USER $PASSWORD $DB > ${BACKUP_DIR}/__createDatabase.sql
    echo 'source __createDatabase.sql;' >> $RESTORESCRIPT

    for TABLE in `mysql $HOST $USER $PASSWORD $DB -e 'show tables' | egrep -v 'Tables_in_' `; do
        TABLENAME=$(echo $TABLE|awk '{ printf "%s", $0 }')

	 if [ $ZIP == "yes" ]
          then FILENAME="${TABLENAME}.sql.gz"
	   else FILENAME="${TABLENAME}.sql"
	 fi

        echo Dumping Table: $TABLENAME
	 BEGIN=$(date2stamp now)
        echo 'source' $FILENAME';' >> $RESTORESCRIPT

	 if [ $ZIP == "yes" ]
          then mysqldump $OPTIONS $HOST $USER $PASSWORD $DB $TABLENAME | gzip > ${BACKUP_DIR}/${FILENAME}
          else mysqldump $OPTIONS $HOST $USER $PASSWORD $DB $TABLENAME > ${BACKUP_DIR}/${FILENAME}
	 fi
	
	 END=$(date2stamp now)
	 DIFF=$((END-BEGIN))
	 if ((DIFF < 300)); 
	  then echo "  " $((DIFF/1)) Seconds;
	  else echo "  " $((DIFF/60)) Minutes;
	 fi
    done

###Ernie Research This:innodb_autoinc_lock_mode = 1

    echo 'SET FOREIGN_KEY_CHECKS=1;' >> $RESTORESCRIPT
    echo 'SET UNIQUE_CHECKS=1;' >> $RESTORESCRIPT
    echo 'COMMIT;' >> $RESTORESCRIPT


    if [ $TAR == "yes" ] 
    then
      echo Making tar...
      tar -cf ${DB}_${DATE}.tar *.sql  > /dev/null 2>&1

      echo Compressing...
      gzip -9 ${DB}_${DATE}.tar > /dev/null 2>&1

      echo Removing temporary files...
      rm -f ${BACKUP_DIR}/*.sql > /dev/null 2>&1
      rm -f ${BACKUP_DIR}/*.tar > /dev/null 2>&1
    fi

    echo "Completed: " $DB
done
TOTEND=$(date2stamp now)

DIFF=$((TOTEND-TOTBEGIN))
 if ((DIFF < 300)); 
  then echo "  " $((DIFF/1)) Seconds;
  else echo "  " $((DIFF/60)) Minutes;
 fi

echo "=========================================="
echo "       All Databases Complete"
echo "       To import these dump files:"
	if [ $TAR == "yes" ] 
	then
echo "         * tar -xf [tar filename]" 
	fi
	if [ $ZIP == "yes" ]
	then 
echo "         * gzip -d [*.gz]"
	fi

echo "         * Import the _restoreData.sql, which will import all the other files: mysql -uroot -p[root pwd] < _restoreData.sql"
echo "=========================================="
