Adding Incremental Backups
* Adding a config flag to register incremental runners
* Adding InnoBackupExIncremental runner and restore strategy.
* Adding save_metadata and load_metadata methods for swift storage strategy.
* Adding incremental backup/restore integration test.

Implements: blueprint incremental-backups
Change-Id: I1f0d4e5967097498f86a5052c33c55471e24a137
This commit is contained in: parent 119f3d6cd4, commit b5fd5493e3
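With this change, an incremental backup is requested by passing the id of an existing backup as parent_id, and the guest agent captures only the changes made since that parent. A minimal sketch using the python-troveclient bindings exercised by the integration tests in this commit (the client handle, names, and ids are illustrative placeholders, not values from this change):

    # Sketch only: 'client' stands for an authenticated troveclient
    # instance; names and ids are placeholders. parent_id is the new
    # field introduced by this change.
    full = client.backups.create('nightly-full', instance_id)
    incr = client.backups.create('nightly-incr', instance_id,
                                 parent_id=full.id)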
@@ -45,7 +45,7 @@ class BackupState(object):
 
 class Backup(object):
 
     @classmethod
-    def create(cls, context, instance, name, description=None):
+    def create(cls, context, instance, name, description=None, parent_id=None):
         """
         create db record for Backup
         :param cls:
@@ -67,12 +67,22 @@ class Backup(object):
 
         cls.verify_swift_auth_token(context)
 
+        parent = None
+        if parent_id:
+            # Look up the parent info or fail early if not found or if
+            # the user does not have access to the parent.
+            _parent = cls.get_by_id(context, parent_id)
+            parent = {
+                'location': _parent.location,
+                'checksum': _parent.checksum,
+            }
         try:
             db_info = DBBackup.create(name=name,
                                       description=description,
                                       tenant_id=context.tenant,
                                       state=BackupState.NEW,
                                       instance_id=instance_id,
+                                      parent_id=parent_id,
                                       deleted=False)
         except exception.InvalidModelError as ex:
             LOG.exception("Unable to create Backup record:")
@@ -84,6 +94,7 @@ class Backup(object):
             'instance_id': instance_id,
             'backup_type': db_info.backup_type,
             'checksum': db_info.checksum,
+            'parent': parent,
         }
         api.API(context).create_backup(backup_info, instance_id)
         return db_info
@@ -191,6 +202,12 @@ class Backup(object):
         :return:
         """
 
+        # Recursively delete all children and grandchildren of this backup.
+        query = DBBackup.query()
+        query = query.filter_by(parent_id=backup_id, deleted=False)
+        for child in query.all():
+            cls.delete(context, child.id)
+
         def _delete_resources():
             backup = cls.get_by_id(context, backup_id)
             if backup.is_running:
@@ -222,7 +239,7 @@ class DBBackup(DatabaseModelBase):
     _data_fields = ['id', 'name', 'description', 'location', 'backup_type',
                     'size', 'tenant_id', 'state', 'instance_id',
                     'checksum', 'backup_timestamp', 'deleted', 'created',
-                    'updated', 'deleted_at']
+                    'updated', 'deleted_at', 'parent_id']
     preserve_on_delete = True
 
     @property
@@ -61,7 +61,8 @@ class BackupController(wsgi.Controller):
         instance = data['instance']
         name = data['name']
         desc = data.get('description')
-        backup = Backup.create(context, instance, name, desc)
+        parent = data.get('parent_id')
+        backup = Backup.create(context, instance, name, desc, parent_id=parent)
         return wsgi.Result(views.BackupView(backup).data(), 202)
 
     def delete(self, req, tenant_id, id):
@@ -31,7 +31,8 @@ class BackupView(object):
             "created": self.backup.created,
             "updated": self.backup.updated,
             "size": self.backup.size,
-            "status": self.backup.state
+            "status": self.backup.state,
+            "parent_id": self.backup.parent_id,
         }
     }
@@ -354,7 +354,8 @@ backup = {
         "properties": {
             "description": non_empty_string,
             "instance": uuid,
-            "name": non_empty_string
+            "name": non_empty_string,
+            "parent_id": uuid
         }
     }
 }
@@ -172,6 +172,11 @@ common_opts = [
     cfg.StrOpt('restore_namespace',
                default='trove.guestagent.strategies.restore.mysql_impl',
                help='Namespace to load restore strategies from.'),
+    cfg.DictOpt('backup_incremental_strategy',
+                default={'InnoBackupEx': 'InnoBackupExIncremental'},
+                help='Incremental backup runner based on the default'
+                     ' strategy. For strategies that do not implement an'
+                     ' incremental, the runner will use the default full backup.'),
     cfg.BoolOpt('verify_swift_checksum_on_restore', default=True,
                 help='Enable verification of swift checksum before starting '
                      'restore; makes sure the checksum of original backup matches '
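The new backup_incremental_strategy DictOpt maps a full-backup strategy name to the runner used for its incrementals; when a strategy has no entry, the guest agent falls back to a full backup instead of failing. A minimal sketch of that fallback semantics (a plain dict, independent of oslo.config; the 'MySQLDump' case is illustrative):

    # The guest agent looks up the incremental runner by the configured
    # backup strategy name and falls back to the strategy itself, so a
    # datastore without an incremental implementation still gets a full
    # backup instead of an error.
    backup_incremental_strategy = {'InnoBackupEx': 'InnoBackupExIncremental'}
    strategy = 'MySQLDump'  # no incremental implementation registered
    runner_name = backup_incremental_strategy.get(strategy, strategy)
    assert runner_name == 'MySQLDump'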
@@ -0,0 +1,37 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import MetaData
+
+from trove.db.sqlalchemy.migrate_repo.schema import String
+from trove.db.sqlalchemy.migrate_repo.schema import Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # add column:
+    backups = Table('backups', meta, autoload=True)
+    backups.create_column(Column('parent_id', String(36), nullable=True))
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # drop column:
+    backups = Table('backups', meta, autoload=True)
+    backups.drop_column('parent_id')
@@ -28,6 +28,7 @@ from trove.guestagent.strategies.backup.base import UnknownBackupType
 from trove.guestagent.strategies.storage import get_storage_strategy
 from trove.guestagent.strategies.backup import get_backup_strategy
 from trove.guestagent.strategies.restore import get_restore_strategy
+from trove.openstack.common.gettextutils import _  # noqa
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -35,6 +36,10 @@ CONF = cfg.CONF
 RUNNER = get_backup_strategy(CONF.backup_strategy,
                              CONF.backup_namespace)
 EXTRA_OPTS = CONF.backup_runner_options.get(CONF.backup_strategy, '')
+# Try to get the incremental strategy or return the default 'backup_strategy'
+INCREMENTAL = CONF.backup_incremental_strategy.get(CONF.backup_strategy,
+                                                   CONF.backup_strategy)
+INCREMENTAL_RUNNER = get_backup_strategy(INCREMENTAL, CONF.backup_namespace)
 
 
 class BackupAgent(object):
@@ -55,13 +60,28 @@ class BackupAgent(object):
                                    auth_token=CONF.nova_proxy_admin_pass)
         conductor = conductor_api.API(ctxt)
 
-        LOG.info("Running backup %s", backup_id)
+        LOG.info(_("Running backup %(id)s") % backup_info)
         user = ADMIN_USER_NAME
         password = get_auth_password()
         storage = get_storage_strategy(
             CONF.storage_strategy,
             CONF.storage_namespace)(context)
 
+        # Check if this is an incremental backup and grab the parent metadata
+        parent_metadata = {}
+        if backup_info.get('parent'):
+            runner = INCREMENTAL_RUNNER
+            LOG.info(_("Using incremental runner: %s") % runner.__name__)
+            parent = backup_info['parent']
+            parent_metadata = storage.load_metadata(parent['location'],
+                                                    parent['checksum'])
+            # The parent could be another incremental backup, so we need to
+            # reset the location and checksum to *this* parent's info
+            parent_metadata.update({
+                'parent_location': parent['location'],
+                'parent_checksum': parent['checksum']
+            })
+
         # Store the size of the filesystem before the backup.
         stats = get_filesystem_volume_stats(CONF.mount_point)
         backup = {
@@ -75,32 +95,37 @@ class BackupAgent(object):
 
         try:
             with runner(filename=backup_id, extra_opts=extra_opts,
-                        user=user, password=password) as bkup:
+                        user=user, password=password,
+                        **parent_metadata) as bkup:
                 try:
-                    LOG.info("Starting Backup %s", backup_id)
+                    LOG.info(_("Starting Backup %s"), backup_id)
                     success, note, checksum, location = storage.save(
                         bkup.manifest,
                         bkup)
 
-                    LOG.info("Backup %s completed status: %s", backup_id,
-                             success)
-                    LOG.info('Backup %s file swift checksum: %s',
-                             backup_id, checksum)
-                    LOG.info('Backup %s location: %s', backup_id,
-                             location)
-
                     backup.update({
                         'checksum': checksum,
                         'location': location,
                         'note': note,
                         'success': success,
                         'backup_type': bkup.backup_type,
                     })
 
+                    LOG.info(_("Backup %(backup_id)s completed status: "
+                               "%(success)s") % backup)
+                    LOG.info(_("Backup %(backup_id)s file swift checksum: "
+                               "%(checksum)s") % backup)
+                    LOG.info(_("Backup %(backup_id)s location: "
+                               "%(location)s") % backup)
+
                     if not success:
                         raise BackupError(note)
 
+                    storage.save_metadata(location, bkup.metadata())
+
                 except Exception:
-                    LOG.exception("Error saving %s Backup", backup_id)
+                    LOG.exception(_("Error saving %(backup_id)s Backup") %
+                                  backup)
                     backup.update({'state': BackupState.FAILED})
                     conductor.update_backup(CONF.guest_id,
                                             sent=timeutils.float_utcnow(),
@@ -108,14 +133,14 @@ class BackupAgent(object):
                     raise
 
         except Exception:
-            LOG.exception("Error running backup: %s", backup_id)
+            LOG.exception(_("Error running backup: %(backup_id)s") % backup)
             backup.update({'state': BackupState.FAILED})
             conductor.update_backup(CONF.guest_id,
                                     sent=timeutils.float_utcnow(),
                                     **backup)
             raise
         else:
-            LOG.info("Saving %s Backup Info to model", backup_id)
+            LOG.info(_("Saving %(backup_id)s Backup Info to model") % backup)
            backup.update({'state': BackupState.COMPLETED})
            conductor.update_backup(CONF.guest_id,
                                    sent=timeutils.float_utcnow(),
@@ -124,7 +149,7 @@ class BackupAgent(object):
     def execute_restore(self, context, backup_info, restore_location):
 
         try:
-            LOG.debug("Getting Restore Runner of type %s", backup_info['type'])
+            LOG.debug(_("Getting Restore Runner %(type)s"), backup_info)
             restore_runner = self._get_restore_runner(backup_info['type'])
 
             LOG.debug("Getting Storage Strategy")
@@ -135,17 +160,18 @@ class BackupAgent(object):
             with restore_runner(storage, location=backup_info['location'],
                                 checksum=backup_info['checksum'],
                                 restore_location=restore_location) as runner:
-                LOG.debug("Restoring instance from backup %s to %s",
-                          backup_info['id'], restore_location)
+                backup_info['restore_location'] = restore_location
+                LOG.debug(_("Restoring instance from backup %(id)s to "
+                            "%(restore_location)s") % backup_info)
                 content_size = runner.restore()
-                LOG.info("Restore from backup %s completed successfully to %s",
-                         backup_info['id'], restore_location)
-                LOG.info("Restore size: %s", content_size)
+                LOG.info(_("Restore from backup %(id)s completed successfully "
+                           "to %(restore_location)s") % backup_info)
+                LOG.info(_("Restore size: %s") % content_size)
 
         except Exception as e:
             LOG.error(e)
-            LOG.error("Error restoring backup %s", backup_info['id'])
+            LOG.error(_("Error restoring backup %(id)s") % backup_info)
             raise
 
         else:
-            LOG.info("Restored Backup %s", backup_info['id'])
+            LOG.info(_("Restored Backup %(id)s") % backup_info)
@@ -91,6 +91,10 @@ class BackupRunner(Strategy):
 
         return True
 
+    def metadata(self):
+        """Hook for subclasses to store metadata from the backup."""
+        return {}
+
     @property
     def filename(self):
         """Subclasses may overwrite this to declare a format (.tar)"""
@@ -67,6 +67,48 @@ class InnoBackupEx(base.BackupRunner):
 
         return True
 
+    def metadata(self):
+        LOG.debug('Getting metadata from backup')
+        meta = {}
+        lsn = re.compile("The latest check point \(for incremental\): '(\d+)'")
+        with open('/tmp/innobackupex.log', 'r') as backup_log:
+            output = backup_log.read()
+            match = lsn.search(output)
+            if match:
+                meta = {'lsn': match.group(1)}
+        LOG.info("Metadata for backup: %s", str(meta))
+        return meta
+
     @property
     def filename(self):
         return '%s.xbstream' % self.base_filename
+
+
+class InnoBackupExIncremental(InnoBackupEx):
+    """InnoBackupEx incremental backup."""
+
+    def __init__(self, *args, **kwargs):
+        if not kwargs.get('lsn'):
+            raise AttributeError('lsn attribute missing, bad parent?')
+        super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
+        self.parent_location = kwargs.get('parent_location')
+        self.parent_checksum = kwargs.get('parent_checksum')
+
+    @property
+    def cmd(self):
+        cmd = ('sudo innobackupex'
+               ' --stream=xbstream'
+               ' --incremental'
+               ' --incremental-lsn=%(lsn)s'
+               ' %(extra_opts)s'
+               ' /var/lib/mysql'
+               ' 2>/tmp/innobackupex.log')
+        return cmd + self.zip_cmd + self.encrypt_cmd
+
+    def metadata(self):
+        _meta = super(InnoBackupExIncremental, self).metadata()
+        _meta.update({
+            'parent_location': self.parent_location,
+            'parent_checksum': self.parent_checksum,
+        })
+        return _meta
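The runner above chains backups through the xtrabackup log sequence number (LSN): each backup records the 'lsn' it reached, and a child incremental is started with --incremental-lsn set to its parent's value while also recording where the parent lives. A sketch of the resulting metadata chain (key names mirror the code above; the values and swift path are illustrative):

    # Metadata stored with each backup object. A child carries enough
    # information to locate and verify its parent, so restore can walk
    # the chain back to the full backup.
    full_meta = {'lsn': '54321'}
    incr_meta = {
        'lsn': '67890',  # checkpoint this backup reached
        'parent_location': 'swift://backups/full.xbstream.gz',  # illustrative
        'parent_checksum': 'md5-of-parent',
    }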
@@ -18,6 +18,7 @@ from trove.common import cfg
 from trove.common import exception
 from trove.common import utils
 from trove.openstack.common import log as logging
+from trove.openstack.common.gettextutils import _  # noqa
 from eventlet.green import subprocess
 
 LOG = logging.getLogger(__name__)
@@ -104,8 +105,11 @@ class RestoreRunner(Strategy):
         return content_length
 
     def _run_restore(self):
-        stream = self.storage.load(self.location, self.checksum)
-        self.process = subprocess.Popen(self.restore_cmd, shell=True,
+        return self._unpack(self.location, self.checksum, self.restore_cmd)
+
+    def _unpack(self, location, checksum, command):
+        stream = self.storage.load(location, checksum)
+        self.process = subprocess.Popen(command, shell=True,
                                         stdin=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
         self.pid = self.process.pid
@@ -114,7 +118,7 @@ class RestoreRunner(Strategy):
             self.process.stdin.write(chunk)
             content_length += len(chunk)
         self.process.stdin.close()
-        LOG.info("Restored %s bytes from stream." % content_length)
+        LOG.info(_("Restored %s bytes from stream.") % content_length)
 
         return content_length
@@ -24,6 +24,7 @@ from trove.openstack.common import log as logging
 from trove.common import exception
 from trove.common import utils
 import trove.guestagent.datastore.mysql.service as dbaas
+from trove.openstack.common.gettextutils import _  # noqa
 
 LOG = logging.getLogger(__name__)
 
@@ -37,18 +38,18 @@ class MySQLRestoreMixin(object):
 
     def mysql_is_running(self):
         if base.exec_with_root_helper("/usr/bin/mysqladmin", "ping"):
-            LOG.info("The mysqld daemon is up and running.")
+            LOG.info(_("The mysqld daemon is up and running."))
             return True
         else:
-            LOG.info("The mysqld daemon is not running.")
+            LOG.info(_("The mysqld daemon is not running."))
             return False
 
     def mysql_is_not_running(self):
         if base.exec_with_root_helper("/usr/bin/pgrep", "mysqld"):
-            LOG.info("The mysqld daemon is still running.")
+            LOG.info(_("The mysqld daemon is still running."))
             return False
         else:
-            LOG.info("The mysqld daemon is not running.")
+            LOG.info(_("The mysqld daemon is not running."))
             return True
 
     def poll_until_then_raise(self, event, exc):
@@ -65,9 +66,9 @@ class MySQLRestoreMixin(object):
         try:
             i = child.expect(['Starting mysqld daemon'])
             if i == 0:
-                LOG.info("Starting mysqld daemon")
-        except pexpect.TIMEOUT as e:
-            LOG.error("wait_and_close_proc failed: %s" % e)
+                LOG.info(_("Starting mysqld daemon"))
+        except pexpect.TIMEOUT:
+            LOG.exception(_("wait_and_close_proc failed"))
         finally:
             # There is a race condition here where we kill mysqld before
             # the init file has been executed. We need to ensure mysqld is up.
@@ -75,8 +76,8 @@ class MySQLRestoreMixin(object):
                                   self.mysql_is_running,
                                   base.RestoreError("Reset root password failed: "
                                                     "mysqld did not start!"))
-        LOG.info("Root password reset successfully!")
-        LOG.info("Cleaning up the temp mysqld process...")
+        LOG.info(_("Root password reset successfully!"))
+        LOG.info(_("Cleaning up the temp mysqld process..."))
         child.delayafterclose = 1
         child.delayafterterminate = 1
         child.close(force=True)
@@ -117,20 +118,24 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
     def pre_restore(self):
         app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
         app.stop_db()
-        LOG.info("Cleaning out restore location: %s", self.restore_location)
-        utils.execute_with_timeout("sudo", "chmod", "-R",
-                                   "0777", self.restore_location)
+        LOG.info(_("Cleaning out restore location: %s"), self.restore_location)
+        utils.execute_with_timeout("chmod", "-R", "0777",
+                                   self.restore_location,
+                                   root_helper="sudo",
+                                   run_as_root=True)
         utils.clean_out(self.restore_location)
 
     def _run_prepare(self):
-        LOG.info("Running innobackupex prepare: %s", self.prepare_cmd)
+        LOG.info(_("Running innobackupex prepare: %s"), self.prepare_cmd)
         self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
-        LOG.info("Innobackupex prepare finished successfully")
+        LOG.info(_("Innobackupex prepare finished successfully"))
 
     def post_restore(self):
         self._run_prepare()
-        utils.execute_with_timeout("sudo", "chown", "-R", "-f",
-                                   "mysql", self.restore_location)
+        utils.execute_with_timeout("chown", "-R", "-f", "mysql",
+                                   self.restore_location,
+                                   root_helper="sudo",
+                                   run_as_root=True)
         self._delete_old_binlogs()
         self.reset_root_password()
         app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
@@ -140,3 +145,91 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
         files = glob.glob(os.path.join(self.restore_location, "ib_logfile*"))
         for f in files:
             os.unlink(f)
+
+
+class InnoBackupExIncremental(InnoBackupEx):
+    __strategy_name__ = 'innobackupexincremental'
+    incremental_prep = ('sudo innobackupex'
+                        ' --apply-log'
+                        ' --redo-only'
+                        ' %(restore_location)s'
+                        ' --defaults-file=%(restore_location)s/backup-my.cnf'
+                        ' --ibbackup xtrabackup'
+                        ' %(incremental_args)s'
+                        ' 2>/tmp/innoprepare.log')
+
+    def __init__(self, *args, **kwargs):
+        super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
+        self.restore_location = kwargs.get('restore_location')
+        self.content_length = 0
+
+    def _incremental_restore_cmd(self, incremental_dir):
+        """Return a command for a restore with an incremental location."""
+        args = {'restore_location': incremental_dir}
+        return (self.decrypt_cmd +
+                self.unzip_cmd +
+                (self.base_restore_cmd % args))
+
+    def _incremental_prepare_cmd(self, incremental_dir):
+        if incremental_dir is not None:
+            incremental_arg = '--incremental-dir=%s' % incremental_dir
+        else:
+            incremental_arg = ''
+
+        args = {
+            'restore_location': self.restore_location,
+            'incremental_args': incremental_arg,
+        }
+
+        return self.incremental_prep % args
+
+    def _incremental_prepare(self, incremental_dir):
+        prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
+        LOG.info(_("Running innobackupex prepare: %s"), prepare_cmd)
+        utils.execute(prepare_cmd, shell=True)
+        LOG.info(_("Innobackupex prepare finished successfully"))
+
+    def _incremental_restore(self, location, checksum):
+        """Recursively apply backups from all parents.
+
+        If we are the parent, then we restore to the restore_location and
+        we apply the logs to the restore_location only.
+
+        Otherwise, if we are an incremental, we restore to a subfolder to
+        prevent stomping on the full restore data. Then we run apply log
+        with the '--incremental-dir' flag.
+        """
+        metadata = self.storage.load_metadata(location, checksum)
+        incremental_dir = None
+        if 'parent_location' in metadata:
+            LOG.info(_("Restoring parent: %(parent_location)s"
+                       " checksum: %(parent_checksum)s") % metadata)
+            parent_location = metadata['parent_location']
+            parent_checksum = metadata['parent_checksum']
+            # Restore parents recursively so backups are applied sequentially
+            self._incremental_restore(parent_location, parent_checksum)
+            # For *this* backup, set the incremental_dir.
+            # Just use the checksum for the incremental path, as it is
+            # sufficiently unique: /var/lib/mysql/<checksum>
+            incremental_dir = os.path.join(self.restore_location, checksum)
+            utils.execute("mkdir", "-p", incremental_dir,
+                          root_helper="sudo",
+                          run_as_root=True)
+            command = self._incremental_restore_cmd(incremental_dir)
+        else:
+            # The parent (full backup) uses the same command from the
+            # InnoBackupEx super class and does not set an incremental_dir.
+            command = self.restore_cmd
+
+        self.content_length += self._unpack(location, checksum, command)
+        self._incremental_prepare(incremental_dir)
+
+    def _run_restore(self):
+        """Run incremental restore.
+
+        First grab all parents and prepare them with '--redo-only'. After
+        all backups are restored, the super class InnoBackupEx post_restore
+        method is called to do the final prepare with '--apply-log'.
+        """
+        self._incremental_restore(self.location, self.checksum)
+        return self.content_length
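The key idea in _incremental_restore above is depth-first recursion: follow parent links until the full backup is reached, unpack it first, then unpack and prepare each incremental on the way back so the redo logs apply in chronological order. A simplified, self-contained sketch of that walk (storage and apply_fn are stand-ins for the strategy objects used above):

    # Simplified sketch of the restore recursion: metadata may name a
    # parent; recurse to the root (the full backup) first, then apply
    # each child so incrementals land in order.
    def restore_chain(storage, location, checksum, apply_fn):
        meta = storage.load_metadata(location, checksum)
        if 'parent_location' in meta:
            restore_chain(storage, meta['parent_location'],
                          meta['parent_checksum'], apply_fn)
        apply_fn(location, checksum)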
@@ -34,3 +34,11 @@ class Storage(Strategy):
     @abc.abstractmethod
     def load(self, location, backup_checksum):
         """Load a stream from a persisted storage location."""
+
+    @abc.abstractmethod
+    def load_metadata(self, location, backup_checksum):
+        """Load metadata for a persisted object."""
+
+    @abc.abstractmethod
+    def save_metadata(self, location, metadata={}):
+        """Save metadata for a persisted object."""
@@ -18,6 +18,7 @@ import hashlib
 
 from trove.guestagent.strategies.storage import base
 from trove.openstack.common import log as logging
+from trove.openstack.common.gettextutils import _  # noqa
 from trove.common.remote import create_swift_client
 from trove.common import cfg
 
@@ -192,3 +193,44 @@ class SwiftStorage(base.Storage):
             self._verify_checksum(headers.get('etag', ''), backup_checksum)
 
         return info
+
+    def _get_attr(self, original):
+        """Get a friendly name from an object header key."""
+        key = original.replace('-', '_')
+        key = key.replace('x_object_meta_', '')
+        return key
+
+    def _set_attr(self, original):
+        """Return a swift friendly header key."""
+        key = original.replace('_', '-')
+        return 'X-Object-Meta-%s' % key
+
+    def load_metadata(self, location, backup_checksum):
+        """Load metadata from swift."""
+
+        storage_url, container, filename = self._explodeLocation(location)
+
+        headers = self.connection.head_object(container, filename)
+
+        if CONF.verify_swift_checksum_on_restore:
+            self._verify_checksum(headers.get('etag', ''), backup_checksum)
+
+        _meta = {}
+        for key, value in headers.iteritems():
+            if key.startswith('x-object-meta'):
+                _meta[self._get_attr(key)] = value
+
+        return _meta
+
+    def save_metadata(self, location, metadata={}):
+        """Save metadata to a swift object."""
+
+        storage_url, container, filename = self._explodeLocation(location)
+
+        _headers = self.connection.head_object(container, filename)
+        headers = {'X-Object-Manifest': _headers.get('x-object-manifest')}
+        for key, value in metadata.iteritems():
+            headers[self._set_attr(key)] = value
+
+        LOG.info(_("Writing metadata: %s"), str(headers))
+        self.connection.post_object(container, filename, headers=headers)
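Swift has no side-channel for backup metadata, so the strategy above rides key/value pairs on the object itself as X-Object-Meta-* headers; load and save reduce to renaming header keys. A small sketch of the mapping (it mirrors _set_attr/_get_attr above and runs stand-alone):

    # Round trip between metadata keys and swift object headers.
    def to_header(key):
        return 'X-Object-Meta-%s' % key.replace('_', '-')

    def from_header(header):
        return header.replace('x-object-meta-', '').replace('-', '_')

    assert to_header('parent_location') == 'X-Object-Meta-parent-location'
    assert from_header('x-object-meta-lsn') == 'lsn'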
@@ -12,14 +12,16 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-import uuid
 from proboscis.asserts import assert_equal
+from proboscis.asserts import assert_not_equal
 from proboscis.asserts import assert_raises
+from proboscis.asserts import assert_true
 from proboscis import test
+from proboscis import SkipTest
 from proboscis.decorators import time_out
 import troveclient.compat
 from trove.common.utils import poll_until
+from trove.common.utils import generate_uuid
 from trove.tests.util import create_dbaas_client
 from trove.tests.util.users import Requirements
 from trove.tests.config import CONFIG
@@ -36,6 +38,8 @@ BACKUP_DESC = 'test description'
 
 
 backup_info = None
+incremental_info = None
+incremental_db = generate_uuid()
 restore_instance_id = None
 backup_count_prior_to_create = 0
 backup_count_for_instance_prior_to_create = 0
@@ -69,7 +73,7 @@ class CreateBackups(object):
     def test_backup_create_instance_not_found(self):
         """test create backup with unknown instance"""
         assert_raises(exceptions.NotFound, instance_info.dbaas.backups.create,
-                      BACKUP_NAME, str(uuid.uuid4()), BACKUP_DESC)
+                      BACKUP_NAME, generate_uuid(), BACKUP_DESC)
 
     @test
     def test_backup_create_instance(self):
@@ -84,14 +88,14 @@ class CreateBackups(object):
         result = instance_info.dbaas.backups.create(BACKUP_NAME,
                                                     instance_info.id,
                                                     BACKUP_DESC)
-        global backup_info
-        backup_info = result
         assert_equal(BACKUP_NAME, result.name)
         assert_equal(BACKUP_DESC, result.description)
         assert_equal(instance_info.id, result.instance_id)
         assert_equal('NEW', result.status)
         instance = instance_info.dbaas.instances.get(instance_info.id)
         assert_equal('BACKUP', instance.status)
+        global backup_info
+        backup_info = result
 
 
 @test(runs_after=[CreateBackups],
@@ -193,17 +197,49 @@ class ListBackups(object):
 
 
+@test(runs_after=[ListBackups],
+      depends_on=[WaitForBackupCreateToFinish],
+      groups=[GROUP, tests.INSTANCES])
+class IncrementalBackups(object):
+
+    @test
+    def test_create_db(self):
+        databases = [{'name': incremental_db}]
+        instance_info.dbaas.databases.create(instance_info.id, databases)
+        assert_equal(202, instance_info.dbaas.last_http_code)
+
+    @test(runs_after=['test_create_db'])
+    def test_create_incremental_backup(self):
+        result = instance_info.dbaas.backups.create("incremental-backup",
+                                                    backup_info.instance_id,
+                                                    parent_id=backup_info.id)
+        global incremental_info
+        incremental_info = result
+        assert_equal(202, instance_info.dbaas.last_http_code)
+
+        # Wait for the backup to finish
+        def result_is_active():
+            backup = instance_info.dbaas.backups.get(incremental_info.id)
+            if backup.status == "COMPLETED":
+                return True
+            else:
+                assert_not_equal("FAILED", backup.status)
+                return False
+
+        poll_until(result_is_active, time_out=60 * 30)
+        assert_equal(backup_info.id, incremental_info.parent_id)
+
+
+@test(runs_after=[IncrementalBackups],
+      groups=[GROUP, tests.INSTANCES])
 class RestoreUsingBackup(object):
 
     @test
     def test_restore(self):
         """test restore"""
         _flavor, flavor_href = instance_info.find_default_flavor()
-        restorePoint = {"backupRef": backup_info.id}
+        restorePoint = {"backupRef": incremental_info.id}
         result = instance_info.dbaas.instances.create(
             instance_info.name + "_restore",
-            flavor_href,
+            instance_info.dbaas_flavor_href,
             instance_info.volume,
             restorePoint=restorePoint)
         assert_equal(200, instance_info.dbaas.last_http_code)
@@ -221,7 +257,6 @@ class WaitForRestoreToFinish(object):
     """
 
     @test
-    @time_out(60 * 32)
     def test_instance_restored(self):
         # This version just checks the REST API status.
         def result_is_active():
@@ -236,12 +271,26 @@ class WaitForRestoreToFinish(object):
                 assert_equal(instance.volume.get('used', None), None)
             return False
 
-        poll_until(result_is_active)
+        poll_until(result_is_active, time_out=60 * 32, sleep_time=10)
 
 
-@test(runs_after=[WaitForRestoreToFinish],
+@test(depends_on_classes=[RestoreUsingBackup, WaitForRestoreToFinish],
+      runs_after=[WaitForRestoreToFinish],
       enabled=(not CONFIG.fake_mode),
       groups=[GROUP, tests.INSTANCES])
-class DeleteBackups(object):
+class VerifyRestore(object):
+
+    @test
+    def test_database_restored(self):
+        databases = instance_info.dbaas.databases.list(restore_instance_id)
+        dbs = [d.name for d in databases]
+        assert_true(incremental_db in dbs,
+                    "%s not found on restored instance" % incremental_db)
+
+
+@test(runs_after=[VerifyRestore],
+      groups=[GROUP, tests.INSTANCES])
+class DeleteRestoreInstance(object):
 
     @test
     def test_delete_restored_instance(self):
@@ -256,10 +305,15 @@ class DeleteRestoreInstance(object):
         except exceptions.NotFound:
             return True
 
-        poll_until(instance_is_gone)
+        poll_until(instance_is_gone, time_out=120)
         assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
                       restore_instance_id)
 
 
+@test(runs_after=[DeleteRestoreInstance],
+      groups=[GROUP, tests.INSTANCES])
+class DeleteBackups(object):
+
     @test
     def test_backup_delete_not_found(self):
         """test delete unknown backup"""
@@ -267,10 +321,8 @@ class DeleteBackups(object):
                       'nonexistent_backup')
 
     @test
-    @time_out(60 * 2)
-    def test_backup_delete(self):
-        """test delete"""
-
+    def test_backup_delete_other(self):
+        """Test another user cannot delete backup"""
         # Test to make sure that user in other tenant is not able
         # to DELETE this backup
         reqs = Requirements(is_admin=False)
@@ -281,15 +333,25 @@ class DeleteBackups(object):
         assert_raises(exceptions.NotFound, other_client.backups.delete,
                       backup_info.id)
 
+    @test(runs_after=[test_backup_delete_other])
+    def test_backup_delete(self):
+        """test backup deletion"""
         instance_info.dbaas.backups.delete(backup_info.id)
         assert_equal(202, instance_info.dbaas.last_http_code)
 
         def backup_is_gone():
             result = instance_info.dbaas.instances.backups(instance_info.id)
             if len(result) == 0:
                 return True
             else:
-                return False
-        poll_until(backup_is_gone)
+                try:
+                    instance_info.dbaas.backups.get(backup_info.id)
+                    return False
+                except exceptions.NotFound:
+                    return True
+
+        poll_until(backup_is_gone, time_out=120)
 
+    @test(runs_after=[test_backup_delete])
+    def test_incremental_deleted(self):
+        """test backup children are deleted"""
+        if incremental_info is None:
+            raise SkipTest("Incremental Backup not created")
         assert_raises(exceptions.NotFound, instance_info.dbaas.backups.get,
-                      backup_info.id)
+                      incremental_info.id)
@@ -28,6 +28,8 @@ from hashlib import md5
 
 from swiftclient import client as swift
 
+from trove.openstack.common.gettextutils import _  # noqa
+
 LOG = logging.getLogger(__name__)
 
 
@@ -38,7 +40,7 @@ class FakeSwiftClient(object):
 
     @classmethod
     def Connection(self, *args, **kargs):
-        LOG.debug("fake FakeSwiftClient Connection")
+        LOG.debug(_("fake FakeSwiftClient Connection"))
         return FakeSwiftConnection()
 
 
@@ -74,7 +76,7 @@ class FakeSwiftConnection(object):
                 'x-account-object-count': '0'}, [])
 
     def head_container(self, container):
-        LOG.debug("fake head_container(%s)" % container)
+        LOG.debug(_("fake head_container(%s)") % container)
         if container == 'missing_container':
             raise swift.ClientException('fake exception',
                                         http_status=httplib.NOT_FOUND)
@@ -86,11 +88,11 @@ class FakeSwiftConnection(object):
         pass
 
     def put_container(self, container):
-        LOG.debug("fake put_container(%s)" % container)
+        LOG.debug(_("fake put_container(%s)") % container)
         pass
 
     def get_container(self, container, **kwargs):
-        LOG.debug("fake get_container(%s)" % container)
+        LOG.debug(_("fake get_container(%s)") % container)
         fake_header = None
         fake_body = [{'name': 'backup_001'},
                      {'name': 'backup_002'},
@@ -98,7 +100,8 @@ class FakeSwiftConnection(object):
         return fake_header, fake_body
 
     def head_object(self, container, name):
-        LOG.debug("fake put_container(%s, %s)" % (container, name))
+        LOG.debug(_("fake put_container(%(container)s, %(name)s)") %
+                  {'container': container, 'name': name})
         checksum = md5()
         if self.manifest_prefix and self.manifest_name == name:
             for object_name in sorted(self.container_objects.iterkeys()):
@@ -119,7 +122,8 @@ class FakeSwiftConnection(object):
         return {'etag': '"%s"' % checksum.hexdigest()}
 
     def get_object(self, container, name, resp_chunk_size=None):
-        LOG.debug("fake get_object(%s, %s)" % (container, name))
+        LOG.debug(_("fake get_object(%(container)s, %(name)s)") %
+                  {'container': container, 'name': name})
         if container == 'socket_error_on_get':
             raise socket.error(111, 'ECONNREFUSED')
         if 'metadata' in name:
@@ -156,7 +160,8 @@ class FakeSwiftConnection(object):
         return (fake_header, fake_object_body)
 
     def put_object(self, container, name, contents, **kwargs):
-        LOG.debug("fake put_object(%s, %s)" % (container, name))
+        LOG.debug(_("fake put_object(%(container)s, %(name)s)") %
+                  {'container': container, 'name': name})
         if container == 'socket_error_on_put':
             raise socket.error(111, 'ECONNREFUSED')
         headers = kwargs.get('headers', {})
@@ -188,8 +193,13 @@ class FakeSwiftConnection(object):
             return "this_is_an_intentional_bad_segment_etag"
         return object_checksum.hexdigest()
 
+    def post_object(self, container, name, headers={}):
+        LOG.debug(_("fake post_object(%(container)s, %(name)s, %(head)s)") %
+                  {'container': container, 'name': name, 'head': str(headers)})
+
     def delete_object(self, container, name):
-        LOG.debug("fake delete_object(%s, %s)" % (container, name))
+        LOG.debug(_("fake delete_object(%(container)s, %(name)s)") %
+                  {'container': container, 'name': name})
         if container == 'socket_error_on_delete':
             raise socket.error(111, 'ECONNREFUSED')
         pass
@@ -21,23 +21,47 @@ from trove.common import apischema
 
 
 class TestBackupController(TestCase):
 
+    def setUp(self):
+        super(TestBackupController, self).setUp()
+        self.uuid = "d6338c9c-3cc8-4313-b98f-13cc0684cf15"
+        self.invalid_uuid = "ead-edsa-e23-sdf-23"
+        self.controller = BackupController()
+
     def test_validate_create_complete(self):
-        body = {"backup": {"instance": "d6338c9c-3cc8-4313-b98f-13cc0684cf15",
+        body = {"backup": {"instance": self.uuid,
                            "name": "testback-backup"}}
-        controller = BackupController()
-        schema = controller.get_schema('create', body)
+        schema = self.controller.get_schema('create', body)
         validator = jsonschema.Draft4Validator(schema)
         self.assertTrue(validator.is_valid(body))
 
     def test_validate_create_invalid_uuid(self):
-        invalid_uuid = "ead-edsa-e23-sdf-23"
-        body = {"backup": {"instance": invalid_uuid,
+        body = {"backup": {"instance": self.invalid_uuid,
                            "name": "testback-backup"}}
-        controller = BackupController()
-        schema = controller.get_schema('create', body)
+        schema = self.controller.get_schema('create', body)
         validator = jsonschema.Draft4Validator(schema)
         self.assertFalse(validator.is_valid(body))
         errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
         self.assertThat(errors[0].message,
                         Equals("'%s' does not match '%s'" %
-                               (invalid_uuid, apischema.uuid['pattern'])))
+                               (self.invalid_uuid, apischema.uuid['pattern'])))
+
+    def test_validate_create_incremental(self):
+        body = {"backup": {"instance": self.uuid,
+                           "name": "testback-backup",
+                           "parent_id": self.uuid}}
+        schema = self.controller.get_schema('create', body)
+        validator = jsonschema.Draft4Validator(schema)
+        self.assertTrue(validator.is_valid(body))
+
+    def test_invalid_parent_id(self):
+        body = {"backup": {"instance": self.uuid,
+                           "name": "testback-backup",
+                           "parent_id": self.invalid_uuid}}
+        schema = self.controller.get_schema('create', body)
+        validator = jsonschema.Draft4Validator(schema)
+        self.assertFalse(validator.is_valid(body))
+        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
+        self.assertThat(errors[0].message,
+                        Equals("'%s' does not match '%s'" %
+                               (self.invalid_uuid, apischema.uuid['pattern'])))
@@ -78,11 +78,48 @@ class BackupCreateTest(testtools.TestCase):
         self.assertEqual(self.instance_id, db_record['instance_id'])
         self.assertEqual(models.BackupState.NEW, db_record['state'])
 
+    def test_create_incremental(self):
+        instance = mock(instance_models.Instance)
+        parent = mock(models.DBBackup)
+        when(instance_models.BuiltInstance).load(any(), any()).thenReturn(
+            instance)
+        when(instance).validate_can_perform_action().thenReturn(None)
+        when(models.Backup).verify_swift_auth_token(any()).thenReturn(
+            None)
+        when(api.API).create_backup(any()).thenReturn(None)
+        when(models.Backup).get_by_id(any(), any()).thenReturn(
+            parent)
+
+        incremental = models.Backup.create(self.context, self.instance_id,
+                                           BACKUP_NAME, BACKUP_DESC,
+                                           parent_id='parent_uuid')
+
+        self.created = True
+
+        db_record = models.DBBackup.find_by(id=incremental.id)
+        self.assertEqual(incremental.id, db_record['id'])
+        self.assertEqual(BACKUP_NAME, db_record['name'])
+        self.assertEqual(BACKUP_DESC, db_record['description'])
+        self.assertEqual(self.instance_id, db_record['instance_id'])
+        self.assertEqual(models.BackupState.NEW, db_record['state'])
+        self.assertEqual('parent_uuid', db_record['parent_id'])
+
     def test_create_instance_not_found(self):
         self.assertRaises(exception.NotFound, models.Backup.create,
                           self.context, self.instance_id,
                           BACKUP_NAME, BACKUP_DESC)
 
+    def test_create_incremental_not_found(self):
+        instance = mock(instance_models.Instance)
+        when(instance_models.BuiltInstance).load(any(), any()).thenReturn(
+            instance)
+        when(instance).validate_can_perform_action().thenReturn(None)
+        when(models.Backup).verify_swift_auth_token(any()).thenReturn(
+            None)
+        self.assertRaises(exception.NotFound, models.Backup.create,
+                          self.context, self.instance_id,
+                          BACKUP_NAME, BACKUP_DESC, parent_id='BAD')
+
     def test_create_instance_not_active(self):
         instance = mock(instance_models.Instance)
         when(instance_models.BuiltInstance).load(any(), any()).thenReturn(
@@ -13,21 +13,18 @@
 #limitations under the License.
 
+from mock import Mock
-from mockito import when, unstub, mock, any, contains
+from mockito import when, unstub, any
 from webob.exc import HTTPNotFound
 
 import hashlib
 import os
 import testtools
 
-from trove.common import utils
 from trove.common.context import TroveContext
 from trove.conductor import api as conductor_api
+from trove.guestagent.strategies.backup import mysql_impl
+from trove.guestagent.strategies.restore.base import RestoreRunner
-from trove.backup.models import DBBackup
 from trove.backup.models import BackupState
-from trove.db.models import DatabaseModelBase
 from trove.guestagent.backup import backupagent
 from trove.guestagent.strategies.backup.base import BackupRunner
 from trove.guestagent.strategies.backup.base import UnknownBackupType
@@ -106,6 +103,12 @@ class MockSwift(object):
     def load(self, context, storage_url, container, filename, backup_checksum):
         pass
 
+    def load_metadata(self, location, checksum):
+        return {}
+
+    def save_metadata(self, location, metadata):
+        pass
+
 
 class MockStorage(Storage):
 
@@ -118,6 +121,12 @@ class MockStorage(Storage):
     def save(self, filename, stream):
         pass
 
+    def load_metadata(self, location, checksum):
+        return {}
+
+    def save_metadata(self, location, metadata={}):
+        pass
+
     def is_enabled(self):
         return True
 
@@ -305,7 +314,6 @@ class BackupAgentTest(testtools.TestCase):
                           context=None, backup_info=backup_info,
                           runner=MockLossyBackup)
 
-        #self.assertThat(backup.state, Is(BackupState.FAILED))
         self.assertTrue(
             conductor_api.API.update_backup.called_once_with(
                 any(),
@@ -319,19 +327,10 @@ class BackupAgentTest(testtools.TestCase):
         transfers/downloads data and invokes the restore module
         reports status
         """
-        backup = mock(DBBackup)
-        backup.location = "/backup/location/123"
-        backup.backup_type = 'InnoBackupEx'
-
-        when(utils).execute(contains('sudo rm -rf')).thenReturn(None)
-        when(utils).clean_out(any()).thenReturn(None)
         when(backupagent).get_storage_strategy(any(), any()).thenReturn(
             MockStorage)
-
         when(backupagent).get_restore_strategy(
             'InnoBackupEx', any()).thenReturn(MockRestoreRunner)
-        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
-        when(backup).save().thenReturn(backup)
 
         agent = backupagent.BackupAgent()
 
@@ -343,22 +342,64 @@ class BackupAgentTest(testtools.TestCase):
         agent.execute_restore(TroveContext(), bkup_info, '/var/lib/mysql')
 
     def test_restore_unknown(self):
-        backup = mock(DBBackup)
-        backup.location = "/backup/location/123"
-        backup.backup_type = 'foo'
-        when(utils).execute(contains('sudo rm -rf')).thenReturn(None)
-        when(utils).clean_out(any()).thenReturn(None)
-        when(DatabaseModelBase).find_by(id='123').thenReturn(backup)
         when(backupagent).get_restore_strategy(
             'foo', any()).thenRaise(ImportError)
 
         agent = backupagent.BackupAgent()
 
         bkup_info = {'id': '123',
-                     'location': backup.location,
-                     'type': backup.backup_type,
+                     'location': 'fake-location',
+                     'type': 'foo',
                      'checksum': 'fake-checksum',
                      }
         self.assertRaises(UnknownBackupType, agent.execute_restore,
                           context=None, backup_info=bkup_info,
                           restore_location='/var/lib/mysql')
 
+    def test_backup_incremental_metadata(self):
+        when(backupagent).get_storage_strategy(any(), any()).thenReturn(
+            MockSwift)
+        MockStorage.save_metadata = Mock()
+        when(MockSwift).load_metadata(any(), any()).thenReturn(
+            {'lsn': '54321'})
+
+        meta = {
+            'lsn': '12345',
+            'parent_location': 'fake',
+            'parent_checksum': 'md5',
+        }
+        when(mysql_impl.InnoBackupExIncremental).metadata().thenReturn(meta)
+        when(mysql_impl.InnoBackupExIncremental).check_process().thenReturn(
+            True)
+
+        agent = backupagent.BackupAgent()
+
+        bkup_info = {'id': '123',
+                     'location': 'fake-location',
+                     'type': 'InnoBackupEx',
+                     'checksum': 'fake-checksum',
+                     'parent': {'location': 'fake', 'checksum': 'md5'}
+                     }
+
+        agent.execute_backup(TroveContext(), bkup_info, '/var/lib/mysql')
+
+        self.assertTrue(MockStorage.save_metadata.called_once_with(
+            any(),
+            meta))
+
+    def test_backup_incremental_bad_metadata(self):
+        when(backupagent).get_storage_strategy(any(), any()).thenReturn(
+            MockSwift)
+
+        agent = backupagent.BackupAgent()
+
+        bkup_info = {'id': '123',
+                     'location': 'fake-location',
+                     'type': 'InnoBackupEx',
+                     'checksum': 'fake-checksum',
+                     'parent': {'location': 'fake', 'checksum': 'md5'}
+                     }
+
+        self.assertRaises(
+            AttributeError,
+            agent.execute_backup, TroveContext(), bkup_info, 'location')
@@ -13,7 +13,8 @@
 #limitations under the License.
 
 import testtools
-from mockito import when, unstub
+from mock import Mock
+from mockito import when, unstub, any
 import hashlib
 
 from trove.common.context import TroveContext
@@ -265,3 +266,58 @@ class StreamReaderTests(testtools.TestCase):
         results = self.stream.read(0)
         self.assertEqual('', results, "Results should be empty.")
         self.assertTrue(self.stream.end_of_file)
+
+
+class SwiftMetadataTests(testtools.TestCase):
+
+    def setUp(self):
+        super(SwiftMetadataTests, self).setUp()
+        self.swift_client = FakeSwiftConnection()
+        self.context = TroveContext()
+        when(swift).create_swift_client(self.context).thenReturn(
+            self.swift_client)
+        self.swift = SwiftStorage(self.context)
+
+    def tearDown(self):
+        super(SwiftMetadataTests, self).tearDown()
+        unstub()
+
+    def test__get_attr(self):
+        normal_header = self.swift._get_attr('content-type')
+        self.assertEqual('content_type', normal_header)
+        meta_header = self.swift._get_attr('x-object-meta-foo')
+        self.assertEqual('foo', meta_header)
+        meta_header_two = self.swift._get_attr('x-object-meta-foo-bar')
+        self.assertEqual('foo_bar', meta_header_two)
+
+    def test__set_attr(self):
+        meta_header = self.swift._set_attr('foo')
+        self.assertEqual('X-Object-Meta-foo', meta_header)
+        meta_header_two = self.swift._set_attr('foo_bar')
+        self.assertEqual('X-Object-Meta-foo-bar', meta_header_two)
+
+    def test_load_metadata(self):
+        location = 'http://mockswift.com/v1/545433/backups/mybackup.tar'
+        headers = {
+            'etag': '"fake-md5-sum"',
+            'x-object-meta-lsn': '1234567'
+        }
+        when(self.swift_client).head_object(any(), any()).thenReturn(
+            headers)
+
+        metadata = self.swift.load_metadata(location, 'fake-md5-sum')
+        self.assertEqual({'lsn': '1234567'}, metadata)
+
+    def test_save_metadata(self):
+        location = 'http://mockswift.com/v1/545433/backups/mybackup.tar'
+        metadata = {'lsn': '1234567'}
+        self.swift_client.post_object = Mock()
+
+        self.swift.save_metadata(location, metadata=metadata)
+
+        headers = {
+            'X-Object-Meta-lsn': '1234567',
+            'X-Object-Manifest': None
+        }
+        self.swift_client.post_object.assert_called_with(
+            'backups', 'mybackup.tar', headers=headers)
@@ -21,6 +21,10 @@ BACKUP_XTRA_CLS = ("trove.guestagent.strategies.backup."
                    "mysql_impl.InnoBackupEx")
 RESTORE_XTRA_CLS = ("trove.guestagent.strategies.restore."
                     "mysql_impl.InnoBackupEx")
+BACKUP_XTRA_INCR_CLS = ("trove.guestagent.strategies.backup."
+                        "mysql_impl.InnoBackupExIncremental")
+RESTORE_XTRA_INCR_CLS = ("trove.guestagent.strategies.restore."
+                         "mysql_impl.InnoBackupExIncremental")
 BACKUP_SQLDUMP_CLS = ("trove.guestagent.strategies.backup."
                       "mysql_impl.MySQLDump")
 RESTORE_SQLDUMP_CLS = ("trove.guestagent.strategies.restore."
@@ -34,13 +38,22 @@ XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
                    " /var/lib/mysql 2>/tmp/innobackupex.log")
 XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
 XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
+XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
+                    ' --incremental --incremental-lsn=%(lsn)s'
+                    ' %(extra_opts)s /var/lib/mysql 2>/tmp/innobackupex.log')
 SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
                       "--opt --password=password -u user"
                       " 2>/tmp/mysqldump.log")
 SQLDUMP_BACKUP = SQLDUMP_BACKUP_RAW % {'extra_opts': ''}
 SQLDUMP_BACKUP_EXTRA_OPTS = (SQLDUMP_BACKUP_RAW %
                              {'extra_opts': '--events --routines --triggers'})
-XTRA_RESTORE = "sudo xbstream -x -C /var/lib/mysql"
+XTRA_RESTORE_RAW = "sudo xbstream -x -C %(restore_location)s"
+XTRA_RESTORE = XTRA_RESTORE_RAW % {'restore_location': '/var/lib/mysql'}
+XTRA_INCR_PREPARE = ("sudo innobackupex --apply-log"
+                     " --redo-only /var/lib/mysql"
+                     " --defaults-file=/var/lib/mysql/backup-my.cnf"
+                     " --ibbackup xtrabackup %(incr)s"
+                     " 2>/tmp/innoprepare.log")
 SQLDUMP_RESTORE = "sudo mysql"
 PREPARE = ("sudo innobackupex --apply-log /var/lib/mysql "
            "--defaults-file=/var/lib/mysql/backup-my.cnf "
@@ -78,6 +91,47 @@ class GuestAgentBackupTest(testtools.TestCase):
                          XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT)
         self.assertEqual(bkup.manifest, "12345.xbstream.gz.enc")
 
+    def test_backup_xtrabackup_incremental(self):
+        backupBase.BackupRunner.is_zipped = True
+        backupBase.BackupRunner.is_encrypted = False
+        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
+        opts = {'lsn': '54321', 'extra_opts': ''}
+
+        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
+
+        bkup = RunnerClass(12345, user="user", password="password",
+                           extra_opts="", lsn="54321")
+        self.assertEqual(expected, bkup.command)
+        self.assertEqual("12345.xbstream.gz", bkup.manifest)
+
+    def test_backup_xtrabackup_incremental_with_extra_opts_command(self):
+        backupBase.BackupRunner.is_zipped = True
+        backupBase.BackupRunner.is_encrypted = False
+        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
+        opts = {'lsn': '54321', 'extra_opts': '--no-lock'}
+
+        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
+
+        bkup = RunnerClass(12345, user="user", password="password",
+                           extra_opts="--no-lock", lsn="54321")
+        self.assertEqual(expected, bkup.command)
+        self.assertEqual("12345.xbstream.gz", bkup.manifest)
+
+    def test_backup_xtrabackup_incremental_encrypted(self):
+        backupBase.BackupRunner.is_zipped = True
+        backupBase.BackupRunner.is_encrypted = True
+        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
+        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
+        opts = {'lsn': '54321', 'extra_opts': ''}
+
+        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP + PIPE + ENCRYPT
+
+        bkup = RunnerClass(12345, user="user", password="password",
+                           extra_opts="", lsn="54321")
+
+        self.assertEqual(expected, bkup.command)
+        self.assertEqual("12345.xbstream.gz.enc", bkup.manifest)
+
     def test_backup_decrypted_mysqldump_command(self):
         backupBase.BackupRunner.is_zipped = True
         backupBase.BackupRunner.is_encrypted = False
@@ -128,6 +182,52 @@ class GuestAgentBackupTest(testtools.TestCase):
                          DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE)
         self.assertEqual(restr.prepare_cmd, PREPARE)
 
+    def test_restore_xtrabackup_incremental_prepare_command(self):
+        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
+        restr = RunnerClass(None, restore_location="/var/lib/mysql",
+                            location="filename", checksum="m5d")
+        # Final prepare command (same as normal xtrabackup)
+        self.assertEqual(PREPARE, restr.prepare_cmd)
+        # Incremental backup prepare command
+        expected = XTRA_INCR_PREPARE % {'incr': '--incremental-dir=/foo/bar/'}
+        observed = restr._incremental_prepare_cmd('/foo/bar/')
+        self.assertEqual(expected, observed)
+        # Full backup prepare command
+        expected = XTRA_INCR_PREPARE % {'incr': ''}
+        observed = restr._incremental_prepare_cmd(None)
+        self.assertEqual(expected, observed)
+
+    def test_restore_decrypted_xtrabackup_incremental_command(self):
+        restoreBase.RestoreRunner.is_zipped = True
+        restoreBase.RestoreRunner.is_encrypted = False
+        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
+        restr = RunnerClass(None, restore_location="/var/lib/mysql",
+                            location="filename", checksum="m5d")
+        # Full restore command
+        expected = UNZIP + PIPE + XTRA_RESTORE
+        self.assertEqual(expected, restr.restore_cmd)
+        # Incremental backup restore command
+        opts = {'restore_location': '/foo/bar/'}
+        expected = UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
+        observed = restr._incremental_restore_cmd('/foo/bar/')
+        self.assertEqual(expected, observed)
+
+    def test_restore_encrypted_xtrabackup_incremental_command(self):
+        restoreBase.RestoreRunner.is_zipped = True
+        restoreBase.RestoreRunner.is_encrypted = True
+        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
+        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
+        restr = RunnerClass(None, restore_location="/var/lib/mysql",
+                            location="filename", checksum="md5")
+        # Full restore command
+        expected = DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE
+        self.assertEqual(expected, restr.restore_cmd)
+        # Incremental backup restore command
+        opts = {'restore_location': '/foo/bar/'}
+        expected = DECRYPT + PIPE + UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
+        observed = restr._incremental_restore_cmd('/foo/bar/')
+        self.assertEqual(expected, observed)
+
     def test_restore_decrypted_mysqldump_command(self):
         restoreBase.RestoreRunner.is_zipped = True
         restoreBase.RestoreRunner.is_encrypted = False