|
def create(self, context, name, description, volume_id,
container, incremental=False, availability_zone=None,
force=False, snapshot_id=None):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')  # verify that the caller's policy allows backup creation
volume = self.volume_api.get(context, volume_id)
snapshot = None
if snapshot_id:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
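# Validate the source: the volume must be 'available' or 'in-use' (an
# in-use volume needs either a source snapshot or the force flag), and a
# source snapshot, if given, must itself be 'available'.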
if volume['status'] not in ["available", "in-use"]:
msg = (_('Volume to be backed up must be available '
'or in-use, but the current status is "%s".')
% volume['status'])
raise exception.InvalidVolume(reason=msg)
elif volume['status'] in ["in-use"] and not snapshot_id and not force:
msg = _('Backing up an in-use volume must use '
'the force flag.')
raise exception.InvalidVolume(reason=msg)
elif snapshot_id and snapshot['status'] not in ["available"]:
msg = (_('Snapshot to be backed up must be available, '
'but the current status is "%s".')
% snapshot['status'])
raise exception.InvalidSnapshot(reason=msg)
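# Remember the volume's current status so it can be restored afterwards,
# and pick a backup service host serving the volume's availability zone.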
previous_status = volume['status']
host = self._get_available_backup_service_host(
None, volume.availability_zone,
volume_utils.extract_host(volume.host, 'host'))
# Reserve a quota before setting volume status and backup status
try:
reserve_opts = {'backups': 1,
'backup_gigabytes': volume['size']}
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
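# Helper: how much of the given quota resource the project has already
# consumed (reserved plus in-use).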
def _consumed(resource_name):
return (usages[resource_name]['reserved'] +
usages[resource_name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG backup (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeBackupSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('backup_gigabytes'),
quota=quotas['backup_gigabytes'])
elif 'backups' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"backups (%(d_consumed)d backups "
"already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.BackupLimitExceeded(
allowed=quotas[over])
# Find the latest backup and use it as the parent backup to do an
# incremental backup.
latest_backup = None
if incremental:
backups = objects.BackupList.get_all_by_volume(context.elevated(),
volume_id)
if backups.objects:
# NOTE(xyang): The 'data_timestamp' field records the time
# when the data on the volume was first saved. If it is
# a backup from volume, 'data_timestamp' will be the same
# as 'created_at' for a backup. If it is a backup from a
# snapshot, 'data_timestamp' will be the same as
# 'created_at' for a snapshot.
# If not backing up from snapshot, the backup with the latest
# 'data_timestamp' will be the parent; If backing up from
# snapshot, the backup with the latest 'data_timestamp' will
# be chosen only if 'data_timestamp' is earlier than the
# 'created_at' timestamp of the snapshot; Otherwise, the
# backup will not be chosen as the parent.
# For example, a volume has a backup taken at 8:00, then
# a snapshot taken at 8:10, and then a backup at 8:20.
# When taking an incremental backup of the snapshot, the
# parent should be the backup at 8:00, not 8:20, and the
# 'data_timestamp' of this new backup will be 8:10.
latest_backup = max(
backups.objects,
key=lambda x: x['data_timestamp']
if (not snapshot or (snapshot and x['data_timestamp']
< snapshot['created_at']))
else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
else:
msg = _('No backups available to do an incremental backup.')
raise exception.InvalidBackup(reason=msg)
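# Use the selected backup as the parent of this incremental backup; the
# parent must itself be in the 'available' state.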
parent_id = None
if latest_backup:
parent_id = latest_backup.id
if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
msg = _('The parent backup must be available for '
'incremental backup.')
raise exception.InvalidBackup(reason=msg)
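# When backing up from a snapshot, the snapshot's creation time becomes
# the backup's data_timestamp.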
data_timestamp = None
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
data_timestamp = snapshot.created_at
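# Move the volume into the 'backing-up' state, keeping its previous
# status so it can be restored when the backup finishes.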
self.db.volume_update(context, volume_id,
{'status': 'backing-up',
'previous_status': previous_status})
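# Create the backup record in the 'creating' state and commit the quota
# reservation; on any failure, destroy the partial backup record and roll
# the reservation back.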
backup = None
try:
kwargs = {
'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'display_description': description,
'volume_id': volume_id,
'status': fields.BackupStatus.CREATING,
'container': container,
'parent_id': parent_id,
'size': volume['size'],
'host': host,
'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp,
}
backup = objects.Backup(context=context, **kwargs)
backup.create()
if not snapshot_id:
backup.data_timestamp = backup.created_at
backup.save()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if backup and 'id' in backup:
backup.destroy()
finally:
QUOTAS.rollback(context, reservations)
# TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context, backup)  # the RPC request goes through the message queue; the consuming side is not shown here
return backup
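# Illustrative call (not part of the original source): the REST API layer
# would invoke this method roughly as below; `backup_api` is assumed to be
# an instance of this API class and the argument values are hypothetical:
#   backup = backup_api.create(ctxt, name='nightly', description=None,
#                              volume_id=vol_id, container=None,
#                              incremental=True)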
|
|