summary | refs | log | tree | commit | diff | stats
path: root/nova/compute
diff options
context:
space:
mode:
author	Soren Hansen <soren@linux2go.dk>	2011-07-26 14:08:29 -0700
committer	Soren Hansen <soren@linux2go.dk>	2011-07-26 14:08:29 -0700
commit	27f2f9d7efd72cc5275a82dfdb8f9f44ba7b71b3 (patch)
tree	81f1bc74d4ec1a6b955ab1185e5146aae45ee5a3 /nova/compute
parent	3841a5515807b42e2e74e3119f76cdb2ef0f5575 (diff)
parent	4a52d4984e9349115f37d34e47e4d1141a8cf6fc (diff)
Merge trunk
Diffstat (limited to 'nova/compute')
-rw-r--r--	nova/compute/api.py	140
-rw-r--r--	nova/compute/instance_types.py	2
-rw-r--r--	nova/compute/manager.py	181
3 files changed, 212 insertions(+), 111 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index ad8886f23..adc023a4d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -32,6 +32,7 @@ from nova import quota
from nova import rpc
from nova import utils
from nova import volume
+from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
@@ -126,7 +127,7 @@ class API(base.Base):
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
pid = context.project_id
- msg = _("Quota exceeeded for %(pid)s, tried to set "
+ msg = _("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -137,7 +138,7 @@ class API(base.Base):
for k, v in metadata.iteritems():
if len(k) > 255 or len(v) > 255:
pid = context.project_id
- msg = _("Quota exceeeded for %(pid)s, metadata property "
+ msg = _("Quota exceeded for %(pid)s, metadata property "
"key or value too long") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -164,7 +165,7 @@ class API(base.Base):
instance_type)
if num_instances < min_count:
pid = context.project_id
- LOG.warn(_("Quota exceeeded for %(pid)s,"
+ LOG.warn(_("Quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances") % locals())
if num_instances <= 0:
message = _("Instance quota exceeded. You cannot run any "
@@ -217,6 +218,9 @@ class API(base.Base):
if reservation_id is None:
reservation_id = utils.generate_uid('r')
+ root_device_name = ec2utils.properties_root_device_name(
+ image['properties'])
+
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
@@ -241,11 +245,61 @@ class API(base.Base):
'availability_zone': availability_zone,
'os_type': os_type,
'architecture': architecture,
- 'vm_mode': vm_mode}
+ 'vm_mode': vm_mode,
+ 'root_device_name': root_device_name}
+
+ return (num_instances, base_options, image)
+
+ def _update_image_block_device_mapping(self, elevated_context, instance_id,
+ mappings):
+ """tell vm driver to create ephemeral/swap device at boot time by
+ updating BlockDeviceMapping
+ """
+ for bdm in ec2utils.mappings_prepend_dev(mappings):
+ LOG.debug(_("bdm %s"), bdm)
+
+ virtual_name = bdm['virtual']
+ if virtual_name == 'ami' or virtual_name == 'root':
+ continue
- return (num_instances, base_options)
+ assert (virtual_name == 'swap' or
+ virtual_name.startswith('ephemeral'))
+ values = {
+ 'instance_id': instance_id,
+ 'device_name': bdm['device'],
+ 'virtual_name': virtual_name, }
+ self.db.block_device_mapping_update_or_create(elevated_context,
+ values)
+
+ def _update_block_device_mapping(self, elevated_context, instance_id,
+ block_device_mapping):
+ """tell vm driver to attach volume at boot time by updating
+ BlockDeviceMapping
+ """
+ for bdm in block_device_mapping:
+ LOG.debug(_('bdm %s'), bdm)
+ assert 'device_name' in bdm
- def create_db_entry_for_new_instance(self, context, base_options,
+ values = {'instance_id': instance_id}
+ for key in ('device_name', 'delete_on_termination', 'virtual_name',
+ 'snapshot_id', 'volume_id', 'volume_size',
+ 'no_device'):
+ values[key] = bdm.get(key)
+
+ # NOTE(yamahata): NoDevice eliminates devices defined in image
+ # files by command line option.
+ # (--block-device-mapping)
+ if bdm.get('virtual_name') == 'NoDevice':
+ values['no_device'] = True
+ for k in ('delete_on_termination', 'volume_id',
+ 'snapshot_id', 'volume_id', 'volume_size',
+ 'virtual_name'):
+ values[k] = None
+
+ self.db.block_device_mapping_update_or_create(elevated_context,
+ values)
+
+ def create_db_entry_for_new_instance(self, context, image, base_options,
security_group, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
@@ -278,23 +332,14 @@ class API(base.Base):
instance_id,
security_group_id)
- block_device_mapping = block_device_mapping or []
- # NOTE(yamahata)
- # tell vm driver to attach volume at boot time by updating
- # BlockDeviceMapping
- for bdm in block_device_mapping:
- LOG.debug(_('bdm %s'), bdm)
- assert 'device_name' in bdm
- values = {
- 'instance_id': instance_id,
- 'device_name': bdm['device_name'],
- 'delete_on_termination': bdm.get('delete_on_termination'),
- 'virtual_name': bdm.get('virtual_name'),
- 'snapshot_id': bdm.get('snapshot_id'),
- 'volume_id': bdm.get('volume_id'),
- 'volume_size': bdm.get('volume_size'),
- 'no_device': bdm.get('no_device')}
- self.db.block_device_mapping_create(elevated, values)
+ # BlockDeviceMapping table
+ self._update_image_block_device_mapping(elevated, instance_id,
+ image['properties'].get('mappings', []))
+ self._update_block_device_mapping(elevated, instance_id,
+ image['properties'].get('block_device_mapping', []))
+ # override via command line option
+ self._update_block_device_mapping(elevated, instance_id,
+ block_device_mapping)
# Set sane defaults if not specified
updates = {}
@@ -352,7 +397,7 @@ class API(base.Base):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
- num_instances, base_options = self._check_create_parameters(
+ num_instances, base_options, image = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -390,7 +435,7 @@ class API(base.Base):
Returns a list of instance dicts.
"""
- num_instances, base_options = self._check_create_parameters(
+ num_instances, base_options, image = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -400,10 +445,11 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
+ block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
- instance = self.create_db_entry_for_new_instance(context,
+ instance = self.create_db_entry_for_new_instance(context, image,
base_options, security_group,
block_device_mapping, num=num)
instances.append(instance)
@@ -417,10 +463,10 @@ class API(base.Base):
return [dict(x.iteritems()) for x in instances]
- def has_finished_migration(self, context, instance_id):
+ def has_finished_migration(self, context, instance_uuid):
"""Returns true if an instance has a finished migration."""
try:
- db.migration_get_by_instance_and_status(context, instance_id,
+ db.migration_get_by_instance_and_status(context, instance_uuid,
'finished')
return True
except exception.NotFound:
@@ -513,6 +559,7 @@ class API(base.Base):
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{'method': 'refresh_provider_fw_rules', 'args': {}})
+ @scheduler_api.reroute_compute("update")
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@@ -728,6 +775,7 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
+ @scheduler_api.reroute_compute("backup")
def backup(self, context, instance_id, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
@@ -744,6 +792,7 @@ class API(base.Base):
extra_properties=extra_properties)
return recv_meta
+ @scheduler_api.reroute_compute("snapshot")
def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
@@ -786,10 +835,12 @@ class API(base.Base):
params=params)
return recv_meta
+ @scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance_id):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ @scheduler_api.reroute_compute("rebuild")
def rebuild(self, context, instance_id, image_href, name=None,
metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
@@ -820,39 +871,50 @@ class API(base.Base):
instance_id,
params=rebuild_params)
+ @scheduler_api.reroute_compute("revert_resize")
def revert_resize(self, context, instance_id):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
+ instance_ref = self._get_instance(context, instance_id,
+ 'revert_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_id, 'finished')
+ instance_ref['uuid'], 'finished')
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
params = {'migration_id': migration_ref['id']}
- self._cast_compute_message('revert_resize', context, instance_id,
- migration_ref['dest_compute'], params=params)
+ self._cast_compute_message('revert_resize', context,
+ instance_ref['uuid'],
+ migration_ref['source_compute'],
+ params=params)
+
self.db.migration_update(context, migration_ref['id'],
{'status': 'reverted'})
+ @scheduler_api.reroute_compute("confirm_resize")
def confirm_resize(self, context, instance_id):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
+ instance_ref = self._get_instance(context, instance_id,
+ 'confirm_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_id, 'finished')
+ instance_ref['uuid'], 'finished')
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
- instance_ref = self.db.instance_get(context, instance_id)
params = {'migration_id': migration_ref['id']}
- self._cast_compute_message('confirm_resize', context, instance_id,
- migration_ref['source_compute'], params=params)
+ self._cast_compute_message('confirm_resize', context,
+ instance_ref['uuid'],
+ migration_ref['dest_compute'],
+ params=params)
self.db.migration_update(context, migration_ref['id'],
{'status': 'confirmed'})
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
+ @scheduler_api.reroute_compute("resize")
def resize(self, context, instance_id, flavor_id=None):
"""Resize (ie, migrate) a running instance.
@@ -860,8 +922,8 @@ class API(base.Base):
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- instance = self.db.instance_get(context, instance_id)
- current_instance_type = instance['instance_type']
+ instance_ref = self._get_instance(context, instance_id, 'resize')
+ current_instance_type = instance_ref['instance_type']
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
@@ -889,10 +951,11 @@ class API(base.Base):
raise exception.ApiError(_("Invalid flavor: cannot use"
"the same flavor. "))
+ instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id,
+ "instance_id": instance_ref['uuid'],
"flavor_id": new_instance_type['id']}})
@scheduler_api.reroute_compute("add_fixed_ip")
@@ -964,6 +1027,7 @@ class API(base.Base):
"""Unrescue the given instance."""
self._cast_compute_message('unrescue_instance', context, instance_id)
+ @scheduler_api.reroute_compute("set_admin_password")
def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
host = self._find_host(context, instance_id)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 1d246e445..c13a629a9 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -112,7 +112,7 @@ def get_instance_type(id):
return get_default_instance_type()
try:
ctxt = context.get_admin_context()
- return db.instance_type_get_by_id(ctxt, id)
+ return db.instance_type_get(ctxt, id)
except exception.DBError:
raise exception.ApiError(_("Unknown instance type: %s") % id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 960dfea54..173469bc3 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -77,8 +77,6 @@ flags.DEFINE_integer('live_migration_retry_count', 30,
flags.DEFINE_integer("rescue_timeout", 0,
"Automatically unrescue an instance after N seconds."
" Set to 0 to disable.")
-flags.DEFINE_bool('auto_assign_floating_ip', False,
- 'Autoassigning floating ip to VM')
flags.DEFINE_integer('host_state_interval', 120,
'Interval in seconds for querying the host status')
@@ -93,6 +91,10 @@ def checks_instance_lock(function):
"""Decorator to prevent action against locked instances for non-admins."""
@functools.wraps(function)
def decorated_function(self, context, instance_id, *args, **kwargs):
+ #TODO(anyone): this being called instance_id is forcing a slightly
+ # confusing convention of pushing instance_uuids
+ # through an "instance_id" key in the queue args dict when
+ # casting through the compute API
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
@@ -212,6 +214,15 @@ class ComputeManager(manager.SchedulerDependentManager):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
+ def _get_instance_nw_info(self, context, instance):
+ """Get a list of dictionaries of network data of an instance.
+ Returns an empty list if stub_network flag is set."""
+ network_info = []
+ if not FLAGS.stub_network:
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ return network_info
+
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
@@ -224,6 +235,17 @@ class ComputeManager(manager.SchedulerDependentManager):
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance_id):
LOG.debug(_("setting up bdm %s"), bdm)
+
+ if bdm['no_device']:
+ continue
+ if bdm['virtual_name']:
+ # TODO(yamahata):
+ # block devices for swap and ephemeralN will be
+ # created by virt driver locally in compute node.
+ assert (bdm['virtual_name'] == 'swap' or
+ bdm['virtual_name'].startswith('ephemeral'))
+ continue
+
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
@@ -256,15 +278,6 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_mapping.append({'device_path': dev_path,
'mount_device':
bdm['device_name']})
- elif bdm['virtual_name'] is not None:
- # TODO(yamahata): ephemeral/swap device support
- LOG.debug(_('block_device_mapping: '
- 'ephemeral device is not supported yet'))
- else:
- # TODO(yamahata): NoDevice support
- assert bdm['no_device']
- LOG.debug(_('block_device_mapping: '
- 'no device is not supported yet'))
return block_device_mapping
@@ -272,16 +285,19 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Launch a new instance with specified options."""
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
- instance.injected_files = kwargs.get('injected_files', [])
- instance.admin_pass = kwargs.get('admin_password', None)
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
- self.db.instance_update(context,
- instance_id,
- {'host': self.host, 'launched_on': self.host})
-
+ updates = {}
+ updates['host'] = self.host
+ updates['launched_on'] = self.host
+ # NOTE(vish): used by virt but not in database
+ updates['injected_files'] = kwargs.get('injected_files', [])
+ updates['admin_pass'] = kwargs.get('admin_password', None)
+ instance = self.db.instance_update(context,
+ instance_id,
+ updates)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -297,8 +313,6 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self.network_api.allocate_for_instance(context,
instance, vpn=is_vpn)
LOG.debug(_("instance network_info: |%s|"), network_info)
- self.network_manager.setup_compute_network(context,
- instance_id)
else:
# TODO(tr3buchet) not really sure how this should be handled.
# virt requires network_info to be passed in but stub_network
@@ -352,6 +366,7 @@ class ComputeManager(manager.SchedulerDependentManager):
{'action_str': action_str, 'instance_id': instance_id},
context=context)
+ network_info = self._get_instance_nw_info(context, instance)
if not FLAGS.stub_network:
self.network_api.deallocate_for_instance(context, instance)
@@ -364,7 +379,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
- self.driver.destroy(instance)
+ self.driver.destroy(instance, network_info)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@@ -409,11 +424,16 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id, power_state.BUILDING)
- self.driver.destroy(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+
+ self.driver.destroy(instance_ref, network_info)
image_ref = kwargs.get('image_ref')
instance_ref.image_ref = image_ref
instance_ref.injected_files = kwargs.get('injected_files', [])
- self.driver.spawn(instance_ref)
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance_ref)
+ bd_mapping = self._setup_block_device_mapping(context, instance_id)
+ self.driver.spawn(instance_ref, network_info, bd_mapping)
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
@@ -446,8 +466,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
power_state.NOSTATE,
'rebooting')
- self.network_manager.setup_compute_network(context, instance_id)
- self.driver.reboot(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.reboot(instance_ref, network_info)
self._update_state(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -637,10 +657,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
power_state.NOSTATE,
'rescuing')
- self.network_manager.setup_compute_network(context, instance_id)
_update_state = lambda result: self._update_state_callback(
self, context, instance_id, result)
- self.driver.rescue(instance_ref, _update_state)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.rescue(instance_ref, _update_state, network_info)
self._update_state(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -656,7 +676,8 @@ class ComputeManager(manager.SchedulerDependentManager):
'unrescuing')
_update_state = lambda result: self._update_state_callback(
self, context, instance_id, result)
- self.driver.unrescue(instance_ref, _update_state)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.unrescue(instance_ref, _update_state, network_info)
self._update_state(context, instance_id)
@staticmethod
@@ -668,9 +689,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def confirm_resize(self, context, instance_id, migration_id):
"""Destroys the source instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- self.driver.destroy(instance_ref)
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.destroy(instance_ref, network_info)
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
@@ -686,17 +710,17 @@ class ComputeManager(manager.SchedulerDependentManager):
source machine.
"""
- instance_ref = self.db.instance_get(context, instance_id)
migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
- self.driver.destroy(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.destroy(instance_ref, network_info)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'finish_revert_resize',
- 'args': {
- 'migration_id': migration_ref['id'],
- 'instance_id': instance_id, },
+ 'args': {'migration_id': migration_ref['id']},
})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -708,17 +732,20 @@ class ComputeManager(manager.SchedulerDependentManager):
in the database.
"""
- instance_ref = self.db.instance_get(context, instance_id)
migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['old_flavor_id'])
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self.db.instance_update(context, instance_id,
+ self.db.instance_update(context, instance_ref['uuid'],
dict(memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb']))
+ local_gb=instance_type['local_gb'],
+ instance_type_id=instance_type['id']))
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
@@ -738,35 +765,42 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+
+ # Because of checks_instance_lock, this must currently be called
+ # instance_id. However, the compute API is always passing the UUID
+ # of the instance down
+ instance_ref = self.db.instance_get_by_uuid(context, instance_id)
+
if instance_ref['host'] == FLAGS.host:
raise exception.Error(_(
'Migration error: destination same as source!'))
- instance_type = self.db.instance_type_get_by_flavor_id(context,
+ old_instance_type = self.db.instance_type_get(context,
+ instance_ref['instance_type_id'])
+ new_instance_type = self.db.instance_type_get_by_flavor_id(context,
flavor_id)
+
migration_ref = self.db.migration_create(context,
- {'instance_id': instance_id,
+ {'instance_uuid': instance_ref['uuid'],
'source_compute': instance_ref['host'],
'dest_compute': FLAGS.host,
'dest_host': self.driver.get_host_ip_addr(),
- 'old_flavor_id': instance_type['flavorid'],
+ 'old_flavor_id': old_instance_type['flavorid'],
'new_flavor_id': flavor_id,
'status': 'pre-migrating'})
- LOG.audit(_('instance %s: migrating to '), instance_id,
+ LOG.audit(_('instance %s: migrating'), instance_ref['uuid'],
context=context)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'resize_instance',
- 'args': {
- 'migration_id': migration_ref['id'],
- 'instance_id': instance_id, },
- })
+ 'args': {'instance_id': instance_ref['uuid'],
+ 'migration_id': migration_ref['id']}})
+
usage_info = utils.usage_from_instance(instance_ref,
- new_instance_type=instance_type['name'],
- new_instance_type_id=instance_type['id'])
+ new_instance_type=new_instance_type['name'],
+ new_instance_type_id=new_instance_type['id'])
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier.INFO,
@@ -777,7 +811,9 @@ class ComputeManager(manager.SchedulerDependentManager):
def resize_instance(self, context, instance_id, migration_id):
"""Starts the migration of a running instance to another host."""
migration_ref = self.db.migration_get(context, migration_id)
- instance_ref = self.db.instance_get(context, instance_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
self.db.migration_update(context,
migration_id,
{'status': 'migrating'})
@@ -793,10 +829,11 @@ class ComputeManager(manager.SchedulerDependentManager):
topic = self.db.queue_get_for(context,
FLAGS.compute_topic,
migration_ref['dest_compute'])
+ params = {'migration_id': migration_id,
+ 'disk_info': disk_info,
+ 'instance_id': instance_ref['uuid']}
rpc.cast(context, topic, {'method': 'finish_resize',
- 'args': {'migration_id': migration_id,
- 'instance_id': instance_id,
- 'disk_info': disk_info}})
+ 'args': params})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -808,24 +845,20 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
migration_ref = self.db.migration_get(context, migration_id)
- instance_ref = self.db.instance_get(context,
- migration_ref['instance_id'])
- # TODO(mdietz): apply the rest of the instance_type attributes going
- # after they're supported
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['new_flavor_id'])
- self.db.instance_update(context, instance_id,
+ self.db.instance_update(context, instance_ref.uuid,
dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))
- # reload the updated instance ref
- # FIXME(mdietz): is there reload functionality?
- instance = self.db.instance_get(context, instance_id)
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
- self.driver.finish_resize(instance, disk_info, network_info)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ instance_ref.uuid)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.finish_resize(instance_ref, disk_info, network_info)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@@ -838,7 +871,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
self.network_api.add_fixed_ip_to_instance(context, instance_id,
- network_id)
+ self.host, network_id)
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
@@ -957,7 +990,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
LOG.debug(_('instance %s: getting locked state'), instance_id,
context=context)
- instance_ref = self.db.instance_get(context, instance_id)
+ if utils.is_uuid_like(instance_id):
+ uuid = instance_id
+ instance_ref = self.db.instance_get_by_uuid(context, uuid)
+ else:
+ instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
@checks_instance_lock
@@ -974,8 +1011,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
instance = self.db.instance_get(context, instance_id)
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
self.driver.inject_network_info(instance, network_info)
@@ -1193,17 +1229,17 @@ class ComputeManager(manager.SchedulerDependentManager):
#
# Retry operation is necessary because continuously request comes,
# concorrent request occurs to iptables, then it complains.
+ network_info = self._get_instance_nw_info(context, instance_ref)
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
- self.network_manager.setup_compute_network(context,
- instance_id)
+ self.driver.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
- LOG.warn(_("setup_compute_network() failed %(cnt)d."
+ LOG.warn(_("plug_vifs() failed %(cnt)d."
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
@@ -1281,8 +1317,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# Releasing vlan.
# (not necessary in current implementation?)
+ network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
- self.driver.unfilter_instance(instance_ref)
+ self.driver.unfilter_instance(instance_ref, network_info)
# Database updating.
i_name = instance_ref.name