author     Brian Waldon <brian.waldon@rackspace.com>    2011-07-26 13:13:41 -0400
committer  Brian Waldon <brian.waldon@rackspace.com>    2011-07-26 13:13:41 -0400
commit     d4803039c19a01087964c499c7e9ef9abfa82f74 (patch)
tree       6e8ac36399f6a7863eecca88ff6255af1cf46a59 /nova/compute
parent     8501cc95aa60a0a5759cf911e8adaf624fa9e547 (diff)
parent     48a6bf42b3af5323d35f9a31bd4233712165b276 (diff)
merging trunk; resolving conflicts
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py              42
-rw-r--r--  nova/compute/instance_types.py    2
-rw-r--r--  nova/compute/manager.py         154
3 files changed, 121 insertions, 77 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b13bd5013..d1e5647d2 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -127,7 +127,7 @@ class API(base.Base):
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
pid = context.project_id
- msg = _("Quota exceeeded for %(pid)s, tried to set "
+ msg = _("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -138,7 +138,7 @@ class API(base.Base):
for k, v in metadata.iteritems():
if len(k) > 255 or len(v) > 255:
pid = context.project_id
- msg = _("Quota exceeeded for %(pid)s, metadata property "
+ msg = _("Quota exceeded for %(pid)s, metadata property "
"key or value too long") % locals()
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -165,7 +165,7 @@ class API(base.Base):
instance_type)
if num_instances < min_count:
pid = context.project_id
- LOG.warn(_("Quota exceeeded for %(pid)s,"
+ LOG.warn(_("Quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances") % locals())
if num_instances <= 0:
message = _("Instance quota exceeded. You cannot run any "
@@ -467,10 +467,10 @@ class API(base.Base):
return [dict(x.iteritems()) for x in instances]
- def has_finished_migration(self, context, instance_id):
+ def has_finished_migration(self, context, instance_uuid):
"""Returns true if an instance has a finished migration."""
try:
- db.migration_get_by_instance_and_status(context, instance_id,
+ db.migration_get_by_instance_and_status(context, instance_uuid,
'finished')
return True
except exception.NotFound:
@@ -873,39 +873,50 @@ class API(base.Base):
instance_id,
params=rebuild_params)
+ @scheduler_api.reroute_compute("revert_resize")
def revert_resize(self, context, instance_id):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
+ instance_ref = self._get_instance(context, instance_id,
+ 'revert_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_id, 'finished')
+ instance_ref['uuid'], 'finished')
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
params = {'migration_id': migration_ref['id']}
- self._cast_compute_message('revert_resize', context, instance_id,
- migration_ref['dest_compute'], params=params)
+ self._cast_compute_message('revert_resize', context,
+ instance_ref['uuid'],
+ migration_ref['source_compute'],
+ params=params)
+
self.db.migration_update(context, migration_ref['id'],
{'status': 'reverted'})
+ @scheduler_api.reroute_compute("confirm_resize")
def confirm_resize(self, context, instance_id):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
+ instance_ref = self._get_instance(context, instance_id,
+ 'confirm_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_id, 'finished')
+ instance_ref['uuid'], 'finished')
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
- instance_ref = self.db.instance_get(context, instance_id)
params = {'migration_id': migration_ref['id']}
- self._cast_compute_message('confirm_resize', context, instance_id,
- migration_ref['source_compute'], params=params)
+ self._cast_compute_message('confirm_resize', context,
+ instance_ref['uuid'],
+ migration_ref['dest_compute'],
+ params=params)
self.db.migration_update(context, migration_ref['id'],
{'status': 'confirmed'})
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
+ @scheduler_api.reroute_compute("resize")
def resize(self, context, instance_id, flavor_id=None):
"""Resize (ie, migrate) a running instance.
@@ -913,8 +924,8 @@ class API(base.Base):
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- instance = self.db.instance_get(context, instance_id)
- current_instance_type = instance['instance_type']
+ instance_ref = self._get_instance(context, instance_id, 'resize')
+ current_instance_type = instance_ref['instance_type']
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
@@ -942,10 +953,11 @@ class API(base.Base):
raise exception.ApiError(_("Invalid flavor: cannot use"
"the same flavor. "))
+ instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id,
+ "instance_id": instance_ref['uuid'],
"flavor_id": new_instance_type['id']}})
@scheduler_api.reroute_compute("add_fixed_ip")
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 1d246e445..c13a629a9 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -112,7 +112,7 @@ def get_instance_type(id):
return get_default_instance_type()
try:
ctxt = context.get_admin_context()
- return db.instance_type_get_by_id(ctxt, id)
+ return db.instance_type_get(ctxt, id)
except exception.DBError:
raise exception.ApiError(_("Unknown instance type: %s") % id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 04609d7c5..c79abd696 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -77,8 +77,6 @@ flags.DEFINE_integer('live_migration_retry_count', 30,
flags.DEFINE_integer("rescue_timeout", 0,
"Automatically unrescue an instance after N seconds."
" Set to 0 to disable.")
-flags.DEFINE_bool('auto_assign_floating_ip', False,
- 'Autoassigning floating ip to VM')
flags.DEFINE_integer('host_state_interval', 120,
'Interval in seconds for querying the host status')
@@ -93,6 +91,10 @@ def checks_instance_lock(function):
"""Decorator to prevent action against locked instances for non-admins."""
@functools.wraps(function)
def decorated_function(self, context, instance_id, *args, **kwargs):
+ #TODO(anyone): this being called instance_id is forcing a slightly
+ # confusing convention of pushing instance_uuids
+ # through an "instance_id" key in the queue args dict when
+ # casting through the compute API
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
@@ -212,6 +214,15 @@ class ComputeManager(manager.SchedulerDependentManager):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
+ def _get_instance_nw_info(self, context, instance):
+ """Get a list of dictionaries of network data of an instance.
+ Returns an empty list if stub_network flag is set."""
+ network_info = []
+ if not FLAGS.stub_network:
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ return network_info
+
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
@@ -274,16 +285,19 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Launch a new instance with specified options."""
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
- instance.injected_files = kwargs.get('injected_files', [])
- instance.admin_pass = kwargs.get('admin_password', None)
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
- self.db.instance_update(context,
- instance_id,
- {'host': self.host, 'launched_on': self.host})
-
+ updates = {}
+ updates['host'] = self.host
+ updates['launched_on'] = self.host
+ # NOTE(vish): used by virt but not in database
+ updates['injected_files'] = kwargs.get('injected_files', [])
+ updates['admin_pass'] = kwargs.get('admin_password', None)
+ instance = self.db.instance_update(context,
+ instance_id,
+ updates)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -299,8 +313,6 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self.network_api.allocate_for_instance(context,
instance, vpn=is_vpn)
LOG.debug(_("instance network_info: |%s|"), network_info)
- self.network_manager.setup_compute_network(context,
- instance_id)
else:
# TODO(tr3buchet) not really sure how this should be handled.
# virt requires network_info to be passed in but stub_network
@@ -354,6 +366,7 @@ class ComputeManager(manager.SchedulerDependentManager):
{'action_str': action_str, 'instance_id': instance_id},
context=context)
+ network_info = self._get_instance_nw_info(context, instance)
if not FLAGS.stub_network:
self.network_api.deallocate_for_instance(context, instance)
@@ -366,7 +379,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
- self.driver.destroy(instance)
+ self.driver.destroy(instance, network_info)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@@ -411,7 +424,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id, power_state.BUILDING)
- self.driver.destroy(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+
+ self.driver.destroy(instance_ref, network_info)
image_ref = kwargs.get('image_ref')
instance_ref.image_ref = image_ref
instance_ref.injected_files = kwargs.get('injected_files', [])
@@ -451,8 +466,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
power_state.NOSTATE,
'rebooting')
- self.network_manager.setup_compute_network(context, instance_id)
- self.driver.reboot(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.reboot(instance_ref, network_info)
self._update_state(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -642,10 +657,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
power_state.NOSTATE,
'rescuing')
- self.network_manager.setup_compute_network(context, instance_id)
_update_state = lambda result: self._update_state_callback(
self, context, instance_id, result)
- self.driver.rescue(instance_ref, _update_state)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.rescue(instance_ref, _update_state, network_info)
self._update_state(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -661,7 +676,8 @@ class ComputeManager(manager.SchedulerDependentManager):
'unrescuing')
_update_state = lambda result: self._update_state_callback(
self, context, instance_id, result)
- self.driver.unrescue(instance_ref, _update_state)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.unrescue(instance_ref, _update_state, network_info)
self._update_state(context, instance_id)
@staticmethod
@@ -673,9 +689,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def confirm_resize(self, context, instance_id, migration_id):
"""Destroys the source instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- self.driver.destroy(instance_ref)
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.destroy(instance_ref, network_info)
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
@@ -691,17 +710,17 @@ class ComputeManager(manager.SchedulerDependentManager):
source machine.
"""
- instance_ref = self.db.instance_get(context, instance_id)
migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
- self.driver.destroy(instance_ref)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.destroy(instance_ref, network_info)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'finish_revert_resize',
- 'args': {
- 'migration_id': migration_ref['id'],
- 'instance_id': instance_id, },
+ 'args': {'migration_id': migration_ref['id']},
})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -713,17 +732,20 @@ class ComputeManager(manager.SchedulerDependentManager):
in the database.
"""
- instance_ref = self.db.instance_get(context, instance_id)
migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['old_flavor_id'])
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self.db.instance_update(context, instance_id,
+ self.db.instance_update(context, instance_ref['uuid'],
dict(memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb']))
+ local_gb=instance_type['local_gb'],
+ instance_type_id=instance_type['id']))
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
@@ -743,35 +765,42 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+
+ # Because of checks_instance_lock, this must currently be called
+ # instance_id. However, the compute API is always passing the UUID
+ # of the instance down
+ instance_ref = self.db.instance_get_by_uuid(context, instance_id)
+
if instance_ref['host'] == FLAGS.host:
raise exception.Error(_(
'Migration error: destination same as source!'))
- instance_type = self.db.instance_type_get_by_flavor_id(context,
+ old_instance_type = self.db.instance_type_get(context,
+ instance_ref['instance_type_id'])
+ new_instance_type = self.db.instance_type_get_by_flavor_id(context,
flavor_id)
+
migration_ref = self.db.migration_create(context,
- {'instance_id': instance_id,
+ {'instance_uuid': instance_ref['uuid'],
'source_compute': instance_ref['host'],
'dest_compute': FLAGS.host,
'dest_host': self.driver.get_host_ip_addr(),
- 'old_flavor_id': instance_type['flavorid'],
+ 'old_flavor_id': old_instance_type['flavorid'],
'new_flavor_id': flavor_id,
'status': 'pre-migrating'})
- LOG.audit(_('instance %s: migrating to '), instance_id,
+ LOG.audit(_('instance %s: migrating'), instance_ref['uuid'],
context=context)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'resize_instance',
- 'args': {
- 'migration_id': migration_ref['id'],
- 'instance_id': instance_id, },
- })
+ 'args': {'instance_id': instance_ref['uuid'],
+ 'migration_id': migration_ref['id']}})
+
usage_info = utils.usage_from_instance(instance_ref,
- new_instance_type=instance_type['name'],
- new_instance_type_id=instance_type['id'])
+ new_instance_type=new_instance_type['name'],
+ new_instance_type_id=new_instance_type['id'])
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier.INFO,
@@ -782,7 +811,9 @@ class ComputeManager(manager.SchedulerDependentManager):
def resize_instance(self, context, instance_id, migration_id):
"""Starts the migration of a running instance to another host."""
migration_ref = self.db.migration_get(context, migration_id)
- instance_ref = self.db.instance_get(context, instance_id)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
+
self.db.migration_update(context,
migration_id,
{'status': 'migrating'})
@@ -798,10 +829,11 @@ class ComputeManager(manager.SchedulerDependentManager):
topic = self.db.queue_get_for(context,
FLAGS.compute_topic,
migration_ref['dest_compute'])
+ params = {'migration_id': migration_id,
+ 'disk_info': disk_info,
+ 'instance_id': instance_ref['uuid']}
rpc.cast(context, topic, {'method': 'finish_resize',
- 'args': {'migration_id': migration_id,
- 'instance_id': instance_id,
- 'disk_info': disk_info}})
+ 'args': params})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -813,24 +845,20 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
migration_ref = self.db.migration_get(context, migration_id)
- instance_ref = self.db.instance_get(context,
- migration_ref['instance_id'])
- # TODO(mdietz): apply the rest of the instance_type attributes going
- # after they're supported
+ instance_ref = self.db.instance_get_by_uuid(context,
+ migration_ref.instance_uuid)
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['new_flavor_id'])
- self.db.instance_update(context, instance_id,
+ self.db.instance_update(context, instance_ref.uuid,
dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))
- # reload the updated instance ref
- # FIXME(mdietz): is there reload functionality?
- instance = self.db.instance_get(context, instance_id)
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
- self.driver.finish_resize(instance, disk_info, network_info)
+ instance_ref = self.db.instance_get_by_uuid(context,
+ instance_ref.uuid)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.finish_resize(instance_ref, disk_info, network_info)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@@ -962,7 +990,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
LOG.debug(_('instance %s: getting locked state'), instance_id,
context=context)
- instance_ref = self.db.instance_get(context, instance_id)
+ if utils.is_uuid_like(instance_id):
+ uuid = instance_id
+ instance_ref = self.db.instance_get_by_uuid(context, uuid)
+ else:
+ instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
@checks_instance_lock
@@ -979,8 +1011,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
instance = self.db.instance_get(context, instance_id)
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
self.driver.inject_network_info(instance, network_info)
@@ -1198,17 +1229,17 @@ class ComputeManager(manager.SchedulerDependentManager):
#
# Retry operation is necessary because continuously request comes,
# concorrent request occurs to iptables, then it complains.
+ network_info = self._get_instance_nw_info(context, instance_ref)
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
- self.network_manager.setup_compute_network(context,
- instance_id)
+ self.driver.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
- LOG.warn(_("setup_compute_network() failed %(cnt)d."
+ LOG.warn(_("plug_vifs() failed %(cnt)d."
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
@@ -1286,8 +1317,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# Releasing vlan.
# (not necessary in current implementation?)
+ network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
- self.driver.unfilter_instance(instance_ref)
+ self.driver.unfilter_instance(instance_ref, network_info)
# Database updating.
i_name = instance_ref.name