-rw-r--r--  HACKING.rst | 21
-rw-r--r--  nova/cells/rpcapi.py | 2
-rwxr-xr-x  nova/compute/manager.py | 223
-rw-r--r--  nova/db/sqlalchemy/api.py | 4
-rw-r--r--  nova/network/linux_net.py | 6
-rw-r--r--  nova/objects/instance.py | 65
-rw-r--r--  nova/objects/instance_info_cache.py | 42
-rw-r--r--  nova/objects/utils.py | 17
-rw-r--r--  nova/scheduler/manager.py | 5
-rw-r--r--  nova/test.py | 5
-rw-r--r--  nova/tests/api/ec2/test_cinder_cloud.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_availability_zone.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_config_drive.py | 7
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_server_start_stop.py | 2
-rw-r--r--  nova/tests/cells/test_cells_rpcapi.py | 5
-rw-r--r--  nova/tests/compute/test_compute.py | 74
-rw-r--r--  nova/tests/compute/test_virtapi.py | 6
-rw-r--r--  nova/tests/db/test_db_api.py | 144
-rw-r--r--  nova/tests/db/test_migrations.py | 8
-rw-r--r--  nova/tests/fakeguestfs.py | 8
-rw-r--r--  nova/tests/image/fake.py | 2
-rw-r--r--  nova/tests/objects/test_instance.py | 37
-rw-r--r--  nova/tests/objects/test_instance_info_cache.py | 54
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 30
-rw-r--r--  nova/tests/test_availability_zones.py | 37
-rw-r--r--  nova/tests/test_service.py | 3
-rw-r--r--  nova/tests/test_wsgi.py | 5
-rw-r--r--  nova/tests/utils.py | 14
-rw-r--r--  nova/tests/virt/baremetal/test_ipmi.py | 2
-rw-r--r--  nova/tests/virt/libvirt/test_libvirt.py | 4
-rw-r--r--  nova/tests/virt/test_virt_disk.py | 22
-rw-r--r--  nova/tests/virt/test_virt_disk_vfs_guestfs.py | 6
-rw-r--r--  nova/tests/virt/test_virt_disk_vfs_localfs.py | 8
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmwareapi.py | 6
-rw-r--r--  nova/tests/virt/xenapi/test_xenapi.py | 32
-rwxr-xr-x  nova/virt/disk/api.py | 8
-rwxr-xr-x  nova/virt/fake.py | 6
-rwxr-xr-x  nova/virt/libvirt/driver.py | 2
-rw-r--r--  nova/virt/virtapi.py | 14
-rwxr-xr-x  nova/virt/vmwareapi/driver.py | 10
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 25
-rw-r--r--  nova/virt/xenapi/host.py | 30
-rw-r--r--  nova/virt/xenapi/vmops.py | 43
43 files changed, 679 insertions, 373 deletions
diff --git a/HACKING.rst b/HACKING.rst
index c272bfc25..082beb87d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -261,6 +261,27 @@ run --parallel` will run it in parallel (this is the default incantation tox
uses.) More information about testr can be found at:
http://wiki.openstack.org/testr
+Building Docs
+-------------
+Normal Sphinx docs can be built via the setuptools `build_sphinx` command. To
+do this via `tox`, simply run `tox -evenv -- python setup.py build_sphinx`,
+which will cause a virtualenv with all of the needed dependencies to be
+created and then inside of the virtualenv, the docs will be created and
+put into doc/build/html.
+
+If you'd like a PDF of the documentation, you'll need LaTeX installed, and
+additionally some fonts. On Ubuntu systems, you can get what you need with::
+
+ apt-get install texlive-latex-recommended texlive-latex-extra texlive-fonts-recommended
+
+Then run `build_sphinx_latex`, change to the build dir and run `make`.
+Like so::
+
+ tox -evenv -- python setup.py build_sphinx_latex
+ cd build/sphinx/latex
+ make
+
+You should wind up with a PDF - Nova.pdf.
oslo-incubator
----------------
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index dd757c818..767498291 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -97,7 +97,7 @@ class CellsAPI(rpc_proxy.RpcProxy):
build_inst_kwargs['image'])
self.cast(ctxt, self.make_msg('build_instances',
build_inst_kwargs=build_inst_kwargs),
- version=1.8)
+ version='1.8')
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c84afa25b..2ae435f9a 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -304,14 +304,6 @@ class ComputeVirtAPI(virtapi.VirtAPI):
instance_uuid,
**updates)
- def instance_get_by_uuid(self, context, instance_uuid):
- return self._compute.conductor_api.instance_get_by_uuid(
- context, instance_uuid)
-
- def instance_get_all_by_host(self, context, host):
- return self._compute.conductor_api.instance_get_all_by_host(
- context, host)
-
def aggregate_get_by_host(self, context, host, key=None):
return self._compute.conductor_api.aggregate_get_by_host(context,
host, key=key)
@@ -382,9 +374,9 @@ class ComputeManager(manager.SchedulerDependentManager):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if nodename not in self.driver.get_available_nodes():
- msg = _("%(nodename)s is not a valid node managed by this "
- "compute host.") % locals()
- raise exception.NovaException(msg)
+ raise exception.NovaException(
+ _("%s is not a valid node managed by this "
+ "compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
@@ -463,7 +455,8 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.info(_('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
- locals(), instance=instance)
+ {'instance_host': instance_host,
+ 'our_host': our_host}, instance=instance)
destroy_disks = False
try:
network_info = self._get_instance_nw_info(context,
@@ -533,7 +526,23 @@ class ComputeManager(manager.SchedulerDependentManager):
legacy_net_info[0][1].get('vif_type') is None):
# Call to network API to get instance info, this will
# force an update to the instance's info_cache
- net_info = self._get_instance_nw_info(context, instance)
+ retry_time = 0
+ # Continue retrying until _get_instance_nw_info() succeeds.
+ while True:
+ try:
+ net_info = self._get_instance_nw_info(context, instance)
+ break
+ except Exception:
+ # Retry in an exponential backoff fashion
+ # capped at 60 seconds.
+ if retry_time < 60:
+ retry_time += 6
+ LOG.exception(_("Error raised getting network info for "
+ "instance %(instance_uuid)s. Retrying "
+ "in %(retry_time)s seconds."),
+ {'instance_uuid': instance['uuid'],
+ 'retry_time': retry_time})
+ time.sleep(retry_time)
legacy_net_info = self._legacy_nw_info(net_info)
self.driver.plug_vifs(instance, legacy_net_info)
@@ -543,7 +552,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
power_on = sys_meta.get('old_vm_state') != vm_states.STOPPED
block_dev_info = self._get_instance_volume_block_device_info(
@@ -569,12 +578,13 @@ class ComputeManager(manager.SchedulerDependentManager):
drv_state != db_state)
LOG.debug(_('Current state is %(drv_state)s, state in DB is '
- '%(db_state)s.'), locals(), instance=instance)
+ '%(db_state)s.'),
+ {'drv_state': drv_state, 'db_state': db_state},
+ instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
- LOG.info(
- _('Rebooting instance after nova-compute restart.'),
- locals(), instance=instance)
+ LOG.info(_('Rebooting instance after nova-compute restart.'),
+ instance=instance)
block_device_info = \
self._get_instance_volume_block_device_info(
@@ -643,8 +653,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
- instances = self.conductor_api.instance_get_all_by_host(context,
- self.host)
+ instances = instance_obj.InstanceList.get_by_host(
+ context, self.host, expected_attrs=['info_cache'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
@@ -937,8 +947,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if node is None:
node = self.driver.get_available_nodes()[0]
- LOG.debug(_("No node specified, defaulting to %(node)s") %
- locals())
+ LOG.debug(_("No node specified, defaulting to %s"), node)
network_info = None
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
@@ -982,9 +991,8 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
self._deallocate_network(context, instance)
except Exception:
- msg = _('Failed to dealloc network '
- 'for deleted instance')
- LOG.exception(msg, instance=instance)
+ LOG.exception(_('Failed to dealloc network for '
+ 'deleted instance'), instance=instance)
except exception.UnexpectedTaskStateError as e:
exc_info = sys.exc_info()
# Make sure the async call finishes
@@ -1490,8 +1498,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self._delete_instance(context, instance, bdms,
reservations=reservations)
except exception.InstanceTerminationFailure as error:
- msg = _('%s. Setting instance vm_state to ERROR')
- LOG.error(msg % error, instance=instance)
+ LOG.error(_('%s. Setting instance vm_state to ERROR'),
+ error, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
except exception.InstanceNotFound as e:
LOG.warn(e, instance=instance)
@@ -1821,9 +1829,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
- LOG.warn(_('trying to reboot a non-running '
- 'instance: (state: %(state)s '
- 'expected: %(running)s)') % locals(),
+ LOG.warn(_('trying to reboot a non-running instance:'
+ ' (state: %(state)s expected: %(running)s)'),
+ {'state': state, 'running': running},
context=context, instance=instance)
def bad_volumes_callback(bad_devices):
@@ -1837,7 +1845,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as exc:
- LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
+ LOG.error(_('Cannot reboot instance: %s'), exc,
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, exc, sys.exc_info())
@@ -1883,9 +1891,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
- LOG.warn(_('trying to snapshot a non-running '
- 'instance: (state: %(state)s '
- 'expected: %(running)s)') % locals(),
+ LOG.warn(_('trying to snapshot a non-running instance: '
+ '(state: %(state)s expected: %(running)s)'),
+ {'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
@@ -1945,7 +1953,8 @@ class ComputeManager(manager.SchedulerDependentManager):
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"),
- locals(), instance=instance)
+ {'num_images': num_images, 'rotation': rotation},
+ instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
@@ -2033,11 +2042,12 @@ class ComputeManager(manager.SchedulerDependentManager):
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
- LOG.warn(_('trying to inject a file into a non-running '
- '(state: %(current_power_state)s '
- 'expected: %(expected_state)s)') % locals(),
+ LOG.warn(_('trying to inject a file into a non-running (state: '
+ '%(current_state)s expected: %(expected_state)s)'),
+ {'current_state': current_power_state,
+ 'expected_state': expected_state},
instance=instance)
- LOG.audit(_('injecting file to %(path)s') % locals(),
+ LOG.audit(_('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
@@ -2132,8 +2142,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
- LOG.debug(_("Changing instance metadata according to %(diff)r") %
- locals(), instance=instance)
+ LOG.debug(_("Changing instance metadata according to %r"),
+ diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, migration, instance,
@@ -2147,10 +2157,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
if restore_old:
- instance_type = flavors.extract_flavor(instance,
- 'old_')
- sys_meta = flavors.save_flavor_info(sys_meta,
- instance_type)
+ instance_type = flavors.extract_flavor(instance, 'old_')
+ sys_meta = flavors.save_flavor_info(sys_meta, instance_type)
else:
instance_type = flavors.extract_flavor(instance)
@@ -2436,8 +2444,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
if node is None:
node = self.driver.get_available_nodes()[0]
- LOG.debug(_("No node specified, defaulting to %(node)s") %
- locals())
+ LOG.debug(_("No node specified, defaulting to %s"), node)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
@@ -2669,9 +2676,9 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
self._quota_rollback(context, reservations)
except Exception as qr_error:
- reason = _("Failed to rollback quota for failed "
- "finish_resize: %(qr_error)s")
- LOG.exception(reason % locals(), instance=instance)
+ LOG.exception(_("Failed to rollback quota for failed "
+ "finish_resize: %s"),
+ qr_error, instance=instance)
LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
@@ -2976,7 +2983,8 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id = volume['id']
context = context.elevated()
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
- locals(), context=context, instance=instance)
+ {'volume_id': volume_id, 'mountpoint': mountpoint},
+ context=context, instance=instance)
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume_id,
@@ -3026,7 +3034,8 @@ class ComputeManager(manager.SchedulerDependentManager):
def _attach_volume(self, context, volume_id, mountpoint, instance):
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
- locals(), context=context, instance=instance)
+ {'volume_id': volume_id, 'mountpoint': mountpoint},
+ context=context, instance=instance)
try:
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
@@ -3034,10 +3043,11 @@ class ComputeManager(manager.SchedulerDependentManager):
connector)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- msg = _("Failed to connect to volume %(volume_id)s "
- "while attaching at %(mountpoint)s")
- LOG.exception(msg % locals(), context=context,
- instance=instance)
+ LOG.exception(_("Failed to connect to volume %(volume_id)s "
+ "while attaching at %(mountpoint)s"),
+ {'volume_id': volume_id,
+ 'mountpoint': mountpoint},
+ context=context, instance=instance)
self.volume_api.unreserve_volume(context, volume_id)
if 'serial' not in connection_info:
@@ -3049,10 +3059,11 @@ class ComputeManager(manager.SchedulerDependentManager):
mountpoint)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- msg = _("Failed to attach volume %(volume_id)s "
- "at %(mountpoint)s")
- LOG.exception(msg % locals(), context=context,
- instance=instance)
+ LOG.exception(_("Failed to attach volume %(volume_id)s "
+ "at %(mountpoint)s") %
+ {'volume_id': volume_id,
+ 'mountpoint': mountpoint},
+ context=context, instance=instance)
self.volume_api.terminate_connection(context,
volume_id,
connector)
@@ -3080,7 +3091,8 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id = bdm['volume_id']
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
- locals(), context=context, instance=instance)
+ {'volume_id': volume_id, 'mp': mp},
+ context=context, instance=instance)
connection_info = jsonutils.loads(bdm['connection_info'])
# NOTE(vish): We currently don't use the serial when disconnecting,
@@ -3096,9 +3108,10 @@ class ComputeManager(manager.SchedulerDependentManager):
mp)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
- LOG.exception(msg % locals(), context=context,
- instance=instance)
+ LOG.exception(_('Failed to detach volume %(volume_id)s '
+ 'from %(mp)s'),
+ {'volume_id': volume_id, 'mp': mp},
+ context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -3175,8 +3188,8 @@ class ComputeManager(manager.SchedulerDependentManager):
condemned = (network, mapping)
break
if condemned is None:
- raise exception.PortNotFound(_("Port %(port_id)s is not "
- "attached") % locals())
+ raise exception.PortNotFound(_("Port %s is not "
+ "attached") % port_id)
self.network_api.deallocate_port_for_instance(context, instance,
port_id,
@@ -3189,7 +3202,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
return compute_node_ref['compute_node'][0]
except IndexError:
- raise exception.NotFound(_("Host %(host)s not found") % locals())
+ raise exception.NotFound(_("Host %s not found") % host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_instance_shared_storage(self, ctxt, instance, data):
@@ -3342,8 +3355,8 @@ class ComputeManager(manager.SchedulerDependentManager):
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Pre live migration failed at %(dest)s'),
- locals(), instance=instance)
+ LOG.exception(_('Pre live migration failed at %s'),
+ dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
@@ -3425,8 +3438,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
- LOG.info(_('Migrating instance to %(dest)s finished successfully.'),
- locals(), instance=instance_ref)
+ LOG.info(_('Migrating instance to %s finished successfully.'),
+ dest, instance=instance_ref)
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
@@ -3658,46 +3671,48 @@ class ComputeManager(manager.SchedulerDependentManager):
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
- migration_id = migration['id']
- msg = _("Setting migration %(migration_id)s to error: "
- "%(reason)s") % locals()
- LOG.warn(msg, **kwargs)
+ LOG.warn(_("Setting migration %(migration_id)s to error: "
+ "%(reason)s"),
+ {'migration_id': migration['id'], 'reason': reason},
+ **kwargs)
self.conductor_api.migration_update(context, migration,
'error')
for migration in migrations:
- migration_id = migration['id']
instance_uuid = migration['instance_uuid']
LOG.info(_("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
- locals())
+ {'migration_id': migration['id'],
+ 'instance_uuid': instance_uuid})
try:
instance = self.conductor_api.instance_get_by_uuid(
context, instance_uuid)
except exception.InstanceNotFound:
- reason = _("Instance %(instance_uuid)s not found")
- _set_migration_to_error(migration, reason % locals())
+ reason = (_("Instance %s not found") %
+ instance_uuid)
+ _set_migration_to_error(migration, reason)
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
- _set_migration_to_error(migration, reason % locals(),
+ _set_migration_to_error(migration, reason,
instance=instance)
continue
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
- reason = _("In states %(vm_state)s/%(task_state)s, not "
- "RESIZED/None")
- _set_migration_to_error(migration, reason % locals(),
+ reason = (_("In states %(vm_state)s/%(task_state)s, not "
+ "RESIZED/None") %
+ {'vm_state': vm_state,
+ 'task_state': task_state})
+ _set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.conductor_api.compute_confirm_resize(
context, instance, migration_ref=migration)
except Exception as e:
- msg = _("Error auto-confirming resize: %(e)s. "
- "Will retry later.")
- LOG.error(msg % locals(), instance=instance)
+ LOG.error(_("Error auto-confirming resize: %s. "
+ "Will retry later.") % e, instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
@@ -3715,10 +3730,11 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.info(_("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
- " instances.") % dict(host=self.host,
- begin_time=begin,
- end_time=end,
- number_instances=num_instances))
+ " instances."),
+ dict(host=self.host,
+ begin_time=begin,
+ end_time=end,
+ number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
@@ -3902,7 +3918,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if num_vm_instances != num_db_instances:
LOG.warn(_("Found %(num_db_instances)s in the database and "
- "%(num_vm_instances)s on the hypervisor.") % locals())
+ "%(num_vm_instances)s on the hypervisor."),
+ {'num_db_instances': num_db_instances,
+ 'num_vm_instances': num_vm_instances})
for db_instance in db_instances:
if db_instance['task_state'] is not None:
@@ -4054,6 +4072,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return
filters = {'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': None,
'host': self.host}
instances = self.conductor_api.instance_get_all_by_filters(context,
filters)
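[Note: the added 'task_state': None filter keeps soft-deleted instances that are mid-restore out of the reclaim pass (see the LP #1186243 test later in this patch). A sketch of the filter semantics using plain dicts as a hypothetical stand-in for instance_get_all_by_filters():

    SOFT_DELETED = 'soft-delete'
    instances = [
        {'vm_state': SOFT_DELETED, 'task_state': None},         # reclaimable
        {'vm_state': SOFT_DELETED, 'task_state': 'restoring'},  # skipped
    ]
    filters = {'vm_state': SOFT_DELETED, 'task_state': None}
    matches = [inst for inst in instances
               if all(inst[k] == v for k, v in filters.items())]
    assert len(matches) == 1
]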
@@ -4139,24 +4158,22 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance)
if action == "log":
- name = instance['name']
LOG.warning(_("Detected instance with name label "
- "'%(name)s' which is marked as "
+ "'%s' which is marked as "
"DELETED but still present on host."),
- locals(), instance=instance)
+ instance['name'], instance=instance)
elif action == 'reap':
- name = instance['name']
LOG.info(_("Destroying instance with name label "
- "'%(name)s' which is marked as "
+ "'%s' which is marked as "
"DELETED but still present on host."),
- locals(), instance=instance)
+ instance['name'], instance=instance)
self._shutdown_instance(context, instance, bdms)
self._cleanup_volumes(context, instance['uuid'], bdms)
else:
- raise Exception(_("Unrecognized value '%(action)s'"
+ raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
- "instance_action"), locals(),
+ "instance_action"), action,
instance=instance)
def _running_deleted_instances(self, context):
@@ -4181,8 +4198,8 @@ class ComputeManager(manager.SchedulerDependentManager):
yield
except exception.InstanceFaultRollback as error:
self._quota_rollback(context, reservations)
- msg = _("Setting instance back to ACTIVE after: %s")
- LOG.info(msg % error, instance_uuid=instance_uuid)
+ LOG.info(_("Setting instance back to ACTIVE after: %s"),
+ error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
@@ -4190,8 +4207,8 @@ class ComputeManager(manager.SchedulerDependentManager):
except Exception as error:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
- msg = _('%s. Setting instance vm_state to ERROR')
- LOG.error(msg % error, instance_uuid=instance_uuid)
+ LOG.error(_('%s. Setting instance vm_state to ERROR'),
+ error, instance_uuid=instance_uuid)
self._set_instance_error_state(context, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
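[Note: the retry loop added in the plug-vifs hunk earlier in this file grows the delay by 6 seconds per failure up to a 60-second cap (a linear ramp, despite the "exponential backoff" wording in its comment). A self-contained sketch of the same pattern, with illustrative names:

    import time

    def call_with_retry(fn, step=6, max_delay=60):
        # Retry fn() until it succeeds, growing the sleep by `step`
        # seconds per failure, capped at `max_delay`, as in the hunk.
        delay = 0
        while True:
            try:
                return fn()
            except Exception:
                if delay < max_delay:
                    delay += step
                time.sleep(delay)
]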
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7eea08d3c..01f7ad3de 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1753,7 +1753,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
- 'metadata', 'host']
+ 'metadata', 'host', 'task_state']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
@@ -4055,6 +4055,8 @@ def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell = _cell_get_by_name_query(context, cell_name, session=session)
+ if not cell:
+ raise exception.CellNotFound(cell_name=cell_name)
cell.update(values)
return cell
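[Note: without the added check, a missing cell makes `cell.update(values)` fail with an AttributeError on None; raising CellNotFound surfaces a proper domain error instead. The guard pattern in isolation, with a dict standing in for the session query and LookupError standing in for CellNotFound:

    def cell_update(cells, cell_name, values):
        cell = cells.get(cell_name)   # stand-in for _cell_get_by_name_query()
        if not cell:
            raise LookupError('Cell %s could not be found.' % cell_name)
        cell.update(values)
        return cell
]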
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index dad496f23..8eb128acf 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -999,13 +999,13 @@ def restart_dhcp(context, dev, network_ref):
# are not in multi_host mode.
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
- os.chmod(optsfile, 0644)
+ os.chmod(optsfile, 0o644)
if network_ref['multi_host']:
_add_dhcp_mangle_rule(dev)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
- os.chmod(conffile, 0644)
+ os.chmod(conffile, 0o644)
pid = _dnsmasq_pid_for(dev)
@@ -1091,7 +1091,7 @@ interface %s
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
- os.chmod(conffile, 0644)
+ os.chmod(conffile, 0o644)
pid = _ra_pid_for(dev)
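[Note: the `0644` to `0o644` changes are Python 3 forward-compatibility fixes: the old `0NNN` octal literal is a SyntaxError on Python 3, while `0oNNN` is accepted from Python 2.6 onward and denotes the same permission bits. A quick self-contained check:

    import os
    import stat
    import tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    os.chmod(path, 0o644)          # rw-r--r--; 0o644 == 420 decimal
    assert stat.S_IMODE(os.stat(path).st_mode) == 0o644
    os.remove(path)
]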
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index 58c581542..18fdfb94d 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -15,6 +15,7 @@
from nova import db
from nova import notifications
from nova.objects import base
+from nova.objects import instance_info_cache
from nova.objects import utils as obj_utils
from nova import utils
@@ -24,7 +25,17 @@ from oslo.config import cfg
CONF = cfg.CONF
+# These are fields that can be specified as expected_attrs
+INSTANCE_OPTIONAL_FIELDS = ['metadata', 'system_metadata']
+# These are fields that are always joined by the db right now
+INSTANCE_IMPLIED_FIELDS = ['info_cache']
+
+
class Instance(base.NovaObject):
+ # Version 1.0: Initial version
+ # Version 1.1: Added info_cache
+ VERSION = '1.1'
+
fields = {
'id': int,
@@ -94,6 +105,9 @@ class Instance(base.NovaObject):
'metadata': dict,
'system_metadata': dict,
+ 'info_cache': obj_utils.nested_object_or_none(
+ instance_info_cache.InstanceInfoCache)
+
}
@property
@@ -132,11 +146,15 @@ class Instance(base.NovaObject):
_attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
_attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
_attr_terminated_at_to_primitive = obj_utils.dt_serializer('terminated_at')
+ _attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
_attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
_attr_launched_at_from_primitive = obj_utils.dt_deserializer
_attr_terminated_at_from_primitive = obj_utils.dt_deserializer
+ def _attr_info_cache_from_primitive(self, val):
+ return base.NovaObject.obj_from_primitive(val)
+
@staticmethod
def _from_db_object(instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
@@ -147,7 +165,7 @@ class Instance(base.NovaObject):
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
- if field in ['metadata', 'system_metadata']:
+ if field in INSTANCE_OPTIONAL_FIELDS + INSTANCE_IMPLIED_FIELDS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
@@ -159,6 +177,13 @@ class Instance(base.NovaObject):
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.metadata_to_dict(
db_inst['system_metadata'])
+ # NOTE(danms): info_cache and security_groups are almost always joined
+ # in the DB layer right now, so check to see if they're filled instead
+ # of looking at expected_attrs
+ if db_inst['info_cache']:
+ instance['info_cache'] = instance_info_cache.InstanceInfoCache()
+ instance_info_cache.InstanceInfoCache._from_db_object(
+ instance['info_cache'], db_inst['info_cache'])
instance.obj_reset_changes()
return instance
@@ -174,6 +199,9 @@ class Instance(base.NovaObject):
columns_to_join.append('metadata')
if 'system_metadata' in expected_attrs:
columns_to_join.append('system_metadata')
+ # NOTE(danms): The DB API currently always joins info_cache and
+ # security_groups for get operations, so don't add them to the
+ # list of columns
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join)
@@ -193,28 +221,33 @@ class Instance(base.NovaObject):
"""
updates = {}
changes = self.obj_what_changed()
- for field in changes:
- updates[field] = self[field]
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ isinstance(self[field], base.NovaObject)):
+ self[field].save(context)
+ elif field in changes:
+ updates[field] = self[field]
if expected_task_state is not None:
updates['expected_task_state'] = expected_task_state
- old_ref, inst_ref = db.instance_update_and_get_original(context,
- self.uuid,
- updates)
-
- expected_attrs = []
- for attr in ('metadata', 'system_metadata'):
- if hasattr(self, base.get_attrname(attr)):
- expected_attrs.append(attr)
- Instance._from_db_object(self, inst_ref, expected_attrs)
- if 'vm_state' in changes or 'task_state' in changes:
- notifications.send_update(context, old_ref, inst_ref)
+
+ if updates:
+ old_ref, inst_ref = db.instance_update_and_get_original(context,
+ self.uuid,
+ updates)
+ expected_attrs = []
+ for attr in INSTANCE_OPTIONAL_FIELDS:
+ if hasattr(self, base.get_attrname(attr)):
+ expected_attrs.append(attr)
+ Instance._from_db_object(self, inst_ref, expected_attrs)
+ if 'vm_state' in changes or 'task_state' in changes:
+ notifications.send_update(context, old_ref, inst_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
extra = []
- for field in ['system_metadata', 'metadata']:
+ for field in INSTANCE_OPTIONAL_FIELDS:
if hasattr(self, base.get_attrname(field)):
extra.append(field)
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
@@ -230,6 +263,8 @@ class Instance(base.NovaObject):
extra.append('system_metadata')
elif attrname == 'metadata':
extra.append('metadata')
+ elif attrname == 'info_cache':
+ extra.append('info_cache')
if not extra:
raise Exception('Cannot load "%s" from instance' % attrname)
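[Note: the split between INSTANCE_OPTIONAL_FIELDS and implied fields drives a lazy-load pattern: fields named in expected_attrs are joined up front, and anything else is fetched on first attribute access via obj_load_attr(). A generic, self-contained sketch of that pattern, with illustrative names rather than nova's API:

    class LazyRecord(object):
        OPTIONAL = ('metadata', 'system_metadata')

        def __init__(self, loaded):
            self._loaded = dict(loaded)

        def __getattr__(self, name):
            # Called only when normal attribute lookup fails.
            if name in self._loaded:
                return self._loaded[name]
            if name in self.OPTIONAL:
                # Simulate the extra DB round trip for a field that
                # was not in expected_attrs.
                self._loaded[name] = {}
                return self._loaded[name]
            raise AttributeError(name)

    rec = LazyRecord({'uuid': 'fake-uuid'})
    assert rec.uuid == 'fake-uuid'
    assert rec.metadata == {}      # lazily "loaded" on first touch
]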
diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py
new file mode 100644
index 000000000..6b46559ed
--- /dev/null
+++ b/nova/objects/instance_info_cache.py
@@ -0,0 +1,42 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.objects import base
+
+
+class InstanceInfoCache(base.NovaObject):
+ fields = {
+ 'instance_uuid': str,
+ 'network_info': str,
+ }
+
+ @staticmethod
+ def _from_db_object(info_cache, db_obj):
+ info_cache.instance_uuid = db_obj['instance_uuid']
+ info_cache.network_info = db_obj['network_info']
+ info_cache.obj_reset_changes()
+ return info_cache
+
+ @base.remotable_classmethod
+ def get_by_instance_uuid(cls, context, instance_uuid):
+ db_obj = db.instance_info_cache_get(context, instance_uuid)
+ return InstanceInfoCache._from_db_object(cls(), db_obj)
+
+ @base.remotable
+ def save(self, context):
+ if 'network_info' in self.obj_what_changed():
+ db.instance_info_cache_update(context, self.instance_uuid,
+ {'network_info': self.network_info})
+ self.obj_reset_changes()
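[Note: a hedged usage sketch for the new object, mirroring the unit tests later in this patch and assuming the remotable getter records the context on the object, as those tests suggest; only the changed field is written back on save():

    from nova import context
    from nova.objects import instance_info_cache

    ctxt = context.get_admin_context()
    cache = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
        ctxt, 'fake-uuid')        # getter records ctxt on the object
    cache.network_info = '[]'     # marks the field dirty
    cache.save()                  # issues instance_info_cache_update()
]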
diff --git a/nova/objects/utils.py b/nova/objects/utils.py
index 21ef6faeb..e79b36e7e 100644
--- a/nova/objects/utils.py
+++ b/nova/objects/utils.py
@@ -70,6 +70,14 @@ def ip_or_none(version):
return validator
+def nested_object_or_none(objclass):
+ def validator(val, objclass=objclass):
+ if val is None or isinstance(val, objclass):
+ return val
+ raise ValueError('An object of class %s is required here' % objclass)
+ return validator
+
+
def dt_serializer(name):
"""Return a datetime serializer for a named attribute."""
def serializer(self, name=name):
@@ -86,3 +94,12 @@ def dt_deserializer(instance, val):
return None
else:
return timeutils.parse_isotime(val)
+
+
+def obj_serializer(name):
+ def serializer(self, name=name):
+ if getattr(self, name) is not None:
+ return getattr(self, name).obj_to_primitive()
+ else:
+ return None
+ return serializer
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 136d05582..9429a0662 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -123,10 +123,13 @@ class SchedulerManager(manager.Manager):
expected_task_state=task_states.MIGRATING,),
context, ex, request_spec)
except Exception as ex:
+ request_spec = {'instance_properties': {
+ 'uuid': instance['uuid'], },
+ }
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('live_migration',
{'vm_state': vm_states.ERROR},
- context, ex, {})
+ context, ex, request_spec)
def run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
diff --git a/nova/test.py b/nova/test.py
index d7502b4ea..85ba990c0 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -41,6 +41,7 @@ from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
+from nova.objects import base as objects_base
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -232,6 +233,10 @@ class TestCase(testtools.TestCase):
self.useFixture(_DB_CACHE)
+ # NOTE(danms): Make sure to reset us back to non-remote objects
+ # for each test to avoid interactions.
+ objects_base.NovaObject.indirection_api = None
+
mox_fixture = self.useFixture(MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 4b578a787..e22796252 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -601,8 +601,8 @@ class CinderCloudTestCase(test.TestCase):
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
- {'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
- {'device_name': '/dev/sdb2', 'volume_id': 01234567},
+ {'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
+ {'device_name': '/dev/sdb2', 'volume_id': 1234567},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
@@ -625,7 +625,7 @@ class CinderCloudTestCase(test.TestCase):
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
- 'snapshot_id': 01234567}]
+ 'snapshot_id': 1234567}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
index b9066a975..2ccb9fa31 100644
--- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
@@ -56,7 +56,7 @@ def fake_service_get_all(context, disabled=None):
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
- datetime.datetime(2012, 11, 14, 9, 57, 03, 0),
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
diff --git a/nova/tests/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/api/openstack/compute/contrib/test_config_drive.py
index 235b9373b..357350b9f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_config_drive.py
+++ b/nova/tests/api/openstack/compute/contrib/test_config_drive.py
@@ -47,10 +47,11 @@ class ConfigDriveTest(test.TestCase):
self.assertTrue('config_drive' in res_dict['server'])
def test_detail_servers(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
res = req.get_response(fakes.wsgi_app(init_only=('servers,')))
server_dicts = jsonutils.loads(res.body)['servers']
+ self.assertNotEqual(len(server_dicts), 0)
for server_dict in server_dicts:
- self.asserTrue('config_drive' in server_dict)
+ self.assertTrue('config_drive' in server_dict)
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index 31b832084..05b83131c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -29,6 +29,8 @@ def fake_instance_get(self, context, instance_id):
result['deleted_at'] = None
result['updated_at'] = None
result['deleted'] = 0
+ result['info_cache'] = {'network_info': 'foo',
+ 'instance_uuid': result['uuid']}
return result
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
index e44c0be4a..4d58bdb9e 100644
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -55,6 +55,9 @@ class CellsAPITestCase(test.TestCase):
self.assertEqual(self.fake_context, call_info['context'])
self.assertEqual(self.fake_topic, call_info['topic'])
self.assertEqual(method, call_info['msg']['method'])
+ msg_version = call_info['msg']['version']
+ self.assertTrue(isinstance(msg_version, basestring),
+ "Message version %s is not a string" % msg_version)
self.assertEqual(version, call_info['msg']['version'])
self.assertEqual(args, call_info['msg']['args'])
@@ -127,7 +130,7 @@ class CellsAPITestCase(test.TestCase):
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'build_instances',
- expected_args, version=1.8)
+ expected_args, version='1.8')
def test_get_capacities(self):
capacity_info = {"capacity": "info"}
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 9787984bf..bcf48ebb6 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -63,6 +63,7 @@ from nova.openstack.common import uuidutils
from nova import policy
from nova import quota
from nova import test
+from nova.tests.api.openstack import fakes
from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance
@@ -4772,19 +4773,27 @@ class ComputeTestCase(BaseTestCase):
def test_init_host(self):
our_host = self.compute.host
fake_context = 'fake-context'
- startup_instances = ['inst1', 'inst2', 'inst3']
+ inst = dict(fakes.stub_instance(1),
+ deleted_at=None, created_at=None, updated_at=None,
+ deleted=0, info_cache={'instance_uuid': 'fake-uuid',
+ 'network_info': None})
+ startup_instances = [inst, inst, inst]
def _do_mock_calls(defer_iptables_apply):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(fake_context)
- self.compute.conductor_api.instance_get_all_by_host(
- fake_context, our_host).AndReturn(startup_instances)
+ db.instance_get_all_by_host(
+ fake_context, our_host, columns_to_join=['info_cache']
+ ).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
self.compute._destroy_evacuated_instances(fake_context)
- self.compute._init_instance(fake_context, startup_instances[0])
- self.compute._init_instance(fake_context, startup_instances[1])
- self.compute._init_instance(fake_context, startup_instances[2])
+ self.compute._init_instance(fake_context,
+ mox.IsA(instance_obj.Instance))
+ self.compute._init_instance(fake_context,
+ mox.IsA(instance_obj.Instance))
+ self.compute._init_instance(fake_context,
+ mox.IsA(instance_obj.Instance))
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_off()
self.compute._report_driver_status(fake_context)
@@ -4795,8 +4804,7 @@ class ComputeTestCase(BaseTestCase):
'filter_defer_apply_on')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_off')
- self.mox.StubOutWithMock(self.compute.conductor_api,
- 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute,
'_destroy_evacuated_instances')
@@ -4840,8 +4848,7 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.StubOutWithMock(self.compute.conductor_api,
- 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute, 'init_virt_events')
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
@@ -4852,8 +4859,9 @@ class ComputeTestCase(BaseTestCase):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(fake_context)
- self.compute.conductor_api.instance_get_all_by_host(
- fake_context, our_host).AndReturn([])
+ db.instance_get_all_by_host(fake_context, our_host,
+ columns_to_join=['info_cache']
+ ).AndReturn([])
self.compute.init_virt_events()
# simulate failed instance
@@ -4923,7 +4931,7 @@ class ComputeTestCase(BaseTestCase):
}
fixed = dict(instance, task_state=None)
self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
- self.mox.StubOutWithMock(utils, 'metadata_to_dict')
+ self.mox.StubOutWithMock(utils, 'instance_sys_meta')
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
self.mox.StubOutWithMock(self.compute.driver,
'finish_revert_migration')
@@ -4935,7 +4943,7 @@ class ComputeTestCase(BaseTestCase):
compute_utils.get_nw_info_for_instance(instance).AndReturn(
network_model.NetworkInfo())
self.compute.driver.plug_vifs(instance, [])
- utils.metadata_to_dict(instance['system_metadata']).AndReturn(sys_meta)
+ utils.instance_sys_meta(instance).AndReturn(sys_meta)
self.compute._get_instance_volume_block_device_info(
self.context, instance).AndReturn([])
self.compute.driver.finish_revert_migration(instance, [], [], power_on)
@@ -5112,6 +5120,44 @@ class ComputeTestCase(BaseTestCase):
updated_ats = (updated_at_1, updated_at_2, updated_at_3)
self.assertEqual(len(updated_ats), len(set(updated_ats)))
+ def test_reclaim_queued_deletes(self):
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ # Active
+ self._create_fake_instance(params={'host': CONF.host})
+
+ # Deleted not old enough
+ self._create_fake_instance(params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ # Deleted old enough (only this one should be reclaimed)
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance = self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+
+ # Restoring
+ # NOTE(hanlind): This specifically tests for a race condition
+ # where restoring a previously soft deleted instance sets
+ # deleted_at back to None, causing reclaim to think it can be
+ # deleted, see LP #1186243.
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': task_states.RESTORING})
+
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ instance_ref = get_primitive_instance_by_uuid(ctxt, instance['uuid'])
+ self.compute._delete_instance(ctxt, instance_ref, [])
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
class ComputeAPITestCase(BaseTestCase):
diff --git a/nova/tests/compute/test_virtapi.py b/nova/tests/compute/test_virtapi.py
index 7cf05d87d..c090c85f1 100644
--- a/nova/tests/compute/test_virtapi.py
+++ b/nova/tests/compute/test_virtapi.py
@@ -45,12 +45,6 @@ class VirtAPIBaseTest(test.TestCase, test.APICoverage):
self.assertExpected('instance_update', 'fake-uuid',
dict(host='foohost'))
- def test_instance_get_by_uuid(self):
- self.assertExpected('instance_get_by_uuid', 'fake-uuid')
-
- def test_instance_get_all_by_host(self):
- self.assertExpected('instance_get_all_by_host', 'fake-host')
-
def test_aggregate_get_by_host(self):
self.assertExpected('aggregate_get_by_host', 'fake-host', key=None)
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index 2670c99c7..d40d05bdb 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -363,7 +363,7 @@ class DbApiTestCase(DbTestCase):
'fake_host2')
self.assertEqual(0, len(results))
- updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00)
+ updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(ctxt, values)
@@ -398,7 +398,7 @@ class DbApiTestCase(DbTestCase):
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
- updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00)
+ updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"task_state": "rebooting", "updated_at": updated_at}
instance = db.instance_create(ctxt, values)
results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
@@ -589,83 +589,6 @@ class DbApiTestCase(DbTestCase):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
- def test_instance_fault_create(self):
- # Ensure we can create an instance fault.
- ctxt = context.get_admin_context()
- uuid = str(stdlib_uuid.uuid4())
-
- # Create a fault
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuid,
- 'code': 404,
- }
- db.instance_fault_create(ctxt, fault_values)
-
- # Retrieve the fault to ensure it was successfully added
- faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
- self.assertEqual(404, faults[uuid][0]['code'])
-
- def test_instance_fault_get_by_instance(self):
- # ensure we can retrieve an instance fault by instance UUID.
- ctxt = context.get_admin_context()
- instance1 = db.instance_create(ctxt, {})
- instance2 = db.instance_create(ctxt, {})
- uuids = [instance1['uuid'], instance2['uuid']]
-
- # Create faults
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuids[0],
- 'code': 404,
- }
- fault1 = db.instance_fault_create(ctxt, fault_values)
-
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuids[0],
- 'code': 500,
- }
- fault2 = db.instance_fault_create(ctxt, fault_values)
-
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuids[1],
- 'code': 404,
- }
- fault3 = db.instance_fault_create(ctxt, fault_values)
-
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuids[1],
- 'code': 500,
- }
- fault4 = db.instance_fault_create(ctxt, fault_values)
-
- instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
-
- expected = {
- uuids[0]: [fault2, fault1],
- uuids[1]: [fault4, fault3],
- }
-
- self.assertEqual(instance_faults, expected)
-
- def test_instance_faults_get_by_instance_uuids_no_faults(self):
- # None should be returned when no faults exist.
- ctxt = context.get_admin_context()
- instance1 = db.instance_create(ctxt, {})
- instance2 = db.instance_create(ctxt, {})
- uuids = [instance1['uuid'], instance2['uuid']]
- instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
- expected = {uuids[0]: [], uuids[1]: []}
- self.assertEqual(expected, instance_faults)
-
def test_instance_action_start(self):
"""Create an instance action."""
ctxt = context.get_admin_context()
@@ -2315,6 +2238,69 @@ class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
return db.instance_type_create(self.ctxt, v)
+class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(InstanceFaultTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _create_fault_values(self, uuid, code=404):
+ return {
+ 'message': 'message',
+ 'details': 'detail',
+ 'instance_uuid': uuid,
+ 'code': code,
+ 'host': 'localhost'
+ }
+
+ def test_instance_fault_create(self):
+ """Ensure we can create an instance fault."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ # Ensure no faults registered for this instance
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ self.assertEqual(0, len(faults[uuid]))
+
+ # Create a fault
+ fault_values = self._create_fault_values(uuid)
+ fault = db.instance_fault_create(self.ctxt, fault_values)
+
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id']
+ self._assertEqualObjects(fault_values, fault, ignored_keys)
+
+ # Retrieve the fault to ensure it was successfully added
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ self.assertEqual(1, len(faults[uuid]))
+ self._assertEqualObjects(fault, faults[uuid][0])
+
+ def test_instance_fault_get_by_instance(self):
+ """Ensure we can retrieve faults for instance."""
+ uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
+ fault_codes = [404, 500]
+ expected = {}
+
+ # Create faults
+ for uuid in uuids:
+ expected[uuid] = []
+ for code in fault_codes:
+ fault_values = self._create_fault_values(uuid, code)
+ fault = db.instance_fault_create(self.ctxt, fault_values)
+ expected[uuid].append(fault)
+
+ # Ensure faults are saved
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
+ self.assertEqual(len(expected), len(faults))
+ for uuid in uuids:
+ self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
+
+ def test_instance_faults_get_by_instance_uuids_no_faults(self):
+ uuid = str(stdlib_uuid.uuid4())
+ # None should be returned when no faults exist.
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ expected = {uuid: []}
+ self.assertEqual(expected, faults)
+
+
class InstanceTypeTestCase(BaseInstanceTypeTestCase):
def test_instance_type_create(self):
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py
index a1d2f402e..f6be7a87f 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/db/test_migrations.py
@@ -707,8 +707,8 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
'task_name': 'The name of the task',
'state': 'The state of the task',
'host': 'compute-host1',
- 'period_beginning': str(datetime.datetime(2013, 02, 11)),
- 'period_ending': str(datetime.datetime(2013, 02, 12)),
+ 'period_beginning': str(datetime.datetime(2013, 2, 11)),
+ 'period_ending': str(datetime.datetime(2013, 2, 12)),
'message': 'The task_log message',
}
result = task_log.insert().values(data).execute()
@@ -1436,8 +1436,8 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
('key_pairs', {'user_id': 1, 'name': "name_qwer", 'deleted': 0}),
('networks', {'vlan': '123', 'deleted': 0}),
('task_log', {'task_name': 'task_123', 'host': 'localhost',
- 'period_beginning': datetime.datetime(2013, 02, 11),
- 'period_ending': datetime.datetime(2015, 01, 01),
+ 'period_beginning': datetime.datetime(2013, 2, 11),
+ 'period_ending': datetime.datetime(2015, 1, 1),
'state': 'state_1', 'message': 'msg_1'}),
('virtual_interfaces', {'address': '192.168.0.0'})
]
diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py
index 2ffc50227..8add963d1 100644
--- a/nova/tests/fakeguestfs.py
+++ b/nova/tests/fakeguestfs.py
@@ -62,7 +62,7 @@ class GuestFS(object):
"isdir": True,
"gid": 100,
"uid": 100,
- "mode": 0700
+ "mode": 0o700
}
def read_file(self, path):
@@ -72,7 +72,7 @@ class GuestFS(object):
"content": "Hello World",
"gid": 100,
"uid": 100,
- "mode": 0700
+ "mode": 0o700
}
return self.files[path]["content"]
@@ -84,7 +84,7 @@ class GuestFS(object):
"content": "Hello World",
"gid": 100,
"uid": 100,
- "mode": 0700
+ "mode": 0o700
}
self.files[path]["content"] = content
@@ -96,7 +96,7 @@ class GuestFS(object):
"content": "Hello World",
"gid": 100,
"uid": 100,
- "mode": 0700
+ "mode": 0o700
}
self.files[path]["content"] = self.files[path]["content"] + content
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index d2d80ab35..13d21d524 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -40,7 +40,7 @@ class _FakeImageService(object):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
- timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
+ timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py
index 54e010a8a..a9238a924 100644
--- a/nova/tests/objects/test_instance.py
+++ b/nova/tests/objects/test_instance.py
@@ -18,6 +18,7 @@ import netaddr
from nova import context
from nova import db
+from nova.objects import base
from nova.objects import instance
from nova.openstack.common import timeutils
from nova.tests.api.openstack import fakes
@@ -39,6 +40,7 @@ class _TestInstanceObject(object):
fake_instance['launched_at'].replace(
tzinfo=iso8601.iso8601.Utc(), microsecond=0))
fake_instance['deleted'] = False
+ fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
return fake_instance
def test_datetime_deserialization(self):
@@ -90,8 +92,9 @@ class _TestInstanceObject(object):
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(ctxt, uuid='uuid')
# Make sure these weren't loaded
- self.assertFalse(hasattr(inst, '_metadata'))
- self.assertFalse(hasattr(inst, '_system_metadata'))
+ for attr in instance.INSTANCE_OPTIONAL_FIELDS:
+ attrname = base.get_attrname(attr)
+ self.assertFalse(hasattr(inst, attrname))
self.assertRemotes()
def test_get_with_expected(self):
@@ -99,12 +102,13 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(
ctxt, 'uuid',
- ['metadata', 'system_metadata']).AndReturn(self.fake_instance)
+ instance.INSTANCE_OPTIONAL_FIELDS).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(
- ctxt, 'uuid', expected_attrs=['metadata', 'system_metadata'])
- self.assertTrue(hasattr(inst, '_metadata'))
- self.assertTrue(hasattr(inst, '_system_metadata'))
+ ctxt, 'uuid', expected_attrs=instance.INSTANCE_OPTIONAL_FIELDS)
+ for attr in instance.INSTANCE_OPTIONAL_FIELDS:
+ attrname = base.get_attrname(attr)
+ self.assertTrue(hasattr(inst, attrname))
self.assertRemotes()
def test_load(self):
@@ -166,6 +170,7 @@ class _TestInstanceObject(object):
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_get_by_uuid(ctxt, fake_uuid, []).AndReturn(fake_inst)
db.instance_update_and_get_original(
ctxt, fake_uuid, {'user_data': 'foo'}).AndReturn(
@@ -187,6 +192,26 @@ class _TestInstanceObject(object):
# NOTE(danms): Make sure it's actually a bool
self.assertEqual(inst.deleted, True)
+ def test_with_info_cache(self):
+ ctxt = context.get_admin_context()
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_inst['info_cache'] = {'network_info': 'foo',
+ 'instance_uuid': fake_uuid}
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_get_by_uuid(ctxt, fake_uuid, []).AndReturn(fake_inst)
+ db.instance_info_cache_update(ctxt, fake_uuid,
+ {'network_info': 'bar'})
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(ctxt, fake_uuid)
+ self.assertEqual(inst.info_cache.network_info,
+ fake_inst['info_cache']['network_info'])
+ self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
+ inst.info_cache.network_info = 'bar'
+ inst.save()
+
class TestInstanceObject(test_objects._LocalTest,
_TestInstanceObject):
diff --git a/nova/tests/objects/test_instance_info_cache.py b/nova/tests/objects/test_instance_info_cache.py
new file mode 100644
index 000000000..74362d178
--- /dev/null
+++ b/nova/tests/objects/test_instance_info_cache.py
@@ -0,0 +1,54 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import db
+from nova.objects import instance_info_cache
+from nova.tests.objects import test_objects
+
+
+class _TestInstanceInfoCacheObject(object):
+ def test_get_by_instance_uuid(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(ctxt, 'fake-uuid').AndReturn(
+ {'instance_uuid': 'fake-uuid', 'network_info': 'foo'})
+ self.mox.ReplayAll()
+ obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
+ ctxt, 'fake-uuid')
+ self.assertEqual(obj.instance_uuid, 'fake-uuid')
+ self.assertEqual(obj.network_info, 'foo')
+ self.assertRemotes()
+
+ def test_save(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_info_cache_update(ctxt, 'fake-uuid',
+ {'network_info': 'foo'})
+ self.mox.ReplayAll()
+ obj = instance_info_cache.InstanceInfoCache()
+ obj._context = ctxt
+ obj.instance_uuid = 'fake-uuid'
+ obj.network_info = 'foo'
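+        # instance_uuid keys the update; only network_info is sent as data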
+ obj.save()
+
+
+class TestInstanceInfoCacheObject(test_objects._LocalTest,
+ _TestInstanceInfoCacheObject):
+ pass
+
+
+class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
+ _TestInstanceInfoCacheObject):
+ pass
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 687444dae..0574f6d2e 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -277,6 +277,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.context, inst, dest, block_migration,
disk_over_commit)
+ def test_live_migration_set_vmstate_error(self):
+ inst = {"uuid": "fake-instance-id",
+ "vm_state": vm_states.ACTIVE, }
+
+ dest = 'fake_host'
+ block_migration = False
+ disk_over_commit = False
+
+ self._mox_schedule_method_helper('schedule_live_migration')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.manager.driver.schedule_live_migration(self.context,
+ inst, dest, block_migration, disk_over_commit).AndRaise(
+ ValueError)
+ db.instance_update_and_get_original(self.context, inst["uuid"],
+ {"vm_state": vm_states.ERROR,
+ }).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context,
+ mox.IsA(conductor_api.LocalAPI), inst,
+ mox.IsA(ValueError),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.stub_out_client_exceptions()
+ self.assertRaises(ValueError,
+ self.manager.live_migration,
+ self.context, inst, dest, block_migration,
+ disk_over_commit)
+
def test_prep_resize_no_valid_host_back_in_active_state(self):
fake_instance_uuid = 'fake-instance-id'
fake_instance = {'uuid': fake_instance_uuid}
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
index eefa1daf8..3923dd9a1 100644
--- a/nova/tests/test_availability_zones.py
+++ b/nova/tests/test_availability_zones.py
@@ -59,6 +59,10 @@ class AvailabilityZoneTestCases(test.TestCase):
return agg
+ def _update_az(self, aggregate, az_name):
+ metadata = {'availability_zone': az_name}
+ db.aggregate_update(self.context, aggregate['id'], metadata)
+
def _create_service_with_topic(self, topic, host, disabled=False):
values = {
'binary': 'bin',
@@ -77,7 +81,7 @@ class AvailabilityZoneTestCases(test.TestCase):
def _delete_from_aggregate(self, service, aggregate):
return db.aggregate_host_delete(self.context,
- self.aggregate['id'], service['host'])
+ aggregate['id'], service['host'])
def test_set_availability_zone_compute_service(self):
"""Test for compute service get right availability zone."""
@@ -119,6 +123,37 @@ class AvailabilityZoneTestCases(test.TestCase):
self.assertEquals(self.availability_zone,
az.get_host_availability_zone(self.context, self.host))
+ def test_update_host_availability_zone(self):
+        """Test that the availability zone of a host can be updated."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEquals(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Update AZ
+ new_az_name = 'az2'
+ self._update_az(agg_az1, new_az_name)
+ self.assertEquals(new_az_name,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_delete_host_availability_zone(self):
+        """Test that an availability zone can be deleted successfully."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEquals(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Delete the AZ via deleting the aggregate
+ self._delete_from_aggregate(service, agg_az1)
+ self.assertEquals(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
def test_get_availability_zones(self):
"""Test get_availability_zones."""
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 3ca6d7bc1..055e46bfd 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -21,6 +21,7 @@ Unit Tests for remote procedure calls using queue
"""
import sys
import mox
+import testtools
from oslo.config import cfg
@@ -31,6 +32,7 @@ from nova import exception
from nova import manager
from nova import service
from nova import test
+from nova.tests import utils
from nova import wsgi
test_service_opts = [
@@ -184,6 +186,7 @@ class TestWSGIService(test.TestCase):
self.assertNotEqual(0, test_service.port)
test_service.stop()
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_service_random_port_with_ipv6(self):
CONF.set_default("test_service_listen", "::1")
test_service = service.WSGIService("test_service")
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
index cd64688a2..d1d659fe3 100644
--- a/nova/tests/test_wsgi.py
+++ b/nova/tests/test_wsgi.py
@@ -20,6 +20,7 @@
import os.path
import tempfile
+import testtools
import eventlet
import httplib2
@@ -27,6 +28,7 @@ import paste
import nova.exception
from nova import test
+from nova.tests import utils
import nova.wsgi
import urllib2
import webob
@@ -101,6 +103,7 @@ class TestWSGIServer(test.TestCase):
server.stop()
server.wait()
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_start_random_port_with_ipv6(self):
server = nova.wsgi.Server("test_random_port", None,
host="::1", port=0)
@@ -198,6 +201,7 @@ class TestWSGIServerWithSSL(test.TestCase):
fake_ssl_server.stop()
fake_ssl_server.wait()
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_app_using_ipv6_and_ssl(self):
greetings = 'Hello, World!!!'
@@ -210,6 +214,7 @@ class TestWSGIServerWithSSL(test.TestCase):
host="::1",
port=0,
use_ssl=True)
+
server.start()
response = urllib2.urlopen('https://[::1]:%d/' % server.port)
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 75b4eab73..994e4f220 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -14,7 +14,9 @@
# License for the specific language governing permissions and limitations
#
+import errno
import platform
+import socket
from oslo.config import cfg
@@ -200,3 +202,17 @@ def killer_xml_body():
            'c': '&b;' * 10,
            'd': '&c;' * 9999,
        }).strip()
+
+
+def is_ipv6_supported():
+    """Check whether the test host can create IPv6 sockets."""
+    has_ipv6_support = True
+    try:
+        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+        s.close()
+    except socket.error as e:
+        if e.errno == errno.EAFNOSUPPORT:
+            has_ipv6_support = False
+        else:
+            raise
+    return has_ipv6_support
diff --git a/nova/tests/virt/baremetal/test_ipmi.py b/nova/tests/virt/baremetal/test_ipmi.py
index ba7a875cf..4b289db15 100644
--- a/nova/tests/virt/baremetal/test_ipmi.py
+++ b/nova/tests/virt/baremetal/test_ipmi.py
@@ -56,7 +56,7 @@ class BareMetalIPMITestCase(test.TestCase):
pw_file = ipmi._make_password_file(self.node['pm_password'])
try:
self.assertTrue(os.path.isfile(pw_file))
- self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0777, 0600)
+ self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0o777, 0o600)
with open(pw_file, "r") as f:
pm_password = f.read()
self.assertEqual(pm_password, self.node['pm_password'])
diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
index 3e0da41b2..77744cfc2 100644
--- a/nova/tests/virt/libvirt/test_libvirt.py
+++ b/nova/tests/virt/libvirt/test_libvirt.py
@@ -4607,11 +4607,11 @@ disk size: 4.4M''', ''))
os.close(dst_fd)
os.unlink(dst_path)
- libvirt_utils.write_to_file(dst_path, 'hello', umask=0277)
+ libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277)
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
mode = os.stat(dst_path).st_mode
- self.assertEquals(mode & 0277, 0)
+ self.assertEquals(mode & 0o277, 0)
finally:
os.unlink(dst_path)
diff --git a/nova/tests/virt/test_virt_disk.py b/nova/tests/virt/test_virt_disk.py
index 0c51e8267..20fad1cd4 100644
--- a/nova/tests/virt/test_virt_disk.py
+++ b/nova/tests/virt/test_virt_disk.py
@@ -59,7 +59,7 @@ class VirtDiskTest(test.TestCase):
self.assertTrue("/root/.ssh" in vfs.handle.files)
self.assertEquals(vfs.handle.files["/root/.ssh"],
- {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700})
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files)
self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
@@ -67,7 +67,7 @@ class VirtDiskTest(test.TestCase):
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
- 'mode': 0600})
+ 'mode': 0o600})
vfs.teardown()
@@ -89,11 +89,11 @@ class VirtDiskTest(test.TestCase):
"-RF root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
- 'mode': 0700})
+ 'mode': 0o700})
self.assertTrue("/root/.ssh" in vfs.handle.files)
self.assertEquals(vfs.handle.files["/root/.ssh"],
- {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700})
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files)
self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
@@ -101,7 +101,7 @@ class VirtDiskTest(test.TestCase):
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
- 'mode': 0600})
+ 'mode': 0o600})
vfs.teardown()
@@ -124,7 +124,7 @@ class VirtDiskTest(test.TestCase):
"root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
- 'mode': 0700})
+ 'mode': 0o700})
vfs.teardown()
def test_inject_net(self):
@@ -139,7 +139,7 @@ class VirtDiskTest(test.TestCase):
{'content': 'mynetconfig',
'gid': 100,
'isdir': False,
- 'mode': 0700,
+ 'mode': 0o700,
'uid': 100})
vfs.teardown()
@@ -158,7 +158,7 @@ class VirtDiskTest(test.TestCase):
'"eek": "wizz"}',
'gid': 100,
'isdir': False,
- 'mode': 0700,
+ 'mode': 0o700,
'uid': 100})
vfs.teardown()
@@ -190,7 +190,7 @@ class VirtDiskTest(test.TestCase):
"/sbin/nologin\n",
'gid': 100,
'isdir': False,
- 'mode': 0700,
+ 'mode': 0o700,
'uid': 100})
shadow = vfs.handle.files["/etc/shadow"]
@@ -204,7 +204,7 @@ class VirtDiskTest(test.TestCase):
"daemon:*:14495:0:99999:7:::\n",
'gid': 100,
'isdir': False,
- 'mode': 0700,
+ 'mode': 0o700,
'uid': 100})
else:
self.assertEquals(shadow,
@@ -214,6 +214,6 @@ class VirtDiskTest(test.TestCase):
"daemon:*:14495:0:99999:7:::\n",
'gid': 100,
'isdir': False,
- 'mode': 0700,
+ 'mode': 0o700,
'uid': 100})
vfs.teardown()
diff --git a/nova/tests/virt/test_virt_disk_vfs_guestfs.py b/nova/tests/virt/test_virt_disk_vfs_guestfs.py
index 16c85f815..778530741 100644
--- a/nova/tests/virt/test_virt_disk_vfs_guestfs.py
+++ b/nova/tests/virt/test_virt_disk_vfs_guestfs.py
@@ -173,10 +173,10 @@ class VirtDiskVFSGuestFSTest(test.TestCase):
vfs.setup()
vfs.read_file("/some/file")
- self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0700)
+ self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0o700)
- vfs.set_permissions("/some/file", 0777)
- self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0777)
+        vfs.set_permissions("/some/file", 0o777)
+        self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0o777)
vfs.teardown()
diff --git a/nova/tests/virt/test_virt_disk_vfs_localfs.py b/nova/tests/virt/test_virt_disk_vfs_localfs.py
index b52817b18..18df393ce 100644
--- a/nova/tests/virt/test_virt_disk_vfs_localfs.py
+++ b/nova/tests/virt/test_virt_disk_vfs_localfs.py
@@ -98,7 +98,7 @@ def fake_execute(*args, **kwargs):
"content": "Hello World",
"gid": 100,
"uid": 100,
- "mode": 0700
+ "mode": 0o700
}
return files[path]["content"], ""
elif args[0] == "tee":
@@ -113,7 +113,7 @@ def fake_execute(*args, **kwargs):
"content": "Hello World",
"gid": 100,
"uid": 100,
- "mode": 0700,
+ "mode": 0o700,
}
if append:
files[path]["content"] += kwargs["process_input"]
@@ -306,8 +306,8 @@ class VirtDiskVFSLocalFSTest(test.TestCase):
vfs.imgdir = "/scratch/dir"
vfs.read_file("/some/file")
- vfs.set_permissions("/some/file", 0777)
- self.assertEquals(files["/scratch/dir/some/file"]["mode"], 0777)
+ vfs.set_permissions("/some/file", 0o777)
+ self.assertEquals(files["/scratch/dir/some/file"]["mode"], 0o777)
root_helper = 'sudo nova-rootwrap %s' % CONF.rootwrap_config
self.assertEqual(commands,
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi.py b/nova/tests/virt/vmwareapi/test_vmwareapi.py
index da9ed1467..dd1a0e923 100644
--- a/nova/tests/virt/vmwareapi/test_vmwareapi.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi.py
@@ -216,12 +216,6 @@ class VMwareAPIVMTestCase(test.TestCase):
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
- def test_list_interfaces(self):
- self._create_vm()
- interfaces = self.conn.list_interfaces(1)
- self.assertEquals(len(interfaces), 1)
- self.assertEquals(interfaces[0], 4000)
-
def test_spawn(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index ad41345d8..f0e4c3379 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -38,6 +38,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
+from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_processutils
import nova.tests.image.fake as fake_image
@@ -1184,6 +1185,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
+ def test_uuid_find(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ fake_inst = fake_instance.fake_db_instance(id=123)
+ fake_inst2 = fake_instance.fake_db_instance(id=456)
+ db.instance_get_all_by_host(self.context, fake_inst['host'],
+ columns_to_join=None
+ ).AndReturn([fake_inst, fake_inst2])
+ self.mox.ReplayAll()
+ expected_name = CONF.instance_name_template % fake_inst['id']
+ inst_uuid = host._uuid_find(self.context, fake_inst['host'],
+ expected_name)
+ self.assertEqual(inst_uuid, fake_inst['uuid'])
+
def test_session_virtapi(self):
was = {'called': False}
@@ -3316,7 +3330,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
self.xenstore = dict(persist={}, ephem={})
def fake_get_vm_opaque_ref(inst, instance):
- self.assertEqual(instance, 'instance')
+ self.assertEqual(instance, {'uuid': 'fake'})
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
@@ -3329,12 +3343,12 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
- self.assertEqual(instance, 'instance')
+ self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
- self.assertEqual(instance, 'instance')
+ self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
@@ -3363,7 +3377,8 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
# Check xenstore key sanitizing
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
- {'key': 'sys_c', 'value': 3}])
+ {'key': 'sys_c', 'value': 3}],
+ uuid='fake')
self.conn._vmops.inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
@@ -3380,6 +3395,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
+ instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
@@ -3393,7 +3409,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
},
}
- self.conn._vmops.change_instance_metadata('instance', diff)
+ self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
@@ -3412,6 +3428,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
+ instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
@@ -3425,7 +3442,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
},
}
- self.conn._vmops.change_instance_metadata('instance', diff)
+ self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
@@ -3442,6 +3459,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
+ instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
@@ -3455,7 +3473,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
},
}
- self.conn._vmops.change_instance_metadata('instance', diff)
+ self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 9fdec0e87..5b8782924 100755
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -425,7 +425,7 @@ def _setup_selinux_for_keys(fs, sshdir):
restorecon.insert(0, '#!/bin/sh')
_inject_file_into_fs(fs, rclocal, ''.join(restorecon), append=True)
- fs.set_permissions(rclocal, 0700)
+ fs.set_permissions(rclocal, 0o700)
def _inject_key_into_fs(key, fs):
@@ -439,7 +439,7 @@ def _inject_key_into_fs(key, fs):
sshdir = os.path.join('root', '.ssh')
fs.make_path(sshdir)
fs.set_ownership(sshdir, "root", "root")
- fs.set_permissions(sshdir, 0700)
+ fs.set_permissions(sshdir, 0o700)
keyfile = os.path.join(sshdir, 'authorized_keys')
@@ -452,7 +452,7 @@ def _inject_key_into_fs(key, fs):
])
_inject_file_into_fs(fs, keyfile, key_data, append=True)
- fs.set_permissions(keyfile, 0600)
+ fs.set_permissions(keyfile, 0o600)
_setup_selinux_for_keys(fs, sshdir)
@@ -467,7 +467,7 @@ def _inject_net_into_fs(net, fs):
netdir = os.path.join('etc', 'network')
fs.make_path(netdir)
fs.set_ownership(netdir, "root", "root")
- fs.set_permissions(netdir, 0744)
+ fs.set_permissions(netdir, 0o744)
netfile = os.path.join('etc', 'network', 'interfaces')
_inject_file_into_fs(fs, netfile, net)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 77973fc32..801c8e827 100755
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -447,12 +447,6 @@ class FakeVirtAPI(virtapi.VirtAPI):
instance_uuid,
updates)
- def instance_get_by_uuid(self, context, instance_uuid):
- return db.instance_get_by_uuid(context, instance_uuid)
-
- def instance_get_all_by_host(self, context, host):
- return db.instance_get_all_by_host(context, host)
-
def aggregate_get_by_host(self, context, host, key=None):
return db.aggregate_get_by_host(context, host, key=key)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 48ba40280..be47a1e1d 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1780,7 +1780,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(vish): No need add the suffix to console.log
libvirt_utils.write_to_file(
- self._get_console_log_path(instance), '', 007)
+            self._get_console_log_path(instance), '', 0o007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 963b3c788..5b0a31826 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -26,20 +26,6 @@ class VirtAPI(object):
"""
raise NotImplementedError()
- def instance_get_by_uuid(self, context, instance_uuid):
- """Look up an instance by uuid
- :param context: security context
- :param instance_uuid: uuid of the instance to be fetched
- """
- raise NotImplementedError()
-
- def instance_get_all_by_host(self, context, host):
- """Find all instances on a given host
- :param context: security context
- :param host: host running instances to be returned
- """
- raise NotImplementedError()
-
def aggregate_get_by_host(self, context, host, key=None):
"""Get a list of aggregates to which the specified host belongs
:param context: security context
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index cb9612653..3cf9d32b4 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -333,16 +333,6 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
- def list_interfaces(self, instance_name):
- """
- Return the IDs of all the virtual network interfaces attached to the
- specified instance, as a list. These IDs are opaque to the caller
- (they are only useful for giving back to this layer as a parameter to
- interface_stats). These IDs only need to be unique for a given
- instance.
- """
- return self._vmops.list_interfaces(instance_name)
-
class VMwareVCDriver(VMwareESXDriver):
"""The ESX host connection object."""
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 3d6ef86af..23c85025b 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -1323,28 +1323,3 @@ class VMwareVMOps(object):
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
-
- def list_interfaces(self, instance_name):
- """
- Return the IDs of all the virtual network interfaces attached to the
- specified instance, as a list. These IDs are opaque to the caller
- (they are only useful for giving back to this layer as a parameter to
- interface_stats). These IDs only need to be unique for a given
- instance.
- """
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance_name)
-
- interfaces = []
- # Get the virtual network interfaces attached to the VM
- hardware_devices = self._session._call_method(vim_util,
- "get_dynamic_property", vm_ref,
- "VirtualMachine", "config.hardware.device")
-
- for device in hardware_devices:
- if device.__class__.__name__ in ["VirtualE1000", "VirtualE1000e",
- "VirtualPCNet32", "VirtualVmxnet"]:
- interfaces.append(device.key)
-
- return interfaces
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 5ed554674..85b86e75f 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -23,6 +23,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
+from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
@@ -64,7 +65,7 @@ class Host(object):
uuid = vm_rec['other_config'].get('nova_uuid')
if not uuid:
name = vm_rec['name_label']
- uuid = _uuid_find(self._virtapi, ctxt, host, name)
+ uuid = _uuid_find(ctxt, host, name)
if not uuid:
msg = _('Instance %(name)s running on %(host)s'
' could not be found in the database:'
@@ -72,7 +73,7 @@ class Host(object):
' ping migration to a new host')
LOG.info(msg % locals())
continue
- instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
+ instance = instance_obj.Instance.get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
aggregate = self._virtapi.aggregate_get_by_host(
@@ -84,27 +85,24 @@ class Host(object):
dest = _host_find(ctxt, self._session, aggregate[0],
host_ref)
- self._virtapi.instance_update(
- ctxt, instance['uuid'],
- {'host': dest,
- 'task_state': task_states.MIGRATING})
+ instance.host = dest
+ instance.task_state = task_states.MIGRATING
+ instance.save()
self._session.call_xenapi('VM.pool_migrate',
vm_ref, host_ref, {})
migrations_counter = migrations_counter + 1
- self._virtapi.instance_update(
- ctxt, instance['uuid'],
- {'vm_state': vm_states.ACTIVE})
+ instance.vm_state = vm_states.ACTIVE
+ instance.save()
break
except self._session.XenAPI.Failure:
LOG.exception(_('Unable to migrate VM %(vm_ref)s'
'from %(host)s') % locals())
- self._virtapi.instance_update(
- ctxt, instance['uuid'],
- {'host': host,
- 'vm_state': vm_states.ACTIVE})
+ instance.host = host
+ instance.vm_state = vm_states.ACTIVE
+ instance.save()
if vm_counter == migrations_counter:
return 'on_maintenance'
@@ -208,11 +206,11 @@ def call_xenhost(session, method, arg_dict):
return e.details[1]
-def _uuid_find(virtapi, context, host, name_label):
+def _uuid_find(context, host, name_label):
"""Return instance uuid by name_label."""
- for i in virtapi.instance_get_all_by_host(context, host):
+ for i in instance_obj.InstanceList.get_by_host(context, host):
if i.name == name_label:
- return i['uuid']
+ return i.uuid
return None
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d0802473f..23dc38fb5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1096,6 +1096,7 @@ class VMOps(object):
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
+ @utils.synchronized('xenstore-' + instance['uuid'])
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
@@ -1109,9 +1110,8 @@ class VMOps(object):
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
- for key, change in diff.items():
- key = self._sanitize_xenstore_key(key)
- location = 'vm-data/user-metadata/%s' % key
+
+ def process_change(location, change):
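+            # change is ['-'] to delete the key, or ['+', value] to set it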
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
@@ -1130,6 +1130,14 @@ class VMOps(object):
# catch KeyError for domid if instance isn't running
pass
+ @utils.synchronized('xenstore-' + instance['uuid'])
+ def update_meta():
+ for key, change in diff.items():
+ key = self._sanitize_xenstore_key(key)
+ location = 'vm-data/user-metadata/%s' % key
+ process_change(location, change)
+ update_meta()
+
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
@@ -1533,19 +1541,22 @@ class VMOps(object):
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
- for vif in network_info:
- xs_data = self._vif_xenstore_data(vif)
- location = ('vm-data/networking/%s' %
- vif['address'].replace(':', ''))
- self._add_to_param_xenstore(vm_ref,
- location,
- jsonutils.dumps(xs_data))
- try:
- self._write_to_xenstore(instance, location, xs_data,
- vm_ref=vm_ref)
- except KeyError:
- # catch KeyError for domid if instance isn't running
- pass
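+        # Reuse the per-instance xenstore lock so network-info writes do not
+        # race with concurrent metadata updates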
+ @utils.synchronized('xenstore-' + instance['uuid'])
+ def update_nwinfo():
+ for vif in network_info:
+ xs_data = self._vif_xenstore_data(vif)
+ location = ('vm-data/networking/%s' %
+ vif['address'].replace(':', ''))
+ self._add_to_param_xenstore(vm_ref,
+ location,
+ jsonutils.dumps(xs_data))
+ try:
+ self._write_to_xenstore(instance, location, xs_data,
+ vm_ref=vm_ref)
+ except KeyError:
+ # catch KeyError for domid if instance isn't running
+ pass
+ update_nwinfo()
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""