-rw-r--r--  etc/nova/rootwrap.d/baremetal-compute-ipmi.filters (renamed from etc/nova/rootwrap.d/baremetal_compute_ipmi.filters) | 0
-rw-r--r--  etc/nova/rootwrap.d/baremetal-compute-pxe.filters | 11
-rw-r--r--  nova/api/openstack/compute/__init__.py | 3
-rw-r--r--  nova/api/openstack/compute/versions.py | 4
-rw-r--r--  nova/cells/manager.py | 84
-rw-r--r--  nova/cells/utils.py | 48
-rw-r--r--  nova/compute/manager.py | 83
-rw-r--r--  nova/compute/task_states.py | 2
-rw-r--r--  nova/conductor/api.py | 74
-rw-r--r--  nova/conductor/manager.py | 40
-rw-r--r--  nova/conductor/rpcapi.py | 37
-rw-r--r--  nova/config.py | 12
-rw-r--r--  nova/db/sqlalchemy/api.py | 156
-rw-r--r--  nova/exception.py | 4
-rw-r--r--  nova/locale/nova.pot | 1325
-rw-r--r--  nova/manager.py | 96
-rw-r--r--  nova/network/linux_net.py | 2
-rw-r--r--  nova/network/manager.py | 10
-rw-r--r--  nova/openstack/common/rpc/__init__.py | 16
-rw-r--r--  nova/openstack/common/rpc/amqp.py | 5
-rw-r--r--  nova/scheduler/filter_scheduler.py | 2
-rw-r--r--  nova/scheduler/filters/retry_filter.py | 11
-rw-r--r--  nova/scheduler/host_manager.py | 3
-rw-r--r--  nova/service.py | 38
-rw-r--r--  nova/servicegroup/db_driver.py | 2
-rw-r--r--  nova/test.py | 2
-rw-r--r--  nova/tests/api/ec2/test_cinder_cloud.py | 12
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 12
-rw-r--r--  nova/tests/api/openstack/compute/test_versions.py | 48
-rw-r--r--  nova/tests/baremetal/test_pxe.py | 534
-rw-r--r--  nova/tests/baremetal/test_utils.py | 36
-rw-r--r--  nova/tests/cells/fakes.py | 6
-rw-r--r--  nova/tests/cells/test_cells_manager.py | 62
-rw-r--r--  nova/tests/cells/test_cells_utils.py | 82
-rw-r--r--  nova/tests/compute/test_compute.py | 47
-rw-r--r--  nova/tests/conductor/test_conductor.py | 117
-rw-r--r--  nova/tests/fakeguestfs.py | 10
-rw-r--r--  nova/tests/integrated/test_servers.py | 4
-rw-r--r--  nova/tests/matchers.py | 15
-rw-r--r--  nova/tests/scheduler/test_filter_scheduler.py | 6
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 6
-rw-r--r--  nova/tests/test_hypervapi.py | 34
-rw-r--r--  nova/tests/test_libvirt.py | 146
-rw-r--r--  nova/tests/test_periodic_tasks.py | 109
-rw-r--r--  nova/tests/test_virt_drivers.py | 6
-rw-r--r--  nova/tests/test_vmwareapi.py | 18
-rw-r--r--  nova/tests/test_xenapi.py | 20
-rw-r--r--  nova/utils.py | 59
-rw-r--r--  nova/virt/baremetal/ipmi.py | 4
-rw-r--r--  nova/virt/baremetal/net-dhcp.ubuntu.template | 21
-rw-r--r--  nova/virt/baremetal/net-static.ubuntu.template (renamed from nova/virt/baremetal/interfaces.template) | 1
-rw-r--r--  nova/virt/baremetal/pxe.py | 460
-rw-r--r--  nova/virt/baremetal/pxe_config.template | 11
-rw-r--r--  nova/virt/baremetal/utils.py | 7
-rw-r--r--  nova/virt/disk/vfs/guestfs.py | 39
-rw-r--r--  nova/virt/driver.py | 2
-rw-r--r--  nova/virt/fake.py | 4
-rw-r--r--  nova/virt/hyperv/driver.py | 4
-rw-r--r--  nova/virt/hyperv/snapshotops.py | 7
-rw-r--r--  nova/virt/libvirt/driver.py | 46
-rw-r--r--  nova/virt/vmwareapi/driver.py | 8
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 6
-rw-r--r--  nova/virt/xenapi/driver.py | 4
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 9
-rw-r--r--  nova/virt/xenapi/vmops.py | 8
65 files changed, 3208 insertions(+), 892 deletions(-)
diff --git a/etc/nova/rootwrap.d/baremetal_compute_ipmi.filters b/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
index a2858cd11..a2858cd11 100644
--- a/etc/nova/rootwrap.d/baremetal_compute_ipmi.filters
+++ b/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
diff --git a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
new file mode 100644
index 000000000..35fa61723
--- /dev/null
+++ b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
@@ -0,0 +1,11 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+
+# nova/virt/baremetal/pxe.py: 'dnsmasq', ...
+dnsmasq: CommandFilter, /usr/sbin/dnsmasq, root
+
+# nova/virt/baremetal/pxe.py: 'kill', '-TERM', str(dnsmasq_pid)
+kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -15, -TERM
+
diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py
index f88671733..92c84c13f 100644
--- a/nova/api/openstack/compute/__init__.py
+++ b/nova/api/openstack/compute/__init__.py
@@ -57,7 +57,8 @@ class APIRouter(nova.api.openstack.APIRouter):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
- action='show')
+ action='show',
+ conditions={"method": ['GET']})
mapper.redirect("", "/")
diff --git a/nova/api/openstack/compute/versions.py b/nova/api/openstack/compute/versions.py
index 76e37cf41..5c416908e 100644
--- a/nova/api/openstack/compute/versions.py
+++ b/nova/api/openstack/compute/versions.py
@@ -26,9 +26,9 @@ from nova.openstack.common import timeutils
LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
+ 'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
+ 'api/openstack-compute/2/wadl/os-compute-2.wadl'
},
}
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index a1352601c..0942bae28 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -16,19 +16,31 @@
"""
Cells Service Manager
"""
+import datetime
+import time
from nova.cells import messaging
from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
from nova import context
+from nova import exception
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
cell_manager_opts = [
cfg.StrOpt('driver',
default='nova.cells.rpc_driver.CellsRPCDriver',
help='Cells communication driver to use'),
+ cfg.IntOpt("instance_updated_at_threshold",
+ default=3600,
+ help="Number of seconds after an instance was updated "
+ "or deleted to continue to update cells"),
+ cfg.IntOpt("instance_update_num_instances",
+ default=1,
+ help="Number of instances to update per periodic task run")
]
@@ -66,6 +78,7 @@ class CellsManager(manager.Manager):
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
+ self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
@@ -93,6 +106,77 @@ class CellsManager(manager.Manager):
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
+ @manager.periodic_task
+ def _heal_instances(self, ctxt):
+ """Periodic task to send updates for a number of instances to
+ parent cells.
+
+ On every run of the periodic task, we will attempt to sync
+ 'CONF.cells.instance_update_num_instances' number of instances.
+ When we get the list of instances, we shuffle them so that multiple
+ nova-cells services aren't attempting to sync the same instances
+ in lockstep.
+
+ If CONF.cells.instance_updated_at_threshold is set, only attempt
+ to sync instances that have been updated recently. The CONF
+ setting defines the maximum number of seconds old the updated_at
+ can be. I.e., a threshold of 3600 means to only update instances
+ that have been modified in the last hour.
+ """
+
+ if not self.state_manager.get_parent_cells():
+ # No need to sync up if we have no parents.
+ return
+
+ info = {'updated_list': False}
+
+ def _next_instance():
+ try:
+ instance = self.instances_to_heal.next()
+ except StopIteration:
+ if info['updated_list']:
+ return
+ threshold = CONF.cells.instance_updated_at_threshold
+ updated_since = None
+ if threshold > 0:
+ updated_since = timeutils.utcnow() - datetime.timedelta(
+ seconds=threshold)
+ self.instances_to_heal = cells_utils.get_instances_to_sync(
+ ctxt, updated_since=updated_since, shuffle=True,
+ uuids_only=True)
+ info['updated_list'] = True
+ try:
+ instance = self.instances_to_heal.next()
+ except StopIteration:
+ return
+ return instance
+
+ rd_context = ctxt.elevated(read_deleted='yes')
+
+ for i in xrange(CONF.cells.instance_update_num_instances):
+ while True:
+ # Yield to other greenthreads
+ time.sleep(0)
+ instance_uuid = _next_instance()
+ if not instance_uuid:
+ return
+ try:
+ instance = self.db.instance_get_by_uuid(rd_context,
+ instance_uuid)
+ except exception.InstanceNotFound:
+ continue
+ self._sync_instance(ctxt, instance)
+ break
+
+ def _sync_instance(self, ctxt, instance):
+ """Broadcast an instance_update or instance_destroy message up to
+ parent cells.
+ """
+ if instance['deleted']:
+ self.instance_destroy_at_top(ctxt, instance)
+ else:
+ self.instance_update_at_top(ctxt, instance)
+
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
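For reference, a minimal nova.conf sketch for the two options introduced above (assuming, as the CONF.cells.* lookups in the code suggest, that they are registered under a [cells] section; the values shown are simply the defaults from the patch):

    [cells]
    # Keep syncing an instance to parent cells only while its updated_at /
    # deleted_at is at most this many seconds old (0 disables the cutoff).
    instance_updated_at_threshold = 3600
    # How many instances each run of the periodic task pushes upward.
    instance_update_num_instances = 1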
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
new file mode 100644
index 000000000..d25f98fab
--- /dev/null
+++ b/nova/cells/utils.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells Utility Methods
+"""
+import random
+
+from nova import db
+
+
+def get_instances_to_sync(context, updated_since=None, project_id=None,
+ deleted=True, shuffle=False, uuids_only=False):
+ """Return a generator that will return a list of active and
+ deleted instances to sync with parent cells. The list may
+ optionally be shuffled for periodic updates so that multiple
+ cells services aren't self-healing the same instances in nearly
+ lockstep.
+ """
+ filters = {}
+ if updated_since is not None:
+ filters['changes-since'] = updated_since
+ if project_id is not None:
+ filters['project_id'] = project_id
+ if not deleted:
+ filters['deleted'] = False
+ # Active instances first.
+ instances = db.instance_get_all_by_filters(
+ context, filters, 'deleted', 'asc')
+ if shuffle:
+ random.shuffle(instances)
+ for instance in instances:
+ if uuids_only:
+ yield instance['uuid']
+ else:
+ yield instance
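A short usage sketch for the new helper (the caller below is hypothetical, not part of the patch; ctxt, cutoff and sync_one are placeholders). With uuids_only=True the generator yields bare uuid strings, otherwise full instance dicts:

    from nova.cells import utils as cells_utils

    # Shuffle so multiple nova-cells workers don't heal the same instances
    # in lockstep; updated_since limits how far back we look.
    for uuid in cells_utils.get_instances_to_sync(ctxt,
                                                  updated_since=cutoff,
                                                  shuffle=True,
                                                  uuids_only=True):
        sync_one(ctxt, uuid)  # e.g. look the instance up and push it to parents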
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 953342924..7ac6b1518 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -116,9 +116,9 @@ interval_opts = [
default=120,
help='Interval in seconds for querying the host status'),
cfg.IntOpt("image_cache_manager_interval",
- default=40,
- help="Number of periodic scheduler ticks to wait between "
- "runs of the image cache manager."),
+ default=2400,
+ help='Number of seconds to wait between runs of the image '
+ 'cache manager'),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
@@ -155,9 +155,9 @@ running_deleted_opts = [
"Valid options are 'noop', 'log' and 'reap'. "
"Set to 'noop' to disable."),
cfg.IntOpt("running_deleted_instance_poll_interval",
- default=30,
- help="Number of periodic scheduler ticks to wait between "
- "runs of the cleanup task."),
+ default=1800,
+ help="Number of seconds to wait between runs of the cleanup "
+ "task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
@@ -629,8 +629,9 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(msg, instance=instance)
raise
except Exception:
+ exc_info = sys.exc_info()
# try to re-schedule instance:
- self._reschedule_or_reraise(context, instance,
+ self._reschedule_or_reraise(context, instance, exc_info,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
else:
@@ -652,16 +653,18 @@ class ComputeManager(manager.SchedulerDependentManager):
traceback.format_exception(type_, value, tb),
instance_uuid=instance_uuid)
- def _reschedule_or_reraise(self, context, instance, requested_networks,
- admin_password, injected_files, is_first_time,
- request_spec, filter_properties):
+ def _reschedule_or_reraise(self, context, instance, exc_info,
+ requested_networks, admin_password, injected_files, is_first_time,
+ request_spec, filter_properties):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
- exc_info = sys.exc_info()
instance_uuid = instance['uuid']
rescheduled = False
+ compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ exc_info[0], exc_info=exc_info)
+
try:
self._deallocate_network(context, instance)
except Exception:
@@ -1392,16 +1395,21 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "snapshot.start")
- self.driver.snapshot(context, instance, image_id)
-
if image_type == 'snapshot':
expected_task_state = task_states.IMAGE_SNAPSHOT
elif image_type == 'backup':
expected_task_state = task_states.IMAGE_BACKUP
+ def update_task_state(task_state, expected_state=expected_task_state):
+ self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state)
+
+ self.driver.snapshot(context, instance, image_id, update_task_state)
+
self._instance_update(context, instance['uuid'], task_state=None,
- expected_task_state=expected_task_state)
+ expected_task_state=task_states.IMAGE_UPLOADING)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
@@ -1845,8 +1853,9 @@ class ComputeManager(manager.SchedulerDependentManager):
reservations, request_spec, filter_properties, node)
except Exception:
# try to re-schedule the resize elsewhere:
+ exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
- instance_type, reservations, request_spec,
+ exc_info, instance_type, reservations, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
@@ -1857,7 +1866,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
- def _reschedule_resize_or_reraise(self, context, image, instance,
+ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, reservations, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
@@ -1867,10 +1876,12 @@ class ComputeManager(manager.SchedulerDependentManager):
if not filter_properties:
filter_properties = {}
- exc_info = sys.exc_info()
rescheduled = False
instance_uuid = instance['uuid']
+ compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ exc_info[0], exc_info=exc_info)
+
try:
scheduler_method = self.scheduler_rpcapi.prep_resize
method_args = (instance, instance_type, image, request_spec,
@@ -2430,9 +2441,11 @@ class ComputeManager(manager.SchedulerDependentManager):
if vol_stats:
LOG.debug(_("Updating volume usage cache with totals"))
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
- self.db.vol_usage_update(context, volume_id, rd_req, rd_bytes,
- wr_req, wr_bytes, instance['id'],
- update_totals=True)
+ self.conductor_api.vol_usage_update(context, volume_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance,
+ update_totals=True)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
@@ -2458,8 +2471,8 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def _get_compute_info(self, context, host):
- compute_node_ref = self.db.service_get_all_compute_by_host(context,
- host)
+ compute_node_ref = self.conductor_api.service_get_all_compute_by_host(
+ context, host)
try:
return compute_node_ref[0]['compute_node'][0]
except IndexError:
@@ -2874,7 +2887,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window > 0:
- migrations = self.db.migration_get_unconfirmed_by_dest_compute(
+ capi = self.conductor_api
+ migrations = capi.migration_get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host)
migrations_info = dict(migration_count=len(migrations),
@@ -3056,10 +3070,13 @@ class ComputeManager(manager.SchedulerDependentManager):
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
- self.db.vol_usage_update(context, usage['volume'], usage['rd_req'],
- usage['rd_bytes'], usage['wr_req'],
- usage['wr_bytes'], usage['instance_id'],
- last_refreshed=refreshed)
+ self.conductor_api.vol_usage_update(context, usage['volume'],
+ usage['rd_req'],
+ usage['rd_bytes'],
+ usage['wr_req'],
+ usage['wr_bytes'],
+ usage['instance'],
+ last_refreshed=refreshed)
def _send_volume_usage_notifications(self, context, start_time):
"""Queries vol usage cache table and sends a vol usage notification"""
@@ -3067,7 +3084,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# the last run of get_all_volume_usage and this one
# but detach stats will be recorded in db and returned from
# vol_get_usage_by_time
- vol_usages = self.db.vol_get_usage_by_time(context, start_time)
+ vol_usages = self.conductor_api.vol_get_usage_by_time(context,
+ start_time)
for vol_usage in vol_usages:
notifier.notify(context, 'volume.%s' % self.host, 'volume.usage',
notifier.INFO,
@@ -3119,7 +3137,7 @@ class ComputeManager(manager.SchedulerDependentManager):
capability['host_ip'] = CONF.my_ip
self.update_service_capabilities(capabilities)
- @manager.periodic_task(ticks_between_runs=10)
+ @manager.periodic_task(spacing=600.0)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
@@ -3295,8 +3313,7 @@ class ComputeManager(manager.SchedulerDependentManager):
new_resource_tracker_dict[nodename] = rt
self._resource_tracker_dict = new_resource_tracker_dict
- @manager.periodic_task(
- ticks_between_runs=CONF.running_deleted_instance_poll_interval)
+ @manager.periodic_task(spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
@@ -3418,8 +3435,8 @@ class ComputeManager(manager.SchedulerDependentManager):
aggregate, host,
isinstance(e, exception.AggregateError))
- @manager.periodic_task(
- ticks_between_runs=CONF.image_cache_manager_interval)
+ @manager.periodic_task(spacing=CONF.image_cache_manager_interval,
+ external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index c2966d554..8e2b8344a 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -33,6 +33,8 @@ SPAWNING = 'spawning'
# possible task states during snapshot()
IMAGE_SNAPSHOT = 'image_snapshot'
+IMAGE_PENDING_UPLOAD = 'image_pending_upload'
+IMAGE_UPLOADING = 'image_uploading'
# possible task states during backup()
IMAGE_BACKUP = 'image_backup'
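Drivers are now handed an update_task_state callback during snapshot(), and the compute manager expects the instance to reach IMAGE_UPLOADING before it clears the task state. A hypothetical driver-side sketch of how the two new states are meant to flow (the helper methods are placeholders, not from this patch):

    from nova.compute import task_states

    def snapshot(self, context, instance, image_id, update_task_state):
        # Capturing the disk: flip to 'pending upload' first.
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        snapshot_data = self._capture_disk(instance)

        # About to push the image to glance: flip to 'uploading', stating
        # which state we expect to be coming from.
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        self._upload_image(context, image_id, snapshot_data)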
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index ca65e83db..66badb756 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -68,6 +68,9 @@ class LocalAPI(object):
# other/future users of this sort of functionality.
self._manager = ExceptionHelper(manager.ConductorManager())
+ def ping(self, context, arg, timeout=None):
+ return self._manager.ping(context, arg)
+
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database"""
return self._manager.instance_update(context, instance_uuid, updates)
@@ -110,6 +113,12 @@ class LocalAPI(object):
def migration_get(self, context, migration_id):
return self._manager.migration_get(context, migration_id)
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ return self._manager.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
@@ -198,6 +207,33 @@ class LocalAPI(object):
return self._manager.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
+ def vol_get_usage_by_time(self, context, start_time):
+ return self._manager.vol_get_usage_by_time(context, start_time)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ return self._manager.vol_usage_update(context, vol_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance, last_refreshed,
+ update_totals)
+
+ def service_get_all(self, context):
+ return self._manager.service_get_all_by(context)
+
+ def service_get_all_by_topic(self, context, topic):
+ return self._manager.service_get_all_by(context, topic=topic)
+
+ def service_get_all_by_host(self, context, host):
+ return self._manager.service_get_all_by(context, host=host)
+
+ def service_get_by_host_and_topic(self, context, host, topic):
+ return self._manager.service_get_all_by(context, topic, host)
+
+ def service_get_all_compute_by_host(self, context, host):
+ return self._manager.service_get_all_by(context, 'compute', host)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager"""
@@ -205,6 +241,9 @@ class API(object):
def __init__(self):
self.conductor_rpcapi = rpcapi.ConductorAPI()
+ def ping(self, context, arg, timeout=None):
+ return self.conductor_rpcapi.ping(context, arg, timeout)
+
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database"""
return self.conductor_rpcapi.instance_update(context, instance_uuid,
@@ -251,6 +290,13 @@ class API(object):
def migration_get(self, context, migration_id):
return self.conductor_rpcapi.migration_get(context, migration_id)
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ crpcapi = self.conductor_rpcapi
+ return crpcapi.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+
def migration_update(self, context, migration, status):
return self.conductor_rpcapi.migration_update(context, migration,
status)
@@ -348,3 +394,31 @@ class API(object):
volume_id):
return self.conductor_rpcapi.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
+
+ def vol_get_usage_by_time(self, context, start_time):
+ return self.conductor_rpcapi.vol_get_usage_by_time(context, start_time)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ return self.conductor_rpcapi.vol_usage_update(context, vol_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance, last_refreshed,
+ update_totals)
+
+ def service_get_all(self, context):
+ return self.conductor_rpcapi.service_get_all_by(context)
+
+ def service_get_all_by_topic(self, context, topic):
+ return self.conductor_rpcapi.service_get_all_by(context, topic=topic)
+
+ def service_get_all_by_host(self, context, host):
+ return self.conductor_rpcapi.service_get_all_by(context, host=host)
+
+ def service_get_by_host_and_topic(self, context, host, topic):
+ return self.conductor_rpcapi.service_get_all_by(context, topic, host)
+
+ def service_get_all_compute_by_host(self, context, host):
+ return self.conductor_rpcapi.service_get_all_by(context, 'compute',
+ host)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 7ddfd497e..123e7e13f 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,12 +43,15 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD"""
- RPC_API_VERSION = '1.18'
+ RPC_API_VERSION = '1.22'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
+ def ping(self, context, arg):
+ return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
+
@rpc_common.client_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
@@ -83,6 +86,13 @@ class ConductorManager(manager.SchedulerDependentManager):
migration_id)
return jsonutils.to_primitive(migration_ref)
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ migrations = self.db.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+ return jsonutils.to_primitive(migrations)
+
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
@@ -215,3 +225,31 @@ class ConductorManager(manager.SchedulerDependentManager):
def instance_type_get(self, context, instance_type_id):
result = self.db.instance_type_get(context, instance_type_id)
return jsonutils.to_primitive(result)
+
+ def vol_get_usage_by_time(self, context, start_time):
+ result = self.db.vol_get_usage_by_time(context, start_time)
+ return jsonutils.to_primitive(result)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ self.db.vol_usage_update(context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance['uuid'], last_refreshed,
+ update_totals)
+
+ def service_get_all_by(self, context, topic=None, host=None):
+ if not any((topic, host)):
+ result = self.db.service_get_all(context)
+ elif all((topic, host)):
+ if topic == 'compute':
+ result = self.db.service_get_all_compute_by_host(context,
+ host)
+ else:
+ result = self.db.service_get_by_host_and_topic(context,
+ host, topic)
+ elif topic:
+ result = self.db.service_get_all_by_topic(context, topic)
+ elif host:
+ result = self.db.service_get_all_by_host(context, host)
+
+ return jsonutils.to_primitive(result)
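The single service_get_all_by() entry point above fans out to the individual db.service_get_* calls depending on which arguments are supplied. For illustration, using the argument combinations from the LocalAPI/API wrappers earlier in this change (the host and topic values are made up):

    svc.service_get_all_by(ctxt)                        # db.service_get_all
    svc.service_get_all_by(ctxt, topic='scheduler')     # db.service_get_all_by_topic
    svc.service_get_all_by(ctxt, host='node1')          # db.service_get_all_by_host
    svc.service_get_all_by(ctxt, 'compute', 'node1')    # db.service_get_all_compute_by_host
    svc.service_get_all_by(ctxt, 'scheduler', 'node1')  # db.service_get_by_host_and_topic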
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index aeaad8868..0f2fe1f0c 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -50,6 +50,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
+ 1.19 - Added vol_get_usage_by_time and vol_usage_update
+ 1.20 - Added migration_get_unconfirmed_by_dest_compute
+ 1.21 - Added service_get_all_by
+ 1.22 - Added ping
"""
BASE_RPC_API_VERSION = '1.0'
@@ -59,6 +63,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=CONF.conductor.topic,
default_version=self.BASE_RPC_API_VERSION)
+ def ping(self, context, arg, timeout=None):
+ arg_p = jsonutils.to_primitive(arg)
+ msg = self.make_msg('ping', arg=arg_p)
+ return self.call(context, msg, version='1.22', timeout=timeout)
+
def instance_update(self, context, instance_uuid, updates):
updates_p = jsonutils.to_primitive(updates)
return self.call(context,
@@ -75,6 +84,14 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('migration_get', migration_id=migration_id)
return self.call(context, msg, version='1.4')
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ msg = self.make_msg('migration_get_unconfirmed_by_dest_compute',
+ confirm_window=confirm_window,
+ dest_compute=dest_compute)
+ return self.call(context, msg, version='1.20')
+
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
msg = self.make_msg('migration_update', migration=migration_p,
@@ -208,3 +225,23 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('instance_type_get',
instance_type_id=instance_type_id)
return self.call(context, msg, version='1.18')
+
+ def vol_get_usage_by_time(self, context, start_time):
+ start_time_p = jsonutils.to_primitive(start_time)
+ msg = self.make_msg('vol_get_usage_by_time', start_time=start_time_p)
+ return self.call(context, msg, version='1.19')
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('vol_usage_update', vol_id=vol_id, rd_req=rd_req,
+ rd_bytes=rd_bytes, wr_req=wr_req,
+ wr_bytes=wr_bytes,
+ instance=instance_p, last_refreshed=last_refreshed,
+ update_totals=update_totals)
+ return self.call(context, msg, version='1.19')
+
+ def service_get_all_by(self, context, topic=None, host=None):
+ msg = self.make_msg('service_get_all_by', topic=topic, host=host)
+ return self.call(context, msg, version='1.21')
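The new ping call gives callers a cheap way to confirm that a nova-conductor service is actually consuming from the queue. A hypothetical caller (ctxt and the timeout value are illustrative):

    from nova.conductor import rpcapi

    conductor = rpcapi.ConductorAPI()
    # Raises an RPC timeout if nothing answers on the conductor topic.
    reply = conductor.ping(ctxt, 'hello', timeout=10)
    # reply == {'service': 'conductor', 'arg': 'hello'}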
diff --git a/nova/config.py b/nova/config.py
index 4f4fbe822..2cc153203 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -21,6 +21,7 @@ import os
import socket
from nova.openstack.common import cfg
+from nova.openstack.common import rpc
def _get_my_ip():
@@ -55,14 +56,7 @@ core_opts = [
help="Top-level directory for maintaining nova's state"),
]
-debug_opts = [
- cfg.BoolOpt('fake_network',
- default=False,
- help='If passed, use fake network devices and addresses'),
-]
-
cfg.CONF.register_cli_opts(core_opts)
-cfg.CONF.register_cli_opts(debug_opts)
global_opts = [
cfg.StrOpt('my_ip',
@@ -176,15 +170,13 @@ global_opts = [
cfg.StrOpt('volume_api_class',
default='nova.volume.cinder.API',
help='The full class name of the volume API class to use'),
- cfg.StrOpt('control_exchange',
- default='nova',
- help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
cfg.CONF.register_opts(global_opts)
def parse_args(argv, default_config_files=None):
+ rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
project='nova',
default_config_files=default_config_files)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e37ccac92..dce92ba54 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -3800,43 +3800,42 @@ def instance_metadata_get_item(context, instance_uuid, key, session=None):
@require_context
def instance_metadata_update(context, instance_uuid, metadata, delete,
session=None):
+ all_keys = metadata.keys()
+ synchronize_session = "fetch"
if session is None:
session = get_session()
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = instance_metadata_get(context, instance_uuid,
- session=session)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = instance_metadata_get_item(context, instance_uuid,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
+ synchronize_session = False
+ with session.begin(subtransactions=True):
+ if delete:
+ _instance_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(~models.InstanceMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=synchronize_session)
+
+ already_existing_keys = []
+ meta_refs = _instance_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(models.InstanceMetadata.key.in_(all_keys)).\
+ all()
- # update the value whether it exists or not
- item = {"value": meta_value}
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- try:
- meta_ref = instance_metadata_get_item(context, instance_uuid,
- meta_key, session)
- except exception.InstanceMetadataNotFound:
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
meta_ref = models.InstanceMetadata()
- item.update({"key": meta_key, "instance_uuid": instance_uuid})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ session.add(meta_ref)
- return metadata
+ return metadata
#######################
# System-owned metadata
+
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
@@ -3872,39 +3871,36 @@ def _instance_system_metadata_get_item(context, instance_uuid, key,
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete,
session=None):
+ all_keys = metadata.keys()
+ synchronize_session = "fetch"
if session is None:
session = get_session()
+ synchronize_session = False
+ with session.begin(subtransactions=True):
+ if delete:
+ _instance_system_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=synchronize_session)
+
+ already_existing_keys = []
+ meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
+ all()
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = instance_system_metadata_get(
- context, instance_uuid, session=session)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = _instance_system_metadata_get_item(
- context, instance_uuid, meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
-
- # update the value whether it exists or not
- item = {"value": meta_value}
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- try:
- meta_ref = _instance_system_metadata_get_item(
- context, instance_uuid, meta_key, session)
- except exception.InstanceSystemMetadataNotFound:
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
- item.update({"key": meta_key, "instance_uuid": instance_uuid})
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ session.add(meta_ref)
- meta_ref.update(item)
- meta_ref.save(session=session)
-
- return metadata
+ return metadata
####################
@@ -4347,6 +4343,16 @@ def aggregate_get_all(context):
@require_admin_context
+def aggregate_metadata_get_query(context, aggregate_id, session=None,
+ read_deleted="yes"):
+ return model_query(context,
+ models.AggregateMetadata,
+ read_deleted=read_deleted,
+ session=session).\
+ filter_by(aggregate_id=aggregate_id)
+
+
+@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
@@ -4391,33 +4397,31 @@ def aggregate_metadata_get_item(context, aggregate_id, key, session=None):
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
session = get_session()
+ all_keys = metadata.keys()
+ with session.begin():
+ query = aggregate_metadata_get_query(context, aggregate_id,
+ session=session)
+ if set_delete:
+ query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=False)
- if set_delete:
- original_metadata = aggregate_metadata_get(context, aggregate_id)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = aggregate_metadata_get_item(context, aggregate_id,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
+ query = query.filter(models.AggregateMetadata.key.in_(all_keys))
+ already_existing_keys = []
+ for meta_ref in query.all():
+ key = meta_ref.key
+ meta_ref.update({"value": metadata[key],
+ "deleted": False,
+ "deleted_at": None})
+ already_existing_keys.append(key)
- for meta_key, meta_value in metadata.iteritems():
- item = {"value": meta_value}
- try:
- meta_ref = aggregate_metadata_get_item(context, aggregate_id,
- meta_key, session)
- if meta_ref.deleted:
- item.update({'deleted': False, 'deleted_at': None})
- except exception.AggregateMetadataNotFound:
+ for key in set(all_keys) - set(already_existing_keys):
meta_ref = models.AggregateMetadata()
- item.update({"key": meta_key, "aggregate_id": aggregate_id})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
+ meta_ref.update({"key": key,
+ "value": metadata[key],
+ "aggregate_id": aggregate_id})
+ session.add(meta_ref)
- return metadata
+ return metadata
@require_admin_context
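All three metadata helpers reworked above follow the same pattern: inside one transaction, soft-delete rows whose key is not in the incoming mapping (when deletion is requested), update rows whose key already exists, then insert the remainder. A compact, hedged restatement of that pattern (Model and _metadata_query stand in for the per-table specifics):

    with session.begin(subtransactions=True):
        query = _metadata_query(context, parent_id, session=session)
        if delete:
            query.filter(~Model.key.in_(metadata.keys())).\
                soft_delete(synchronize_session=False)

        existing = query.filter(Model.key.in_(metadata.keys())).all()
        for ref in existing:
            ref.update({'value': metadata[ref.key]})

        for key in set(metadata) - set(ref.key for ref in existing):
            session.add(Model(key=key, value=metadata[key]))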
diff --git a/nova/exception.py b/nova/exception.py
index c484b5120..9507a0088 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -410,6 +410,10 @@ class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
+class InvalidPeriodicTaskArg(Invalid):
+ message = _("Unexpected argument for periodic task creation: %(arg)s.")
+
+
class ConstraintNotMet(NovaException):
message = _("Constraint not met.")
code = 412
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index d171d1377..3fb397298 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-01-03 00:03+0000\n"
+"POT-Creation-Date: 2013-01-06 00:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -145,8 +145,8 @@ msgstr ""
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:233 nova/api/ec2/cloud.py:436 nova/api/ec2/cloud.py:461
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2313
+#: nova/exception.py:233 nova/api/ec2/cloud.py:447 nova/api/ec2/cloud.py:472
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2321
msgid "Keypair data is invalid"
msgstr ""
@@ -176,7 +176,7 @@ msgstr ""
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:261 nova/api/ec2/cloud.py:618
+#: nova/exception.py:261 nova/api/ec2/cloud.py:629
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
@@ -774,320 +774,353 @@ msgstr ""
#: nova/exception.py:773
#, python-format
-msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
#: nova/exception.py:777
#, python-format
+msgid "Service API method not found: %(detail)s"
+msgstr ""
+
+#: nova/exception.py:781
+msgid "Timeout waiting for response from cell"
+msgstr ""
+
+#: nova/exception.py:785
+#, python-format
+msgid "Cell message has reached maximum hop count: %(hop_count)s"
+msgstr ""
+
+#: nova/exception.py:789
+msgid "No cells available matching scheduling criteria."
+msgstr ""
+
+#: nova/exception.py:793
+#, python-format
+msgid "Exception received during cell processing: %(exc_name)s."
+msgstr ""
+
+#: nova/exception.py:797
+#, python-format
+msgid "Cell is not known for instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/exception.py:801
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr ""
+
+#: nova/exception.py:805
+#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:782
+#: nova/exception.py:810
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:786
+#: nova/exception.py:814
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:791
+#: nova/exception.py:819
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:796
+#: nova/exception.py:824
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:801
+#: nova/exception.py:829
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:805
+#: nova/exception.py:833
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:809
+#: nova/exception.py:837
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:842
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:818
+#: nova/exception.py:846
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:822
+#: nova/exception.py:850
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:826
+#: nova/exception.py:854
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:830
+#: nova/exception.py:858
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:834
+#: nova/exception.py:862
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:838
+#: nova/exception.py:866
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:842
+#: nova/exception.py:870
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:846
+#: nova/exception.py:874
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:850
+#: nova/exception.py:878
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:855
+#: nova/exception.py:883
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:859
+#: nova/exception.py:887
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:863
+#: nova/exception.py:891
msgid "Migration error"
msgstr ""
-#: nova/exception.py:867
+#: nova/exception.py:895
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:873
+#: nova/exception.py:901
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:877
+#: nova/exception.py:905
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:909
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:885
+#: nova/exception.py:913
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:889
+#: nova/exception.py:917
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:893
+#: nova/exception.py:921
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:897
+#: nova/exception.py:925
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:929
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:933
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:909
+#: nova/exception.py:937
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:913
+#: nova/exception.py:941
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:920
+#: nova/exception.py:948
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:925
+#: nova/exception.py:953
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:929
+#: nova/exception.py:957
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:933
+#: nova/exception.py:961
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:937
+#: nova/exception.py:965
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:941
+#: nova/exception.py:969
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:945
+#: nova/exception.py:973
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:949
+#: nova/exception.py:977
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:953
+#: nova/exception.py:981
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:958
+#: nova/exception.py:986
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:962
+#: nova/exception.py:990
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:966
+#: nova/exception.py:994
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:970
+#: nova/exception.py:998
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:975
+#: nova/exception.py:1003
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:979
+#: nova/exception.py:1007
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:983
+#: nova/exception.py:1011
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:989
+#: nova/exception.py:1017
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:993
+#: nova/exception.py:1021
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:997
+#: nova/exception.py:1025
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1001
+#: nova/exception.py:1029
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1005
+#: nova/exception.py:1033
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1009
+#: nova/exception.py:1037
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1013
+#: nova/exception.py:1041
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1017
+#: nova/exception.py:1045
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1021
+#: nova/exception.py:1049
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1026
+#: nova/exception.py:1054
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1031
+#: nova/exception.py:1059
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1037
+#: nova/exception.py:1065
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1041
+#: nova/exception.py:1069
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1046
+#: nova/exception.py:1074
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1050
+#: nova/exception.py:1078
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
@@ -1318,7 +1351,7 @@ msgstr ""
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1113 nova/virt/configdrive.py:165
+#: nova/utils.py:1113 nova/virt/configdrive.py:177
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
@@ -1353,7 +1386,7 @@ msgstr ""
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
-#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:37
+#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:64
msgid "Request is too large."
msgstr ""
@@ -1499,241 +1532,241 @@ msgstr ""
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:384
+#: nova/api/ec2/cloud.py:395
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:410
+#: nova/api/ec2/cloud.py:421
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:426
+#: nova/api/ec2/cloud.py:437
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:433 nova/api/ec2/cloud.py:458
+#: nova/api/ec2/cloud.py:444 nova/api/ec2/cloud.py:469
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:439 nova/api/ec2/cloud.py:464
+#: nova/api/ec2/cloud.py:450 nova/api/ec2/cloud.py:475
#: nova/api/openstack/compute/contrib/keypairs.py:101
#, python-format
msgid "Key pair '%s' already exists."
msgstr ""
-#: nova/api/ec2/cloud.py:448
+#: nova/api/ec2/cloud.py:459
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:471
+#: nova/api/ec2/cloud.py:482
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:605 nova/api/ec2/cloud.py:726
+#: nova/api/ec2/cloud.py:616 nova/api/ec2/cloud.py:737
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:610
+#: nova/api/ec2/cloud.py:621
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:680
+#: nova/api/ec2/cloud.py:659 nova/api/ec2/cloud.py:691
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:671
+#: nova/api/ec2/cloud.py:682
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:737
+#: nova/api/ec2/cloud.py:748
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:813
+#: nova/api/ec2/cloud.py:824
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:817 nova/api/openstack/compute/contrib/volumes.py:241
+#: nova/api/ec2/cloud.py:828 nova/api/openstack/compute/contrib/volumes.py:241
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:845
+#: nova/api/ec2/cloud.py:856
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:858
+#: nova/api/ec2/cloud.py:869
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:866
+#: nova/api/ec2/cloud.py:877
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:879 nova/api/openstack/compute/contrib/volumes.py:428
+#: nova/api/ec2/cloud.py:890 nova/api/openstack/compute/contrib/volumes.py:428
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:885
+#: nova/api/ec2/cloud.py:896
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:911 nova/api/ec2/cloud.py:968
-#: nova/api/ec2/cloud.py:1504 nova/api/ec2/cloud.py:1519
+#: nova/api/ec2/cloud.py:922 nova/api/ec2/cloud.py:979
+#: nova/api/ec2/cloud.py:1528 nova/api/ec2/cloud.py:1543
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1033
+#: nova/api/ec2/cloud.py:1049
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1184
+#: nova/api/ec2/cloud.py:1208
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1188
+#: nova/api/ec2/cloud.py:1212
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1192
+#: nova/api/ec2/cloud.py:1216
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1197
+#: nova/api/ec2/cloud.py:1221
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1200
+#: nova/api/ec2/cloud.py:1224
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1208
+#: nova/api/ec2/cloud.py:1232
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1216
+#: nova/api/ec2/cloud.py:1240
#: nova/api/openstack/compute/contrib/floating_ips.py:257
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1225
+#: nova/api/ec2/cloud.py:1249
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1228
+#: nova/api/ec2/cloud.py:1252
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1231
+#: nova/api/ec2/cloud.py:1255
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1239
+#: nova/api/ec2/cloud.py:1263
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1244
+#: nova/api/ec2/cloud.py:1268
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1247
+#: nova/api/ec2/cloud.py:1271
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1274
+#: nova/api/ec2/cloud.py:1298
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1306
+#: nova/api/ec2/cloud.py:1330
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1316
+#: nova/api/ec2/cloud.py:1340
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1325
+#: nova/api/ec2/cloud.py:1349
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1334
+#: nova/api/ec2/cloud.py:1358
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1425
+#: nova/api/ec2/cloud.py:1449
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1441
+#: nova/api/ec2/cloud.py:1465
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1460
+#: nova/api/ec2/cloud.py:1484
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1522
+#: nova/api/ec2/cloud.py:1546
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1524
+#: nova/api/ec2/cloud.py:1548
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1526
+#: nova/api/ec2/cloud.py:1550
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1528
+#: nova/api/ec2/cloud.py:1552
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1541
+#: nova/api/ec2/cloud.py:1565
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1570
+#: nova/api/ec2/cloud.py:1594
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1600
+#: nova/api/ec2/cloud.py:1624
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1618
+#: nova/api/ec2/cloud.py:1642
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1651
+#: nova/api/ec2/cloud.py:1675
msgid "Invalid CIDR"
msgstr ""
@@ -2310,7 +2343,7 @@ msgstr ""
#: nova/api/openstack/compute/servers.py:1198
#: nova/api/openstack/compute/contrib/aggregates.py:143
-#: nova/api/openstack/compute/contrib/coverage_ext.py:223
+#: nova/api/openstack/compute/contrib/coverage_ext.py:246
#: nova/api/openstack/compute/contrib/keypairs.py:78
msgid "Invalid request body"
msgstr ""
@@ -2493,17 +2526,37 @@ msgstr ""
msgid "Unable to get console"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:87
+#: nova/api/openstack/compute/contrib/coverage_ext.py:101
#, python-format
msgid "Can't connect to service: %s, no portspecified\n"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:90
+#: nova/api/openstack/compute/contrib/coverage_ext.py:104
#, python-format
msgid "No backdoor API command for service: %s\n"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:221
+#: nova/api/openstack/compute/contrib/coverage_ext.py:123
+msgid "Coverage begin"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:157
+msgid "Coverage not running"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:186
+msgid "Invalid path"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:190
+msgid "No path given for report file"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:197
+msgid "You can't use html reports without combining"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:244
#, python-format
msgid "Coverage doesn't have %s action"
msgstr ""
@@ -2831,6 +2884,122 @@ msgstr ""
msgid "Instance has had its instance_type removed from the DB"
msgstr ""
+#: nova/cells/messaging.py:198
+#, python-format
+msgid "Error processing message locally: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:352 nova/cells/messaging.py:358
+#, python-format
+msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
+msgstr ""
+
+#: nova/cells/messaging.py:368
+#, python-format
+msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
+msgstr ""
+
+#: nova/cells/messaging.py:392
+#, python-format
+msgid "Error locating next hop for message: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:419
+#, python-format
+msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:498
+#, python-format
+msgid "Error locating next hops for message: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:518
+#, python-format
+msgid "Error sending message to next hops: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:536
+#, python-format
+msgid "Error waiting for responses from neighbor cells: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:628
+#, python-format
+msgid "Unknown method '%(method)s' in compute API"
+msgstr ""
+
+#: nova/cells/messaging.py:651
+#, python-format
+msgid "Received capabilities from child cell %(cell_name)s: %(capabilities)s"
+msgstr ""
+
+#: nova/cells/messaging.py:660
+#, python-format
+msgid "Received capacities from child cell %(cell_name)s: %(capacities)s"
+msgstr ""
+
+#: nova/cells/messaging.py:719
+#, python-format
+msgid "Got update for instance %(instance_uuid)s: %(instance)s"
+msgstr ""
+
+#: nova/cells/messaging.py:742
+#, python-format
+msgid "Got update to delete instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/cells/messaging.py:757
+#, python-format
+msgid "Got broadcast to %(delete_type)s delete instance"
+msgstr ""
+
+#: nova/cells/messaging.py:771
+#, python-format
+msgid "Got message to create instance fault: %(instance_fault)s"
+msgstr ""
+
+#: nova/cells/messaging.py:921
+#, python-format
+msgid "Updating parents with our capabilities: %(capabs)s"
+msgstr ""
+
+#: nova/cells/messaging.py:941
+#, python-format
+msgid "Updating parents with our capacities: %(capacities)s"
+msgstr ""
+
+#: nova/cells/scheduler.py:94
+#, python-format
+msgid "Scheduling with routing_path=%(routing_path)s"
+msgstr ""
+
+#: nova/cells/scheduler.py:117
+#, python-format
+msgid ""
+"No cells available when scheduling. Will retry in %(sleep_time)s "
+"second(s)"
+msgstr ""
+
+#: nova/cells/scheduler.py:124
+#, python-format
+msgid "Error scheduling instances %(instance_uuids)s"
+msgstr ""
+
+#: nova/cells/state.py:264
+msgid "Updating cell cache from db."
+msgstr ""
+
+#: nova/cells/state.py:300
+#, python-format
+msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
+msgstr ""
+
+#: nova/cells/state.py:315
+#, python-format
+msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
+msgstr ""
+
#: nova/cloudpipe/pipelib.py:43
msgid "Instance type for vpn instances"
msgstr ""
@@ -2965,95 +3134,95 @@ msgstr ""
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:1968
+#: nova/compute/api.py:1976
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1976
+#: nova/compute/api.py:1984
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2044
+#: nova/compute/api.py:2052
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2129
+#: nova/compute/api.py:2137
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2286
+#: nova/compute/api.py:2294
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2290
+#: nova/compute/api.py:2298
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2391
+#: nova/compute/api.py:2399
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2394
+#: nova/compute/api.py:2402
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2402
+#: nova/compute/api.py:2410
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2408
+#: nova/compute/api.py:2416
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2428
+#: nova/compute/api.py:2436
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2431
+#: nova/compute/api.py:2439
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2438
+#: nova/compute/api.py:2446
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2503
+#: nova/compute/api.py:2511
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2511
+#: nova/compute/api.py:2519
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2514
+#: nova/compute/api.py:2522
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2771
+#: nova/compute/api.py:2779
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2780
+#: nova/compute/api.py:2788
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2783
+#: nova/compute/api.py:2791
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2794
+#: nova/compute/api.py:2802
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3174,478 +3343,478 @@ msgstr ""
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:444
+#: nova/compute/manager.py:447
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:520
+#: nova/compute/manager.py:523
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:592 nova/compute/manager.py:1830
+#: nova/compute/manager.py:595 nova/compute/manager.py:1834
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:625
+#: nova/compute/manager.py:628
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:648
+#: nova/compute/manager.py:651
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:681 nova/compute/manager.py:1881
+#: nova/compute/manager.py:684 nova/compute/manager.py:1885
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:699
+#: nova/compute/manager.py:702
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:704
+#: nova/compute/manager.py:707
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:710
+#: nova/compute/manager.py:713
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:737
+#: nova/compute/manager.py:741
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:767
+#: nova/compute/manager.py:771
msgid "Instance has already been created"
msgstr ""
-#: nova/compute/manager.py:813
+#: nova/compute/manager.py:817
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:819
+#: nova/compute/manager.py:823
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:829
+#: nova/compute/manager.py:833
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:850
+#: nova/compute/manager.py:854
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:854
+#: nova/compute/manager.py:858
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:867
+#: nova/compute/manager.py:871
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:885
+#: nova/compute/manager.py:889
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:909
+#: nova/compute/manager.py:913
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:981
+#: nova/compute/manager.py:985
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1012
+#: nova/compute/manager.py:1016
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1015
+#: nova/compute/manager.py:1019
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1022
+#: nova/compute/manager.py:1026
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1047
+#: nova/compute/manager.py:1051
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1087 nova/compute/manager.py:2049
-#: nova/compute/manager.py:3375
+#: nova/compute/manager.py:1090 nova/compute/manager.py:2053
+#: nova/compute/manager.py:3388
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1221
+#: nova/compute/manager.py:1224
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1307
+#: nova/compute/manager.py:1311
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1331
+#: nova/compute/manager.py:1335
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1340
+#: nova/compute/manager.py:1344
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1377
+#: nova/compute/manager.py:1381
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1383
+#: nova/compute/manager.py:1387
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1436
+#: nova/compute/manager.py:1440
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1443
+#: nova/compute/manager.py:1447
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1448
+#: nova/compute/manager.py:1452
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1479
+#: nova/compute/manager.py:1483
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1486
+#: nova/compute/manager.py:1490
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1496
+#: nova/compute/manager.py:1500
msgid "set_admin_password is not implemented by this driver."
msgstr ""
-#: nova/compute/manager.py:1512
+#: nova/compute/manager.py:1516
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1520
+#: nova/compute/manager.py:1524
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1535
+#: nova/compute/manager.py:1539
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1539
+#: nova/compute/manager.py:1543
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1560
+#: nova/compute/manager.py:1564
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1573
+#: nova/compute/manager.py:1577
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1607
+#: nova/compute/manager.py:1611
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1628
+#: nova/compute/manager.py:1632
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1797
+#: nova/compute/manager.py:1801
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:1803
+#: nova/compute/manager.py:1807
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1812
+#: nova/compute/manager.py:1816
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2046
+#: nova/compute/manager.py:2050
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2102
+#: nova/compute/manager.py:2106
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2119
+#: nova/compute/manager.py:2123
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2157
+#: nova/compute/manager.py:2161
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2187
+#: nova/compute/manager.py:2191
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2209
+#: nova/compute/manager.py:2213
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2214
+#: nova/compute/manager.py:2218
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2217
+#: nova/compute/manager.py:2221
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2234
+#: nova/compute/manager.py:2238
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2259
+#: nova/compute/manager.py:2263
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2287
+#: nova/compute/manager.py:2291
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2331
+#: nova/compute/manager.py:2336
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2340
+#: nova/compute/manager.py:2345
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2355
+#: nova/compute/manager.py:2360
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2384
+#: nova/compute/manager.py:2390
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2388
+#: nova/compute/manager.py:2394
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2401
+#: nova/compute/manager.py:2407
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2426
+#: nova/compute/manager.py:2431
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2463
+#: nova/compute/manager.py:2468
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2536
+#: nova/compute/manager.py:2541
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2597
+#: nova/compute/manager.py:2602
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2625
+#: nova/compute/manager.py:2630
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2678
+#: nova/compute/manager.py:2683
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2680
+#: nova/compute/manager.py:2685
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2694
+#: nova/compute/manager.py:2699
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2838
+#: nova/compute/manager.py:2842
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2882
+#: nova/compute/manager.py:2887
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2888
+#: nova/compute/manager.py:2893
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2897
+#: nova/compute/manager.py:2902
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2904
+#: nova/compute/manager.py:2909
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2908
+#: nova/compute/manager.py:2913
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2915
+#: nova/compute/manager.py:2920
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2923
+#: nova/compute/manager.py:2928
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2940
+#: nova/compute/manager.py:2943
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2958
+#: nova/compute/manager.py:2961
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2981
+#: nova/compute/manager.py:2984
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3094
+#: nova/compute/manager.py:3102
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3112
+#: nova/compute/manager.py:3120
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3140
+#: nova/compute/manager.py:3149
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3146 nova/compute/manager.py:3184
+#: nova/compute/manager.py:3155 nova/compute/manager.py:3193
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3171
+#: nova/compute/manager.py:3180
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3207
+#: nova/compute/manager.py:3216
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3219 nova/compute/manager.py:3230
-#: nova/compute/manager.py:3244
+#: nova/compute/manager.py:3228 nova/compute/manager.py:3239
+#: nova/compute/manager.py:3253
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3224
+#: nova/compute/manager.py:3233
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3237
+#: nova/compute/manager.py:3246
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3253
+#: nova/compute/manager.py:3262
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3261
+#: nova/compute/manager.py:3270
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3274
+#: nova/compute/manager.py:3285
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3329
+#: nova/compute/manager.py:3341
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3336
+#: nova/compute/manager.py:3348
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3343
+#: nova/compute/manager.py:3355
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
@@ -3734,14 +3903,14 @@ msgstr ""
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:535
+#: nova/compute/resource_tracker.py:538
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
"memory"
msgstr ""
-#: nova/compute/resource_tracker.py:548
+#: nova/compute/resource_tracker.py:551
#, python-format
msgid "Missing keys: %s"
msgstr ""
@@ -3755,16 +3924,20 @@ msgstr ""
msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:94
+#: nova/compute/utils.py:101
#, python-format
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/manager.py:59
+#: nova/conductor/manager.py:62
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
+#: nova/conductor/manager.py:198
+msgid "Invalid block_device_mapping_destroy invocation"
+msgstr ""
+
#: nova/console/manager.py:80 nova/console/vmrc_manager.py:62
msgid "Adding console"
msgstr ""
@@ -3827,19 +4000,39 @@ msgstr ""
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
+#: nova/db/api.py:580
+msgid "Failed to notify cells of instance destroy"
+msgstr ""
+
+#: nova/db/api.py:689 nova/db/api.py:710
+msgid "Failed to notify cells of instance update"
+msgstr ""
+
+#: nova/db/api.py:749
+msgid "Failed to notify cells of instance info cache update"
+msgstr ""
+
+#: nova/db/api.py:1458
+msgid "Failed to notify cells of bw_usage update"
+msgstr ""
+
+#: nova/db/api.py:1602
+msgid "Failed to notify cells of instance fault"
+msgstr ""
+
#: nova/db/sqlalchemy/api.py:182 nova/virt/baremetal/db/sqlalchemy/api.py:61
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1371
+#: nova/db/sqlalchemy/api.py:1374
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:2733
+#: nova/db/sqlalchemy/api.py:2736
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
@@ -4117,69 +4310,69 @@ msgstr ""
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:324
+#: nova/network/manager.py:326
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:333 nova/network/manager.py:598
+#: nova/network/manager.py:335 nova/network/manager.py:606
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:348
+#: nova/network/manager.py:350
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:412
+#: nova/network/manager.py:414
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:430
+#: nova/network/manager.py:432
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:434
+#: nova/network/manager.py:436
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:455
+#: nova/network/manager.py:457
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:517
+#: nova/network/manager.py:519
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:729
+#: nova/network/manager.py:753
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:736
+#: nova/network/manager.py:760
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:766
+#: nova/network/manager.py:790
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:774
+#: nova/network/manager.py:798
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:821
+#: nova/network/manager.py:845
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4187,39 +4380,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:867
+#: nova/network/manager.py:891
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:877
+#: nova/network/manager.py:901
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:993
+#: nova/network/manager.py:1017
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:997
+#: nova/network/manager.py:1021
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1124
+#: nova/network/manager.py:1148
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1129
+#: nova/network/manager.py:1153
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1165
+#: nova/network/manager.py:1189
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1395
+#: nova/network/manager.py:1419
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4227,89 +4420,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1483
+#: nova/network/manager.py:1507
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1502
+#: nova/network/manager.py:1526
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1506
+#: nova/network/manager.py:1530
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1514
+#: nova/network/manager.py:1538
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1519
+#: nova/network/manager.py:1543
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1523
+#: nova/network/manager.py:1547
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1526
+#: nova/network/manager.py:1550
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1545
+#: nova/network/manager.py:1569
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1569
+#: nova/network/manager.py:1593
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1589
+#: nova/network/manager.py:1613
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1670
+#: nova/network/manager.py:1694
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1673
+#: nova/network/manager.py:1697
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1684
+#: nova/network/manager.py:1708
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1741
+#: nova/network/manager.py:1765
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1761
+#: nova/network/manager.py:1785
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2284
+#: nova/network/manager.py:2308
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2291
+#: nova/network/manager.py:2315
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
@@ -4357,36 +4550,36 @@ msgstr ""
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:150
+#: nova/network/quantumv2/api.py:151
msgid "Port not found"
msgstr ""
-#: nova/network/quantumv2/api.py:158
+#: nova/network/quantumv2/api.py:159
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:170
+#: nova/network/quantumv2/api.py:171
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:179
+#: nova/network/quantumv2/api.py:180
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:189
+#: nova/network/quantumv2/api.py:190
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:204
+#: nova/network/quantumv2/api.py:205
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:458
+#: nova/network/quantumv2/api.py:459
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
@@ -4992,9 +5185,9 @@ msgid ""
" %(usable_ram)s MB usable ram."
msgstr ""
-#: nova/scheduler/filters/retry_filter.py:38
+#: nova/scheduler/filters/retry_filter.py:41
#, python-format
-msgid "Previously tried hosts: %(hosts)s. (host=%(host)s)"
+msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s"
msgstr ""
#: nova/scheduler/filters/trusted_filter.py:200
@@ -5125,7 +5318,7 @@ msgstr ""
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
-#: nova/tests/test_misc.py:57
+#: nova/tests/test_misc.py:59
#, python-format
msgid ""
"The following migrations are missing a downgrade:\n"
@@ -5258,39 +5451,39 @@ msgstr ""
#: nova/tests/compute/test_compute.py:650
#: nova/tests/compute/test_compute.py:668
#: nova/tests/compute/test_compute.py:719
-#: nova/tests/compute/test_compute.py:744
-#: nova/tests/compute/test_compute.py:2593
+#: nova/tests/compute/test_compute.py:746
+#: nova/tests/compute/test_compute.py:2604
#, python-format
msgid "Running instances: %s"
msgstr ""
#: nova/tests/compute/test_compute.py:656
#: nova/tests/compute/test_compute.py:691
-#: nova/tests/compute/test_compute.py:732
-#: nova/tests/compute/test_compute.py:762
+#: nova/tests/compute/test_compute.py:734
+#: nova/tests/compute/test_compute.py:764
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1180
+#: nova/tests/compute/test_compute.py:1182
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2604
+#: nova/tests/compute/test_compute.py:2615
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3073
+#: nova/tests/compute/test_compute.py:3085
msgid "wrong host/node"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:559
+#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:552
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:494
+#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:487
#, python-format
msgid "Failed to destroy vm %s"
msgstr ""
@@ -5415,7 +5608,7 @@ msgstr ""
msgid "Decoding JSON: %s"
msgstr ""
-#: nova/virt/configdrive.py:80
+#: nova/virt/configdrive.py:92
#, python-format
msgid "Added %(filepath)s to config drive"
msgstr ""
@@ -5502,45 +5695,45 @@ msgstr ""
msgid "Delete called on non-existing instance %s"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:84
+#: nova/virt/baremetal/ipmi.py:83
#, python-format
msgid "pid file %s does not contain any pid"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:107
+#: nova/virt/baremetal/ipmi.py:106
msgid "Node id not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:109
+#: nova/virt/baremetal/ipmi.py:108
msgid "Address not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:111
+#: nova/virt/baremetal/ipmi.py:110
msgid "User not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:113
+#: nova/virt/baremetal/ipmi.py:112
msgid "Password not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:129
+#: nova/virt/baremetal/ipmi.py:128
#, python-format
msgid "ipmitool stdout: '%(out)s', stderr: '%(err)%s'"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:155
+#: nova/virt/baremetal/ipmi.py:154
msgid "IPMI power on failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:177
+#: nova/virt/baremetal/ipmi.py:176
msgid "IPMI power off failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:187
+#: nova/virt/baremetal/ipmi.py:186
msgid "IPMI set next bootdev failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:192
+#: nova/virt/baremetal/ipmi.py:191
#, python-format
msgid "Activate node called, but node %s is already active"
msgstr ""
@@ -5641,40 +5834,33 @@ msgstr ""
msgid "Baremetal virtual interface %s not found"
msgstr ""
-#: nova/virt/disk/api.py:130
+#: nova/virt/disk/api.py:127
#, python-format
-msgid "Cannot resize filesystem %s to a smaller size."
+msgid "Checking if we can resize image %(image)s. size=%(size)s, CoW=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:137
+#: nova/virt/disk/api.py:133
#, python-format
-msgid "Checking if we can resize the COW image %s."
-msgstr ""
-
-#: nova/virt/disk/api.py:141
-#, python-format
-msgid ""
-"File injection failed for image %(image)s with error %(error)s. Cannot "
-"resize."
+msgid "Cannot resize filesystem %s to a smaller size."
msgstr ""
-#: nova/virt/disk/api.py:148
+#: nova/virt/disk/api.py:144
#, python-format
-msgid "Checking if we can resize the non-COW image %s."
+msgid "Unable to mount image %(image)s with error %(error)s. Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:152
+#: nova/virt/disk/api.py:154
#, python-format
msgid ""
"Unable to determine label for image %(image)s with error %(errror)s. "
"Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:232
+#: nova/virt/disk/api.py:234
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:277
+#: nova/virt/disk/api.py:279
#, python-format
msgid ""
"Inject data image=%(image)s key=%(key)s net=%(net)s metadata=%(metadata)s"
@@ -5682,53 +5868,53 @@ msgid ""
"partition=%(partition)s use_cow=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:301
+#: nova/virt/disk/api.py:303
#, python-format
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': "
"%(errors)s"
msgstr ""
-#: nova/virt/disk/api.py:318
+#: nova/virt/disk/api.py:320
#, python-format
msgid "Failed to unmount container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:340
+#: nova/virt/disk/api.py:342
#, python-format
msgid "Inject file fs=%(fs)s path=%(path)s append=%(append)s"
msgstr ""
-#: nova/virt/disk/api.py:349
+#: nova/virt/disk/api.py:351
#, python-format
msgid "Inject metadata fs=%(fs)s metadata=%(metadata)s"
msgstr ""
-#: nova/virt/disk/api.py:390
+#: nova/virt/disk/api.py:392
#, python-format
msgid "Inject key fs=%(fs)s key=%(key)s"
msgstr ""
-#: nova/virt/disk/api.py:418
+#: nova/virt/disk/api.py:420
#, python-format
msgid "Inject key fs=%(fs)s net=%(net)s"
msgstr ""
-#: nova/virt/disk/api.py:444
+#: nova/virt/disk/api.py:446
#, python-format
msgid "Inject admin password fs=%(fs)s admin_passwd=ha-ha-not-telling-you"
msgstr ""
-#: nova/virt/disk/api.py:489
+#: nova/virt/disk/api.py:491
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:518
+#: nova/virt/disk/api.py:520
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:534
+#: nova/virt/disk/api.py:536
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
@@ -5930,66 +6116,71 @@ msgstr ""
msgid "Mounting %(dev)s at %(dir)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:93
+#: nova/virt/disk/vfs/guestfs.py:92
#, python-format
msgid "Setting up appliance for %(imgfile)s %(imgfmt)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:108
+#: nova/virt/disk/vfs/guestfs.py:106
+#, python-format
+msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:113
msgid "Tearing down appliance"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:112
+#: nova/virt/disk/vfs/guestfs.py:117
#, python-format
msgid "Failed to close augeas %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:116
+#: nova/virt/disk/vfs/guestfs.py:121
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:120
+#: nova/virt/disk/vfs/guestfs.py:125
#, python-format
msgid "Failed to close guest handle %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:130 nova/virt/disk/vfs/localfs.py:102
+#: nova/virt/disk/vfs/guestfs.py:135 nova/virt/disk/vfs/localfs.py:102
#, python-format
msgid "Make directory path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:135 nova/virt/disk/vfs/localfs.py:107
+#: nova/virt/disk/vfs/guestfs.py:140 nova/virt/disk/vfs/localfs.py:107
#, python-format
msgid "Append file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:140 nova/virt/disk/vfs/localfs.py:116
+#: nova/virt/disk/vfs/guestfs.py:145 nova/virt/disk/vfs/localfs.py:116
#, python-format
msgid "Replace file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:145 nova/virt/disk/vfs/localfs.py:125
+#: nova/virt/disk/vfs/guestfs.py:150 nova/virt/disk/vfs/localfs.py:125
#, python-format
msgid "Read file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:150 nova/virt/disk/vfs/localfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:155 nova/virt/disk/vfs/localfs.py:131
#, python-format
msgid "Has file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:159
+#: nova/virt/disk/vfs/guestfs.py:164
#, python-format
msgid "Set permissions path=%(path)s mode=%(mode)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:164
+#: nova/virt/disk/vfs/guestfs.py:169
#, python-format
msgid "Set ownership path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:177
+#: nova/virt/disk/vfs/guestfs.py:182
#, python-format
msgid "chown uid=%(uid)d gid=%(gid)s"
msgstr ""
@@ -6028,7 +6219,7 @@ msgstr ""
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1424
+#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1421
#: nova/virt/xenapi/vm_utils.py:504
#, python-format
msgid "block_device_list %s"
@@ -6077,7 +6268,7 @@ msgstr ""
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:163 nova/virt/libvirt/driver.py:3109
+#: nova/virt/hyperv/hostops.py:163 nova/virt/libvirt/driver.py:3105
#: nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
@@ -6224,203 +6415,203 @@ msgstr ""
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:104
+#: nova/virt/hyperv/vmops.py:103
#, python-format
msgid "hyperv vm state: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:110
+#: nova/virt/hyperv/vmops.py:109
#, python-format
msgid ""
"Got Info for vm %(instance_name)s: state=%(state)d, mem=%(memusage)s, "
"num_cpu=%(numprocs)s, uptime=%(uptime)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:146
+#: nova/virt/hyperv/vmops.py:144
#, python-format
msgid "cache image failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:170
+#: nova/virt/hyperv/vmops.py:168
#, python-format
msgid "Starting VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:172
+#: nova/virt/hyperv/vmops.py:170
#, python-format
msgid "Started VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:174
+#: nova/virt/hyperv/vmops.py:172
#, python-format
msgid "spawn vm failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:180
+#: nova/virt/hyperv/vmops.py:178
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:183 nova/virt/libvirt/driver.py:1362
+#: nova/virt/hyperv/vmops.py:181 nova/virt/libvirt/driver.py:1362
msgid "Using config drive"
msgstr ""
-#: nova/virt/hyperv/vmops.py:194 nova/virt/libvirt/driver.py:1372
+#: nova/virt/hyperv/vmops.py:192 nova/virt/libvirt/driver.py:1371
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:201 nova/virt/libvirt/driver.py:1377
+#: nova/virt/hyperv/vmops.py:199 nova/virt/libvirt/driver.py:1377
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:243
+#: nova/virt/hyperv/vmops.py:238
#, python-format
msgid "Failed to create VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:246
+#: nova/virt/hyperv/vmops.py:241
#, python-format
msgid "Created VM %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:263
+#: nova/virt/hyperv/vmops.py:258
#, python-format
msgid "Set memory for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:276
+#: nova/virt/hyperv/vmops.py:271
#, python-format
msgid "Set vcpus for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:280
+#: nova/virt/hyperv/vmops.py:275
#, python-format
msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
msgstr ""
-#: nova/virt/hyperv/vmops.py:289
+#: nova/virt/hyperv/vmops.py:284
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmops.py:297
+#: nova/virt/hyperv/vmops.py:292
#, python-format
msgid "Failed to add scsi controller to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:314
+#: nova/virt/hyperv/vmops.py:309
#, python-format
msgid "Creating disk for %(vm_name)s by attaching disk file %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:342
+#: nova/virt/hyperv/vmops.py:337
#, python-format
msgid "Failed to add drive to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:345
+#: nova/virt/hyperv/vmops.py:340
#, python-format
msgid "New %(drive_type)s drive path is %(drive_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:370
+#: nova/virt/hyperv/vmops.py:365
#, python-format
msgid "Failed to add %(drive_type)s image to VM %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:372
+#: nova/virt/hyperv/vmops.py:367
#, python-format
msgid "Created drive type %(drive_type)s for %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:377
+#: nova/virt/hyperv/vmops.py:372
#, python-format
msgid "Creating nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:382
+#: nova/virt/hyperv/vmops.py:377
msgid "Cannot find vSwitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:402
+#: nova/virt/hyperv/vmops.py:397
msgid "Failed creating a port on the external vswitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:403
+#: nova/virt/hyperv/vmops.py:398
#, python-format
msgid "Failed creating port for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:406
+#: nova/virt/hyperv/vmops.py:401
#, python-format
msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:418
+#: nova/virt/hyperv/vmops.py:413
#, python-format
msgid "Failed to add nic to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:420
+#: nova/virt/hyperv/vmops.py:415
#, python-format
msgid "Created nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:427 nova/virt/hyperv/vmops.py:430
+#: nova/virt/hyperv/vmops.py:422 nova/virt/hyperv/vmops.py:425
#, python-format
msgid "Attempting to bind NIC to %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:435
+#: nova/virt/hyperv/vmops.py:430
msgid "No vSwitch specified, attaching to default"
msgstr ""
-#: nova/virt/hyperv/vmops.py:460
+#: nova/virt/hyperv/vmops.py:453
#, python-format
msgid "Got request to destroy vm %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:504
+#: nova/virt/hyperv/vmops.py:497
#, python-format
-msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
+msgid "Del: disk %(vhdfile)s vm %(name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:510
+#: nova/virt/hyperv/vmops.py:503
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:515
+#: nova/virt/hyperv/vmops.py:508
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:521
+#: nova/virt/hyperv/vmops.py:514
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:526
+#: nova/virt/hyperv/vmops.py:519
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:531
+#: nova/virt/hyperv/vmops.py:524
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:536
+#: nova/virt/hyperv/vmops.py:529
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:556
+#: nova/virt/hyperv/vmops.py:549
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:585
+#: nova/virt/hyperv/vmops.py:578
#, python-format
msgid "use_cow_image:%s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:605
+#: nova/virt/hyperv/vmops.py:598
#, python-format
msgid "Failed to create Difference Disk from %(base)s to %(target)s"
msgstr ""
@@ -6477,7 +6668,7 @@ msgstr ""
msgid "Unable to attach boot volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:114
+#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:115
#, python-format
msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
@@ -6487,7 +6678,7 @@ msgstr ""
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:158 nova/virt/xenapi/volumeops.py:180
+#: nova/virt/hyperv/volumeops.py:158 nova/virt/xenapi/volumeops.py:182
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
@@ -6754,113 +6945,113 @@ msgstr ""
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1242
+#: nova/virt/libvirt/driver.py:1241
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1392
+#: nova/virt/libvirt/driver.py:1389
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1402
+#: nova/virt/libvirt/driver.py:1399
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1476
+#: nova/virt/libvirt/driver.py:1473
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1482
+#: nova/virt/libvirt/driver.py:1479
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1486
+#: nova/virt/libvirt/driver.py:1483
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1490
+#: nova/virt/libvirt/driver.py:1487
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1506
+#: nova/virt/libvirt/driver.py:1503
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1823
+#: nova/virt/libvirt/driver.py:1819
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1827
+#: nova/virt/libvirt/driver.py:1823
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1844
+#: nova/virt/libvirt/driver.py:1840
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1998
+#: nova/virt/libvirt/driver.py:1994
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2115
+#: nova/virt/libvirt/driver.py:2111
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2198
+#: nova/virt/libvirt/driver.py:2194
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2222
+#: nova/virt/libvirt/driver.py:2218
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2226
+#: nova/virt/libvirt/driver.py:2222
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2342
+#: nova/virt/libvirt/driver.py:2338
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2350
+#: nova/virt/libvirt/driver.py:2346
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2387
+#: nova/virt/libvirt/driver.py:2383
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2412
+#: nova/virt/libvirt/driver.py:2408
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2424
+#: nova/virt/libvirt/driver.py:2420
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6870,51 +7061,51 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2441
+#: nova/virt/libvirt/driver.py:2437
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2489
+#: nova/virt/libvirt/driver.py:2485
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2561
+#: nova/virt/libvirt/driver.py:2557
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2651
+#: nova/virt/libvirt/driver.py:2647
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2766
+#: nova/virt/libvirt/driver.py:2762
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2815
+#: nova/virt/libvirt/driver.py:2811
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2864
+#: nova/virt/libvirt/driver.py:2860
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2923
+#: nova/virt/libvirt/driver.py:2919
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2930
+#: nova/virt/libvirt/driver.py:2926
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2981
+#: nova/virt/libvirt/driver.py:2977
msgid "Starting finish_revert_migration"
msgstr ""
@@ -7125,17 +7316,17 @@ msgstr ""
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:111
+#: nova/virt/libvirt/vif.py:110
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:121
+#: nova/virt/libvirt/vif.py:120
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:198 nova/virt/libvirt/vif.py:269
+#: nova/virt/libvirt/vif.py:197 nova/virt/libvirt/vif.py:268
msgid "Failed while unplugging vif"
msgstr ""
@@ -7344,33 +7535,33 @@ msgstr ""
msgid "PowerVM instance cleanup failed"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:110
+#: nova/virt/vmwareapi/driver.py:107
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:261
+#: nova/virt/vmwareapi/driver.py:258
#, python-format
msgid "In vmwareapi:_create_session, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:344
+#: nova/virt/vmwareapi/driver.py:341
#, python-format
msgid "In vmwareapi:_call_method, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:379
+#: nova/virt/vmwareapi/driver.py:376
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: success"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:384
+#: nova/virt/vmwareapi/driver.py:381
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:388
+#: nova/virt/vmwareapi/driver.py:385
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
@@ -7480,241 +7671,241 @@ msgstr ""
msgid "Exception in %s "
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:65
+#: nova/virt/vmwareapi/vmops.py:60
msgid "Getting list of instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:81
+#: nova/virt/vmwareapi/vmops.py:76
#, python-format
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:125
+#: nova/virt/vmwareapi/vmops.py:120
msgid "Couldn't get a local Datastore reference"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:195
+#: nova/virt/vmwareapi/vmops.py:190
msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:203
+#: nova/virt/vmwareapi/vmops.py:198
msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:231
+#: nova/virt/vmwareapi/vmops.py:226
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:249
+#: nova/virt/vmwareapi/vmops.py:244
#, python-format
msgid ""
"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
"local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:259
+#: nova/virt/vmwareapi/vmops.py:254
#, python-format
msgid ""
"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:272
+#: nova/virt/vmwareapi/vmops.py:267
#, python-format
msgid ""
"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:284
+#: nova/virt/vmwareapi/vmops.py:279
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:300
+#: nova/virt/vmwareapi/vmops.py:295
#, python-format
msgid ""
"Downloaded image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:318
+#: nova/virt/vmwareapi/vmops.py:313
msgid "Reconfiguring VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:325
+#: nova/virt/vmwareapi/vmops.py:320
msgid "Reconfigured VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:332
+#: nova/virt/vmwareapi/vmops.py:327
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:338
+#: nova/virt/vmwareapi/vmops.py:333
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:384
+#: nova/virt/vmwareapi/vmops.py:379
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:394
+#: nova/virt/vmwareapi/vmops.py:389
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:437
+#: nova/virt/vmwareapi/vmops.py:432
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:450
+#: nova/virt/vmwareapi/vmops.py:445
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:459
+#: nova/virt/vmwareapi/vmops.py:454
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:473
+#: nova/virt/vmwareapi/vmops.py:468
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:484
+#: nova/virt/vmwareapi/vmops.py:479
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:493
+#: nova/virt/vmwareapi/vmops.py:488
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:525
+#: nova/virt/vmwareapi/vmops.py:520
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:532
+#: nova/virt/vmwareapi/vmops.py:527
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:535
+#: nova/virt/vmwareapi/vmops.py:530
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:537
+#: nova/virt/vmwareapi/vmops.py:532
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:541
+#: nova/virt/vmwareapi/vmops.py:536
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:553
+#: nova/virt/vmwareapi/vmops.py:548
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:572
+#: nova/virt/vmwareapi/vmops.py:567
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:577
+#: nova/virt/vmwareapi/vmops.py:572
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:581
+#: nova/virt/vmwareapi/vmops.py:576
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:584
+#: nova/virt/vmwareapi/vmops.py:579
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:586
+#: nova/virt/vmwareapi/vmops.py:581
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:598
+#: nova/virt/vmwareapi/vmops.py:593
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:608
+#: nova/virt/vmwareapi/vmops.py:603
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:613
+#: nova/virt/vmwareapi/vmops.py:608
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:622
+#: nova/virt/vmwareapi/vmops.py:617
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:626
+#: nova/virt/vmwareapi/vmops.py:621
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:640
+#: nova/virt/vmwareapi/vmops.py:635
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:644
+#: nova/virt/vmwareapi/vmops.py:639
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:647
+#: nova/virt/vmwareapi/vmops.py:642
msgid "instance is powered off and can not be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:650
+#: nova/virt/vmwareapi/vmops.py:645
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:663
+#: nova/virt/vmwareapi/vmops.py:658
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:668
+#: nova/virt/vmwareapi/vmops.py:663
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:670
+#: nova/virt/vmwareapi/vmops.py:665
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:706
+#: nova/virt/vmwareapi/vmops.py:701
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:764
+#: nova/virt/vmwareapi/vmops.py:759
#, python-format
msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:772
+#: nova/virt/vmwareapi/vmops.py:767
#, python-format
msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:809
+#: nova/virt/vmwareapi/vmops.py:804
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:813
+#: nova/virt/vmwareapi/vmops.py:808
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -7749,19 +7940,19 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1455
+#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1476
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1459
+#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1480
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1464
+#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1485
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -7861,30 +8052,30 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:354
+#: nova/virt/xenapi/driver.py:344
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:566
+#: nova/virt/xenapi/driver.py:556
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:626
+#: nova/virt/xenapi/driver.py:616
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:666
+#: nova/virt/xenapi/driver.py:656
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:750 nova/virt/xenapi/driver.py:764
+#: nova/virt/xenapi/driver.py:740 nova/virt/xenapi/driver.py:754
#, python-format
msgid "Got exception: %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:680 nova/virt/xenapi/fake.py:782
-#: nova/virt/xenapi/fake.py:801 nova/virt/xenapi/fake.py:869
+#: nova/virt/xenapi/fake.py:680 nova/virt/xenapi/fake.py:784
+#: nova/virt/xenapi/fake.py:803 nova/virt/xenapi/fake.py:871
msgid "Raising NotImplemented"
msgstr ""
@@ -7908,7 +8099,7 @@ msgstr ""
msgid "Calling setter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:784
+#: nova/virt/xenapi/fake.py:786
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
@@ -8008,12 +8199,12 @@ msgstr ""
msgid "Pool-set_name_label failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/vif.py:103
+#: nova/virt/xenapi/vif.py:102
#, python-format
msgid "Found no PIF for device %s"
msgstr ""
-#: nova/virt/xenapi/vif.py:122
+#: nova/virt/xenapi/vif.py:121
#, python-format
msgid ""
"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
@@ -8443,201 +8634,201 @@ msgstr ""
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:124 nova/virt/xenapi/vmops.py:671
+#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:692
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:165
+#: nova/virt/xenapi/vmops.py:168
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:233
+#: nova/virt/xenapi/vmops.py:254
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:302
+#: nova/virt/xenapi/vmops.py:323
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:374
+#: nova/virt/xenapi/vmops.py:395
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:405
+#: nova/virt/xenapi/vmops.py:426
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:478
+#: nova/virt/xenapi/vmops.py:499
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:495
+#: nova/virt/xenapi/vmops.py:516
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:521
+#: nova/virt/xenapi/vmops.py:542
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:527
+#: nova/virt/xenapi/vmops.py:548
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:541
+#: nova/virt/xenapi/vmops.py:562
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:544
+#: nova/virt/xenapi/vmops.py:565
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:555
+#: nova/virt/xenapi/vmops.py:576
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:582
+#: nova/virt/xenapi/vmops.py:603
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:590
+#: nova/virt/xenapi/vmops.py:611
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:640
+#: nova/virt/xenapi/vmops.py:661
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:644
+#: nova/virt/xenapi/vmops.py:665
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:652
+#: nova/virt/xenapi/vmops.py:673
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:689
+#: nova/virt/xenapi/vmops.py:710
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:695 nova/virt/xenapi/vmops.py:745
+#: nova/virt/xenapi/vmops.py:716 nova/virt/xenapi/vmops.py:766
msgid "Clean shutdown did not complete successfully, trying hard shutdown."
msgstr ""
-#: nova/virt/xenapi/vmops.py:774
+#: nova/virt/xenapi/vmops.py:795
msgid "Resize down not allowed without auto_disk_config"
msgstr ""
-#: nova/virt/xenapi/vmops.py:819
+#: nova/virt/xenapi/vmops.py:840
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:824
+#: nova/virt/xenapi/vmops.py:845
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:868
+#: nova/virt/xenapi/vmops.py:889
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:959
+#: nova/virt/xenapi/vmops.py:980
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:985
+#: nova/virt/xenapi/vmops.py:1006
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1012
+#: nova/virt/xenapi/vmops.py:1033
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1019
+#: nova/virt/xenapi/vmops.py:1040
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1026
+#: nova/virt/xenapi/vmops.py:1047
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1052
+#: nova/virt/xenapi/vmops.py:1073
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1078
+#: nova/virt/xenapi/vmops.py:1099
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1129
+#: nova/virt/xenapi/vmops.py:1150
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1163
+#: nova/virt/xenapi/vmops.py:1184
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1212
+#: nova/virt/xenapi/vmops.py:1233
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1216
+#: nova/virt/xenapi/vmops.py:1237
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1276
+#: nova/virt/xenapi/vmops.py:1297
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1359
+#: nova/virt/xenapi/vmops.py:1380
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1378
+#: nova/virt/xenapi/vmops.py:1399
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1387
+#: nova/virt/xenapi/vmops.py:1408
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1390
+#: nova/virt/xenapi/vmops.py:1411
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1418
+#: nova/virt/xenapi/vmops.py:1439
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1514
+#: nova/virt/xenapi/vmops.py:1535
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1546
+#: nova/virt/xenapi/vmops.py:1567
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1594
+#: nova/virt/xenapi/vmops.py:1615
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1630
+#: nova/virt/xenapi/vmops.py:1651
msgid "Migrate Send failed"
msgstr ""
@@ -8669,7 +8860,7 @@ msgid "introducing sr within volume_utils"
msgstr ""
#: nova/virt/xenapi/volume_utils.py:93 nova/virt/xenapi/volume_utils.py:160
-#: nova/virt/xenapi/volumeops.py:140
+#: nova/virt/xenapi/volumeops.py:141
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
@@ -8682,7 +8873,7 @@ msgstr ""
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:106 nova/virt/xenapi/volumeops.py:144
+#: nova/virt/xenapi/volume_utils.py:106 nova/virt/xenapi/volumeops.py:145
msgid "Unable to introduce Storage Repository"
msgstr ""
@@ -8803,47 +8994,47 @@ msgstr ""
msgid "Could not forget SR"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:127
+#: nova/virt/xenapi/volumeops.py:128
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:162
+#: nova/virt/xenapi/volumeops.py:163
#, python-format
msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:172
+#: nova/virt/xenapi/volumeops.py:173
#, python-format
msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:189
+#: nova/virt/xenapi/volumeops.py:191
#, python-format
msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:197
+#: nova/virt/xenapi/volumeops.py:199
#, python-format
msgid "Unable to locate volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:206
+#: nova/virt/xenapi/volumeops.py:208
#, python-format
msgid "Unable to detach volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:211
+#: nova/virt/xenapi/volumeops.py:213
#, python-format
msgid "Unable to destroy vbd %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:218
+#: nova/virt/xenapi/volumeops.py:220
#, python-format
msgid "Error purging SR %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:220
+#: nova/virt/xenapi/volumeops.py:222
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
diff --git a/nova/manager.py b/nova/manager.py
index e7130fb4a..636424d1c 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -54,8 +54,10 @@ This module provides Manager, a base class for managers.
"""
import eventlet
+import time
from nova.db import base
+from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
@@ -63,25 +65,50 @@ from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import version
+
+periodic_opts = [
+ cfg.BoolOpt('run_external_periodic_tasks',
+ default=True,
+ help=('Some periodic tasks can be run in a separate process. '
+ 'Should we run them here?')),
+ ]
+
CONF = cfg.CONF
+CONF.register_opts(periodic_opts)
CONF.import_opt('host', 'nova.config')
LOG = logging.getLogger(__name__)
+DEFAULT_INTERVAL = 60.0
+
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
- 1. Without arguments '@periodic_task', this will be run on every tick
+ 1. Without arguments '@periodic_task', this will be run on every cycle
of the periodic scheduler.
- 2. With arguments, @periodic_task(ticks_between_runs=N), this will be
- run on every N ticks of the periodic scheduler.
+    2. With arguments, @periodic_task(spacing=N), this will be run
+       approximately every N seconds. If this number is negative, the
+       periodic task will be disabled.
"""
def decorator(f):
+ # Test for old style invocation
+ if 'ticks_between_runs' in kwargs:
+ raise exception.InvalidPeriodicTaskArg(arg='ticks_between_runs')
+
+ # Control if run at all
f._periodic_task = True
- f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
+ f._periodic_external_ok = kwargs.pop('external_process_ok', False)
+ if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
+ f._periodic_enabled = False
+ else:
+ f._periodic_enabled = kwargs.pop('enabled', True)
+
+ # Control frequency
+ f._periodic_spacing = kwargs.pop('spacing', 0)
+ f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
@@ -117,17 +144,39 @@ class ManagerMeta(type):
cls._periodic_tasks = []
try:
- cls._ticks_to_skip = cls._ticks_to_skip.copy()
+ cls._periodic_last_run = cls._periodic_last_run.copy()
+ except AttributeError:
+ cls._periodic_last_run = {}
+
+ try:
+ cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
- cls._ticks_to_skip = {}
+ cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
- if task._ticks_between_runs >= 0:
- cls._periodic_tasks.append((name, task))
- cls._ticks_to_skip[name] = task._ticks_between_runs
+
+ if task._periodic_spacing < 0:
+ LOG.info(_('Skipping periodic task %(task)s because '
+ 'its interval is negative'),
+ {'task': name})
+ continue
+ if not task._periodic_enabled:
+ LOG.info(_('Skipping periodic task %(task)s because '
+ 'it is disabled'),
+ {'task': name})
+ continue
+
+ # A periodic spacing of zero indicates that this task should
+ # be run every pass
+ if task._periodic_spacing == 0:
+ task._periodic_spacing = None
+
+ cls._periodic_tasks.append((name, task))
+ cls._periodic_spacing[name] = task._periodic_spacing
+ cls._periodic_last_run[name] = task._periodic_last_run
class Manager(base.Base):
@@ -158,30 +207,39 @@ class Manager(base.Base):
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
+ idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
- ticks_to_skip = self._ticks_to_skip[task_name]
- if ticks_to_skip > 0:
- LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
- " ticks left until next run"), locals())
- self._ticks_to_skip[task_name] -= 1
- continue
+ # If a periodic task is _nearly_ due, then we'll run it early
+ if self._periodic_spacing[task_name] is None:
+ wait = 0
+ else:
+ wait = time.time() - (self._periodic_last_run[task_name] +
+ self._periodic_spacing[task_name])
+ if wait > 0.2:
+ if wait < idle_for:
+ idle_for = wait
+ continue
- self._ticks_to_skip[task_name] = task._ticks_between_runs
LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
+ self._periodic_last_run[task_name] = time.time()
try:
task(self, context)
- # NOTE(tiantian): After finished a task, allow manager to
- # do other work (report_state, processing AMPQ request etc.)
- eventlet.sleep(0)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
locals())
+            if (self._periodic_spacing[task_name] is not None and
+                    self._periodic_spacing[task_name] < idle_for):
+ idle_for = self._periodic_spacing[task_name]
+ eventlet.sleep(0)
+
+ return idle_for
+
def init_host(self):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
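
[Illustration, not part of the patch] A minimal sketch of how a manager would use the
reworked decorator, assuming nova.manager is importable as in the hunks above;
ExampleManager and both task names are hypothetical.

    from nova import manager


    class ExampleManager(manager.Manager):

        @manager.periodic_task
        def _run_every_pass(self, context):
            # spacing defaults to 0, so this runs on every pass of the
            # periodic scheduler.
            pass

        @manager.periodic_task(spacing=120)
        def _run_every_two_minutes(self, context):
            # Runs roughly every 120 seconds; a negative spacing would
            # disable the task entirely.
            pass

The service loop calls periodic_tasks(context), which now returns the number of
seconds it may idle before the next task is due, instead of being driven by a
fixed tick.
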
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index a1c03bc51..e803488d2 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -95,7 +95,7 @@ linux_net_opts = [
CONF = cfg.CONF
CONF.register_opts(linux_net_opts)
CONF.import_opt('bindir', 'nova.config')
-CONF.import_opt('fake_network', 'nova.config')
+CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('host', 'nova.config')
CONF.import_opt('use_ipv6', 'nova.config')
CONF.import_opt('my_ip', 'nova.config')
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 3c892cd56..e263ac730 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -150,6 +150,9 @@ network_opts = [
cfg.StrOpt('network_host',
default=socket.getfqdn(),
help='Network host to use for ip allocation in flat modes'),
+ cfg.BoolOpt('fake_network',
+ default=False,
+ help='If passed, use fake network devices and addresses'),
cfg.BoolOpt('fake_call',
default=False,
help='If True, skip using the queue and make local calls'),
@@ -172,8 +175,8 @@ network_opts = [
'entries in multi host mode'),
cfg.IntOpt("dns_update_periodic_interval",
default=-1,
- help='Number of periodic scheduler ticks to wait between '
- 'runs of updates to DNS entries.'),
+ help='Number of seconds to wait between runs of updates to DNS '
+ 'entries.'),
cfg.StrOpt('dhcp_domain',
default='novalocal',
help='domain to use for building the hostnames'),
@@ -193,7 +196,6 @@ network_opts = [
CONF = cfg.CONF
CONF.register_opts(network_opts)
-CONF.import_opt('fake_network', 'nova.config')
CONF.import_opt('use_ipv6', 'nova.config')
CONF.import_opt('my_ip', 'nova.config')
@@ -1973,7 +1975,7 @@ class NetworkManager(manager.SchedulerDependentManager):
mac_address)
@manager.periodic_task(
- ticks_between_runs=CONF.dns_update_periodic_interval)
+ spacing=CONF.dns_update_periodic_interval)
def _periodic_update_dns(self, context):
"""Update local DNS entries of all networks on this host"""
networks = self.db.network_get_all_by_host(context, self.host)
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index a223e8fde..cfdac03bd 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -57,19 +57,19 @@ rpc_opts = [
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
- #
- # The following options are not registered here, but are expected to be
- # present. The project using this library must register these options with
- # the configuration so that project-specific defaults may be defined.
- #
- #cfg.StrOpt('control_exchange',
- # default='nova',
- # help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+ cfg.StrOpt('control_exchange',
+ default='openstack',
+ help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
cfg.CONF.register_opts(rpc_opts)
+def set_defaults(control_exchange):
+ cfg.set_defaults(rpc_opts,
+ control_exchange=control_exchange)
+
+
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index a5a79cc30..6464914db 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -428,7 +428,4 @@ def cleanup(connection_pool):
def get_control_exchange(conf):
- try:
- return conf.control_exchange
- except cfg.NoSuchOptError:
- return 'openstack'
+ return conf.control_exchange
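
[Illustration, not part of the patch] With control_exchange now registered by the
common rpc code (default 'openstack'), a consuming project overrides the default
instead of registering the option itself; presumably nova/config.py does something
along these lines.

    from nova.openstack.common import rpc

    # Make 'nova' the default AMQP exchange for this project while still
    # letting deployers override control_exchange in their configuration.
    rpc.set_defaults(control_exchange='nova')
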
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index ea9a39b6f..07a3f578a 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -165,7 +165,7 @@ class FilterScheduler(driver.Scheduler):
if not retry:
return
hosts = retry['hosts']
- hosts.append((host, node))
+ hosts.append([host, node])
def _add_oversubscription_policy(self, filter_properties, host_state):
filter_properties['limits'] = host_state.limits
diff --git a/nova/scheduler/filters/retry_filter.py b/nova/scheduler/filters/retry_filter.py
index 108e4d206..91d2cb2a2 100644
--- a/nova/scheduler/filters/retry_filter.py
+++ b/nova/scheduler/filters/retry_filter.py
@@ -33,10 +33,13 @@ class RetryFilter(filters.BaseHostFilter):
return True
hosts = retry.get('hosts', [])
- host = (host_state.host, host_state.nodename)
+ host = [host_state.host, host_state.nodename]
- LOG.debug(_("Previously tried hosts: %(hosts)s. (host=%(host)s)") %
- locals())
+ passes = host not in hosts
+ pass_msg = "passes" if passes else "fails"
+
+ LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: "
+ "%(hosts)s") % locals())
# Host passes if it's not in the list of previously attempted hosts:
- return host not in hosts
+ return passes
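
[Illustration, not part of the patch] A plausible reason for switching from tuples to
lists here and in filter_scheduler above: the retry information rides in
filter_properties, which are serialized as JSON across the RPC layer, and JSON has no
tuple type, so a tuple stored before the hop comes back as a list and never compares
equal.

    import json

    sent = [('host1', 'node1')]              # tuple recorded before the hop
    received = json.loads(json.dumps(sent))  # comes back as [['host1', 'node1']]
    assert ('host1', 'node1') not in received   # the tuple form no longer matches
    assert ['host1', 'node1'] in received       # the list form still does
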
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 0c64d7aa2..d5b8aeb52 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -140,7 +140,8 @@ class HostState(object):
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
- if self.updated and self.updated > compute['updated_at']:
+ if (self.updated and compute['updated_at']
+ and self.updated > compute['updated_at']):
return
all_ram_mb = compute['memory_mb']
diff --git a/nova/service.py b/nova/service.py
index 4c93fefa8..fc0ac4a1b 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -49,9 +49,9 @@ service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='seconds between nodes reporting state to datastore'),
- cfg.IntOpt('periodic_interval',
- default=60,
- help='seconds between running periodic tasks'),
+ cfg.BoolOpt('periodic_enable',
+ default=True,
+ help='enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='range of seconds to randomly delay when starting the'
@@ -371,7 +371,8 @@ class Service(object):
it state to the database services table."""
def __init__(self, host, binary, topic, manager, report_interval=None,
- periodic_interval=None, periodic_fuzzy_delay=None,
+ periodic_enable=None, periodic_fuzzy_delay=None,
+ periodic_interval_max=None,
*args, **kwargs):
self.host = host
self.binary = binary
@@ -380,8 +381,9 @@ class Service(object):
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
- self.periodic_interval = periodic_interval
+ self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
+ self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
@@ -433,15 +435,15 @@ class Service(object):
if pulse:
self.timers.append(pulse)
- if self.periodic_interval:
+ if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
- periodic = utils.LoopingCall(self.periodic_tasks)
- periodic.start(interval=self.periodic_interval,
- initial_delay=initial_delay)
+ periodic = utils.DynamicLoopingCall(self.periodic_tasks)
+ periodic.start(initial_delay=initial_delay,
+ periodic_interval_max=self.periodic_interval_max)
self.timers.append(periodic)
def _create_service_ref(self, context):
@@ -460,8 +462,8 @@ class Service(object):
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
- report_interval=None, periodic_interval=None,
- periodic_fuzzy_delay=None):
+ report_interval=None, periodic_enable=None,
+ periodic_fuzzy_delay=None, periodic_interval_max=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
@@ -469,8 +471,9 @@ class Service(object):
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
- :param periodic_interval: defaults to CONF.periodic_interval
+ :param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
+ :param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
@@ -486,14 +489,15 @@ class Service(object):
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
- if periodic_interval is None:
- periodic_interval = CONF.periodic_interval
+ if periodic_enable is None:
+ periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
- periodic_interval=periodic_interval,
- periodic_fuzzy_delay=periodic_fuzzy_delay)
+ periodic_enable=periodic_enable,
+ periodic_fuzzy_delay=periodic_fuzzy_delay,
+ periodic_interval_max=periodic_interval_max)
return service_obj
@@ -529,7 +533,7 @@ class Service(object):
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
- self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
+ return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
class WSGIService(object):
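
[Illustration, not part of the patch] A rough sketch of what a dynamic looping call
does with the value periodic_tasks() now returns; nova.utils.DynamicLoopingCall is
built on eventlet greenthreads rather than this plain loop, and periodic_interval_max
caps how long the loop will sleep.

    import time


    def dynamic_loop(periodic_tasks, periodic_interval_max=None):
        # periodic_tasks() reports how many seconds it can idle before the
        # next task is due; sleep for that long, optionally capped.
        while True:
            idle_for = periodic_tasks()
            if periodic_interval_max is not None:
                idle_for = min(idle_for, periodic_interval_max)
            time.sleep(idle_for)
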
diff --git a/nova/servicegroup/db_driver.py b/nova/servicegroup/db_driver.py
index a4481406c..a52ed258c 100644
--- a/nova/servicegroup/db_driver.py
+++ b/nova/servicegroup/db_driver.py
@@ -41,7 +41,7 @@ class DbDriver(api.ServiceGroupDriver):
' ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
- pulse = utils.LoopingCall(self._report_state, service)
+ pulse = utils.FixedIntervalLoopingCall(self._report_state, service)
pulse.start(interval=report_interval,
initial_delay=report_interval)
return pulse
diff --git a/nova/test.py b/nova/test.py
index 3ba38eef4..fd9c4a522 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -191,6 +191,8 @@ class TestCase(testtools.TestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
+ # Give each test a maximum of one minute to run.
+ self.useFixture(fixtures.Timeout(60, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 3c119a2c2..61402ce0e 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -672,12 +672,12 @@ class CinderCloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
- def _restart_compute_service(self, periodic_interval=None):
+ def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
- if periodic_interval:
+ if periodic_interval_max:
self.compute = self.start_service(
- 'compute', periodic_interval=periodic_interval)
+ 'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
@@ -716,7 +716,7 @@ class CinderCloudTestCase(test.TestCase):
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
@@ -799,7 +799,7 @@ class CinderCloudTestCase(test.TestCase):
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1,
@@ -936,7 +936,7 @@ class CinderCloudTestCase(test.TestCase):
def test_create_image(self):
"""Make sure that CreateImage works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index d73502f43..284298585 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1619,19 +1619,19 @@ class CloudTestCase(test.TestCase):
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
- def _restart_compute_service(self, periodic_interval=None):
+ def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
- if periodic_interval:
+ if periodic_interval_max:
self.compute = self.start_service(
- 'compute', periodic_interval=periodic_interval)
+ 'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
@@ -1834,7 +1834,7 @@ class CloudTestCase(test.TestCase):
def _do_test_create_image(self, no_reboot):
"""Make sure that CreateImage works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
@@ -1942,7 +1942,7 @@ class CloudTestCase(test.TestCase):
instance
"""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py
index 16790860c..28b109215 100644
--- a/nova/tests/api/openstack/compute/test_versions.py
+++ b/nova/tests/api/openstack/compute/test_versions.py
@@ -37,17 +37,17 @@ NS = {
}
-LINKS = {
+EXP_LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
+ 'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
+ 'api/openstack-compute/2/wadl/os-compute-2.wadl',
},
}
-VERSIONS = {
+EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
@@ -56,12 +56,12 @@ VERSIONS = {
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -79,9 +79,6 @@ VERSIONS = {
class VersionsTest(test.TestCase):
- def setUp(self):
- super(VersionsTest, self).setUp()
- self.stubs.Set(versions, 'VERSIONS', VERSIONS)
def test_get_version_list(self):
req = webob.Request.blank('/')
@@ -132,12 +129,12 @@ class VersionsTest(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -176,12 +173,12 @@ class VersionsTest(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -210,7 +207,7 @@ class VersionsTest(test.TestCase):
version = etree.XML(res.body)
xmlutil.validate_schema(version, 'version')
- expected = VERSIONS['v2.0']
+ expected = EXP_VERSIONS['v2.0']
self.assertTrue(version.xpath('/ns:version', namespaces=NS))
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
@@ -240,7 +237,7 @@ class VersionsTest(test.TestCase):
for i, v in enumerate(['v2.0']):
version = versions[i]
- expected = VERSIONS[v]
+ expected = EXP_VERSIONS[v]
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
(link,) = version.xpath('atom:link', namespaces=NS)
@@ -278,11 +275,11 @@ class VersionsTest(test.TestCase):
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
- 'href': LINKS['v2.0']['pdf'],
+ 'href': EXP_LINKS['v2.0']['pdf'],
'type': 'application/pdf',
'rel': 'describedby'})
self.assertEqual(entry.links[2], {
- 'href': LINKS['v2.0']['wadl'],
+ 'href': EXP_LINKS['v2.0']['wadl'],
'type': 'application/vnd.sun.wadl+xml',
'rel': 'describedby'})
@@ -368,8 +365,11 @@ class VersionsTest(test.TestCase):
self.assertEqual(version.get('status'), 'CURRENT')
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
- self.assertTrue(common.compare_media_types(media_types,
- VERSIONS['v2.0']['media-types']))
+        self.assertTrue(common.compare_media_types(
+            media_types,
+            EXP_VERSIONS['v2.0']['media-types'],
+        ))
+
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
@@ -512,7 +512,7 @@ class VersionsSerializerTests(test.TestCase):
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
- "media-types": VERSIONS['v2.0']['media-types'],
+ "media-types": EXP_VERSIONS['v2.0']['media-types'],
"links": [
{
"rel": "self",
@@ -601,12 +601,12 @@ class VersionsSerializerTests(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -651,9 +651,9 @@ class VersionsSerializerTests(test.TestCase):
self.assertEqual(entry.links[1], {
'rel': 'describedby',
'type': 'application/pdf',
- 'href': LINKS['v2.0']['pdf']})
+ 'href': EXP_LINKS['v2.0']['pdf']})
self.assertEqual(entry.links[2], {
'rel': 'describedby',
'type': 'application/vnd.sun.wadl+xml',
- 'href': LINKS['v2.0']['wadl'],
+ 'href': EXP_LINKS['v2.0']['wadl'],
})
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
new file mode 100644
index 000000000..dd679a563
--- /dev/null
+++ b/nova/tests/baremetal/test_pxe.py
@@ -0,0 +1,534 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for baremetal pxe driver.
+"""
+
+import os
+
+import mox
+from testtools.matchers import Contains
+from testtools.matchers import MatchesAll
+from testtools.matchers import Not
+from testtools.matchers import StartsWith
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova.tests.image import fake as fake_image
+from nova.tests import utils
+from nova.virt.baremetal import db
+from nova.virt.baremetal import pxe
+from nova.virt.baremetal import utils as bm_utils
+from nova.virt.disk import api as disk_api
+
+CONF = cfg.CONF
+
+COMMON_FLAGS = dict(
+ firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
+ host='test_host',
+)
+
+BAREMETAL_FLAGS = dict(
+ driver='nova.virt.baremetal.pxe.PXE',
+ instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
+ power_manager='nova.virt.baremetal.fake.FakePowerManager',
+ vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
+ volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
+ group='baremetal',
+)
+
+
+class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
+
+ def setUp(self):
+ super(BareMetalPXETestCase, self).setUp()
+ self.flags(**COMMON_FLAGS)
+ self.flags(**BAREMETAL_FLAGS)
+ self.driver = pxe.PXE()
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.addCleanup(fake_image.FakeImageService_reset)
+ self.context = utils.get_test_admin_context()
+        self.test_block_device_info = None
+        self.instance = utils.get_test_instance()
+        self.test_network_info = utils.get_test_network_info()
+ self.node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ prov_mac_address='11:11:11:11:11:11',
+ )
+ self.nic_info = [
+ {'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+
+ def _create_node(self):
+ self.node = db.bm_node_create(self.context, self.node_info)
+ for nic in self.nic_info:
+ db.bm_interface_create(
+ self.context,
+ self.node['id'],
+ nic['address'],
+ nic['datapath_id'],
+ nic['port_no'],
+ )
+ self.instance['node'] = self.node['id']
+ self.spawn_params = dict(
+ admin_password='test_pass',
+ block_device_info=self.test_block_device_info,
+ context=self.context,
+ image_meta=utils.get_test_image_info(None,
+ self.instance),
+ injected_files=[('/fake/path', 'hello world')],
+ instance=self.instance,
+ network_info=self.test_network_info,
+ )
+
+
+class PXEClassMethodsTestCase(BareMetalPXETestCase):
+
+ def test_build_pxe_config(self):
+ args = {
+ 'deployment_id': 'aaa',
+ 'deployment_key': 'bbb',
+ 'deployment_iscsi_iqn': 'ccc',
+ 'deployment_aki_path': 'ddd',
+ 'deployment_ari_path': 'eee',
+ 'aki_path': 'fff',
+ 'ari_path': 'ggg',
+ }
+ config = pxe.build_pxe_config(**args)
+ self.assertThat(config, StartsWith('default deploy'))
+
+ # deploy bits are in the deploy section
+ start = config.index('label deploy')
+ end = config.index('label boot')
+ self.assertThat(config[start:end], MatchesAll(
+ Contains('kernel ddd'),
+ Contains('initrd=eee'),
+ Contains('deployment_id=aaa'),
+ Contains('deployment_key=bbb'),
+ Contains('iscsi_target_iqn=ccc'),
+ Not(Contains('kernel fff')),
+ ))
+
+ # boot bits are in the boot section
+ start = config.index('label boot')
+ self.assertThat(config[start:], MatchesAll(
+ Contains('kernel fff'),
+ Contains('initrd=ggg'),
+ Not(Contains('kernel ddd')),
+ ))
+
+ def test_build_network_config(self):
+ net = utils.get_test_network_info(1)
+ config = pxe.build_network_config(net)
+ self.assertIn('eth0', config)
+ self.assertNotIn('eth1', config)
+ self.assertIn('hwaddress ether fake', config)
+ self.assertNotIn('hwaddress ether aa:bb:cc:dd', config)
+
+ net[0][1]['mac'] = 'aa:bb:cc:dd'
+ config = pxe.build_network_config(net)
+ self.assertIn('hwaddress ether aa:bb:cc:dd', config)
+
+ net = utils.get_test_network_info(2)
+ config = pxe.build_network_config(net)
+ self.assertIn('eth0', config)
+ self.assertIn('eth1', config)
+
+ def test_build_network_config_dhcp(self):
+ self.flags(
+ net_config_template='$pybasedir/nova/virt/baremetal/'
+ 'net-dhcp.ubuntu.template',
+ group='baremetal',
+ )
+ net = utils.get_test_network_info()
+ net[0][1]['ips'][0]['ip'] = '1.2.3.4'
+ config = pxe.build_network_config(net)
+ self.assertIn('iface eth0 inet dhcp', config)
+ self.assertNotIn('address 1.2.3.4', config)
+
+ def test_build_network_config_static(self):
+ self.flags(
+ net_config_template='$pybasedir/nova/virt/baremetal/'
+ 'net-static.ubuntu.template',
+ group='baremetal',
+ )
+ net = utils.get_test_network_info()
+ net[0][1]['ips'][0]['ip'] = '1.2.3.4'
+ config = pxe.build_network_config(net)
+ self.assertIn('iface eth0 inet static', config)
+ self.assertIn('address 1.2.3.4', config)
+
+ def test_image_dir_path(self):
+ self.assertEqual(
+ pxe.get_image_dir_path(self.instance),
+ os.path.join(CONF.instances_path, 'instance-00000001'))
+
+ def test_image_file_path(self):
+ self.assertEqual(
+ pxe.get_image_file_path(self.instance),
+ os.path.join(
+ CONF.instances_path, 'instance-00000001', 'disk'))
+
+ def test_pxe_config_file_path(self):
+ self.instance['uuid'] = 'aaaa-bbbb-cccc'
+ self.assertEqual(
+ pxe.get_pxe_config_file_path(self.instance),
+ os.path.join(CONF.baremetal.tftp_root,
+ 'aaaa-bbbb-cccc', 'config'))
+
+ def test_pxe_mac_path(self):
+ self.assertEqual(
+ pxe.get_pxe_mac_path('23:45:67:89:AB'),
+ os.path.join(CONF.baremetal.tftp_root,
+ 'pxelinux.cfg', '01-23-45-67-89-ab'))
+
+ def test_get_instance_deploy_ids(self):
+ self.instance['extra_specs'] = {
+ 'deploy_kernel_id': 'aaaa',
+ 'deploy_ramdisk_id': 'bbbb',
+ }
+ self.flags(deploy_kernel="fail", group='baremetal')
+ self.flags(deploy_ramdisk="fail", group='baremetal')
+
+ self.assertEqual(
+ pxe.get_deploy_aki_id(self.instance), 'aaaa')
+ self.assertEqual(
+ pxe.get_deploy_ari_id(self.instance), 'bbbb')
+
+ def test_get_default_deploy_ids(self):
+ self.instance['extra_specs'] = {}
+ self.flags(deploy_kernel="aaaa", group='baremetal')
+ self.flags(deploy_ramdisk="bbbb", group='baremetal')
+
+ self.assertEqual(
+ pxe.get_deploy_aki_id(self.instance), 'aaaa')
+ self.assertEqual(
+ pxe.get_deploy_ari_id(self.instance), 'bbbb')
+
+ def test_get_partition_sizes(self):
+ # m1.tiny: 10GB root, 0GB swap
+ self.instance['instance_type_id'] = 1
+ sizes = pxe.get_partition_sizes(self.instance)
+ self.assertEqual(sizes[0], 10240)
+ self.assertEqual(sizes[1], 1)
+
+ # kinda.big: 40GB root, 1GB swap
+ ref = utils.get_test_instance_type()
+ self.instance['instance_type_id'] = ref['id']
+ self.instance['root_gb'] = ref['root_gb']
+ sizes = pxe.get_partition_sizes(self.instance)
+ self.assertEqual(sizes[0], 40960)
+ self.assertEqual(sizes[1], 1024)
+
+ def test_get_tftp_image_info(self):
+        # Raises an exception when options are neither specified
+        # on the instance nor in the configuration file
+ CONF.baremetal.deploy_kernel = None
+ CONF.baremetal.deploy_ramdisk = None
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
+ # Even if the instance includes kernel_id and ramdisk_id,
+ # we still need deploy_kernel_id and deploy_ramdisk_id.
+ # If those aren't present in instance[], and not specified in
+ # config file, then we raise an exception.
+ self.instance['kernel_id'] = 'aaaa'
+ self.instance['ramdisk_id'] = 'bbbb'
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
+ # If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
+ # but defaults are set in the config file, we should use those.
+
+ # Here, we confirm both that all four values were set
+ # and that the proper paths are getting set for all of them
+ CONF.baremetal.deploy_kernel = 'cccc'
+ CONF.baremetal.deploy_ramdisk = 'dddd'
+ base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
+ res = pxe.get_tftp_image_info(self.instance)
+ expected = {
+ 'kernel': ['aaaa', os.path.join(base, 'kernel')],
+ 'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
+ 'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
+ 'deploy_ramdisk': ['dddd',
+ os.path.join(base, 'deploy_ramdisk')],
+ }
+ self.assertEqual(res, expected)
+
+        # If deploy_kernel_id and deploy_ramdisk_id are specified in the
+        # image extra_specs, they override any default configuration.
+        # Note that these are passed via the 'instance' object, despite being
+        # inherited from the instance_types_extra_specs table.
+ extra_specs = {
+ 'deploy_kernel_id': 'eeee',
+ 'deploy_ramdisk_id': 'ffff',
+ }
+ self.instance['extra_specs'] = extra_specs
+ res = pxe.get_tftp_image_info(self.instance)
+ self.assertEqual(res['deploy_kernel'][0], 'eeee')
+ self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
+
+
+class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
+
+ def test_collect_mac_addresses(self):
+ self._create_node()
+ address_list = [nic['address'] for nic in self.nic_info]
+ address_list.append(self.node_info['prov_mac_address'])
+ address_list.sort()
+ macs = self.driver._collect_mac_addresses(self.context, self.node)
+ self.assertEqual(macs, address_list)
+
+ def test_generate_udev_rules(self):
+ self._create_node()
+ address_list = [nic['address'] for nic in self.nic_info]
+ address_list.append(self.node_info['prov_mac_address'])
+
+ rules = self.driver._generate_udev_rules(self.context, self.node)
+ for address in address_list:
+ self.assertIn('ATTR{address}=="%s"' % address, rules)
+
+ def test_cache_tftp_images(self):
+ self.instance['kernel_id'] = 'aaaa'
+ self.instance['ramdisk_id'] = 'bbbb'
+ extra_specs = {
+ 'deploy_kernel_id': 'cccc',
+ 'deploy_ramdisk_id': 'dddd',
+ }
+ self.instance['extra_specs'] = extra_specs
+ image_info = pxe.get_tftp_image_info(self.instance)
+
+ self.mox.StubOutWithMock(os, 'makedirs')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.makedirs(os.path.join(CONF.baremetal.tftp_root,
+ self.instance['uuid'])).AndReturn(True)
+ for uuid, path in [image_info[label] for label in image_info]:
+ os.path.exists(path).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.driver._cache_tftp_images(
+ self.context, self.instance, image_info)
+ self.mox.VerifyAll()
+
+ def test_cache_image(self):
+ self.mox.StubOutWithMock(os, 'makedirs')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.makedirs(pxe.get_image_dir_path(self.instance)).\
+ AndReturn(True)
+ os.path.exists(pxe.get_image_file_path(self.instance)).\
+ AndReturn(True)
+ self.mox.ReplayAll()
+
+ image_meta = utils.get_test_image_info(
+ self.context, self.instance)
+ self.driver._cache_image(
+ self.context, self.instance, image_meta)
+ self.mox.VerifyAll()
+
+ def test_inject_into_image(self):
+ # NOTE(deva): we could also test this method by stubbing
+ # nova.virt.disk.api._inject_*_into_fs
+ self._create_node()
+ files = []
+ files.append(('/etc/udev/rules.d/70-persistent-net.rules',
+ self.driver._generate_udev_rules(self.context, self.node)))
+ self.instance['hostname'] = 'fake hostname'
+ files.append(('/etc/hostname', 'fake hostname'))
+ self.instance['key_data'] = 'fake ssh key'
+ net_info = utils.get_test_network_info(1)
+ net = pxe.build_network_config(net_info)
+ admin_password = 'fake password'
+
+ self.mox.StubOutWithMock(disk_api, 'inject_data')
+ disk_api.inject_data(
+ admin_password=admin_password,
+ image=pxe.get_image_file_path(self.instance),
+ key='fake ssh key',
+ metadata=None,
+ partition=None,
+ net=net,
+ files=files, # this is what we're really testing
+ ).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.driver._inject_into_image(
+ self.context, self.node, self.instance,
+ network_info=net_info,
+ admin_password=admin_password,
+ injected_files=None)
+ self.mox.VerifyAll()
+
+
+class PXEPublicMethodsTestCase(BareMetalPXETestCase):
+
+ def test_cache_images(self):
+ self._create_node()
+ self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
+ self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
+ self.mox.StubOutWithMock(self.driver, "_cache_image")
+ self.mox.StubOutWithMock(self.driver, "_inject_into_image")
+
+ pxe.get_tftp_image_info(self.instance).AndReturn([])
+ self.driver._cache_tftp_images(self.context, self.instance, [])
+ self.driver._cache_image(self.context, self.instance, [])
+ self.driver._inject_into_image(self.context, self.node, self.instance,
+ self.test_network_info, None, '')
+ self.mox.ReplayAll()
+
+ self.driver.cache_images(
+ self.context, self.node, self.instance,
+ admin_password='',
+ image_meta=[],
+ injected_files=None,
+ network_info=self.test_network_info,
+ )
+ self.mox.VerifyAll()
+
+ def test_destroy_images(self):
+ self._create_node()
+ self.mox.StubOutWithMock(os, 'unlink')
+
+ os.unlink(pxe.get_image_file_path(self.instance))
+ os.unlink(pxe.get_image_dir_path(self.instance))
+ self.mox.ReplayAll()
+
+ self.driver.destroy_images(self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_activate_bootloader(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ iqn = "iqn-%s" % self.instance['uuid']
+ pxe_config = 'this is a fake pxe config'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+ image_path = pxe.get_image_file_path(self.instance)
+
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
+ self.mox.StubOutWithMock(bm_utils, 'random_alnum')
+ self.mox.StubOutWithMock(db, 'bm_deployment_create')
+ self.mox.StubOutWithMock(pxe, 'build_pxe_config')
+ self.mox.StubOutWithMock(bm_utils, 'write_to_file')
+ self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
+
+ pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
+ pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
+ bm_utils.random_alnum(32).AndReturn('alnum')
+ db.bm_deployment_create(
+ self.context, 'alnum', image_path, pxe_path, 0, 0).\
+ AndReturn(1234)
+ pxe.build_pxe_config(
+ 1234, 'alnum', iqn, 'aaaa', 'bbbb', 'cccc', 'dddd').\
+ AndReturn(pxe_config)
+ bm_utils.write_to_file(pxe_path, pxe_config)
+ for mac in macs:
+ bm_utils.create_link_without_raise(
+ pxe_path, pxe.get_pxe_mac_path(mac))
+ self.mox.ReplayAll()
+
+ self.driver.activate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_deactivate_bootloader(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
+
+ pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
+ for uuid, path in [image_info[label] for label in image_info]:
+ bm_utils.unlink_without_raise(path)
+ bm_utils.unlink_without_raise(pxe_path)
+ self.driver._collect_mac_addresses(self.context, self.node).\
+ AndReturn(macs)
+ for mac in macs:
+ bm_utils.unlink_without_raise(pxe.get_pxe_mac_path(mac))
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+ self.mox.ReplayAll()
+
+ self.driver.deactivate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_deactivate_bootloader_for_nonexistent_instance(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
+
+ pxe.get_tftp_image_info(self.instance).\
+ AndRaise(exception.NovaException)
+ bm_utils.unlink_without_raise(pxe_path)
+ self.driver._collect_mac_addresses(self.context, self.node).\
+ AndRaise(exception.DBError)
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+ self.mox.ReplayAll()
+
+ self.driver.deactivate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
diff --git a/nova/tests/baremetal/test_utils.py b/nova/tests/baremetal/test_utils.py
new file mode 100644
index 000000000..afba55e76
--- /dev/null
+++ b/nova/tests/baremetal/test_utils.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for baremetal utils
+"""
+
+import mox
+
+from nova import exception
+from nova import test
+from nova.virt.baremetal import utils
+
+
+class BareMetalUtilsTestCase(test.TestCase):
+
+ def test_random_alnum(self):
+ s = utils.random_alnum(10)
+ self.assertEqual(len(s), 10)
+ s = utils.random_alnum(100)
+ self.assertEqual(len(s), 100)
diff --git a/nova/tests/cells/fakes.py b/nova/tests/cells/fakes.py
index a9de530d1..e1f3b6e70 100644
--- a/nova/tests/cells/fakes.py
+++ b/nova/tests/cells/fakes.py
@@ -55,6 +55,12 @@ class FakeDBApi(object):
def compute_node_get_all(self, ctxt):
return []
+ def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
+ return []
+
+ def instance_get_by_uuid(self, ctxt, *args, **kwargs):
+ return None
+
class FakeCellsDriver(driver.BaseCellsDriver):
pass
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index 5a2b83145..d05bc4098 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -15,8 +15,12 @@
"""
Tests For CellsManager
"""
+import datetime
+
from nova.cells import messaging
+from nova.cells import utils as cells_utils
from nova import context
+from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
@@ -149,3 +153,61 @@ class CellsManagerClassTestCase(test.TestCase):
self.mox.ReplayAll()
self.cells_manager.bw_usage_update_at_top(
self.ctxt, bw_update_info='fake-bw-info')
+
+ def test_heal_instances(self):
+ self.flags(instance_updated_at_threshold=1000,
+ instance_update_num_instances=2,
+ group='cells')
+
+ fake_context = context.RequestContext('fake', 'fake')
+ stalled_time = timeutils.utcnow()
+ updated_since = stalled_time - datetime.timedelta(seconds=1000)
+
+ def utcnow():
+ return stalled_time
+
+ call_info = {'get_instances': 0, 'sync_instances': []}
+
+ instances = ['instance1', 'instance2', 'instance3']
+
+ def get_instances_to_sync(context, **kwargs):
+ self.assertEqual(context, fake_context)
+ call_info['shuffle'] = kwargs.get('shuffle')
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+ call_info['get_instances'] += 1
+ return iter(instances)
+
+ def instance_get_by_uuid(context, uuid):
+ return instances[int(uuid[-1]) - 1]
+
+ def sync_instance(context, instance):
+ self.assertEqual(context, fake_context)
+ call_info['sync_instances'].append(instance)
+
+ self.stubs.Set(cells_utils, 'get_instances_to_sync',
+ get_instances_to_sync)
+ self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
+ instance_get_by_uuid)
+ self.stubs.Set(self.cells_manager, '_sync_instance',
+ sync_instance)
+ self.stubs.Set(timeutils, 'utcnow', utcnow)
+
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 1)
+ # Only first 2
+ self.assertEqual(call_info['sync_instances'],
+ instances[:2])
+
+ call_info['sync_instances'] = []
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 2)
+ # Now the last 1 and the first 1
+ self.assertEqual(call_info['sync_instances'],
+ [instances[-1], instances[0]])
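
Together these assertions describe the healing loop: each periodic pass syncs at most CONF.cells.instance_update_num_instances instances, resumes from where the previous pass stopped, and wraps around once the generator is exhausted. A small stand-alone illustration of that batching pattern (not the actual CellsManager code):

    def heal_batch(get_instances, state, batch_size):
        # state['it'] carries the partially-consumed iterator across passes.
        synced = []
        restarted = False
        while len(synced) < batch_size:
            if state.get('it') is None:
                state['it'] = iter(get_instances())
            try:
                synced.append(next(state['it']))
            except StopIteration:
                state['it'] = None
                if restarted:
                    break  # avoid spinning when there is nothing to sync
                restarted = True
        return synced

With three instances and batch_size=2 this yields [instance1, instance2] on the first pass and [instance3, instance1] on the second, which is the order asserted above.
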
diff --git a/nova/tests/cells/test_cells_utils.py b/nova/tests/cells/test_cells_utils.py
new file mode 100644
index 000000000..84f60a796
--- /dev/null
+++ b/nova/tests/cells/test_cells_utils.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Utility methods
+"""
+import inspect
+import random
+
+from nova.cells import utils as cells_utils
+from nova import db
+from nova import test
+
+
+class CellsUtilsTestCase(test.TestCase):
+ """Test case for Cells utility methods."""
+ def test_get_instances_to_sync(self):
+ fake_context = 'fake_context'
+
+ call_info = {'get_all': 0, 'shuffle': 0}
+
+ def random_shuffle(_list):
+ call_info['shuffle'] += 1
+
+ def instance_get_all_by_filters(context, filters,
+ sort_key, sort_order):
+ self.assertEqual(context, fake_context)
+ self.assertEqual(sort_key, 'deleted')
+ self.assertEqual(sort_order, 'asc')
+ call_info['got_filters'] = filters
+ call_info['get_all'] += 1
+ return ['fake_instance1', 'fake_instance2', 'fake_instance3']
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ instance_get_all_by_filters)
+ self.stubs.Set(random, 'shuffle', random_shuffle)
+
+ instances = cells_utils.get_instances_to_sync(fake_context)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 1)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 0)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 2)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ updated_since='fake-updated-since')
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 3)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since'})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ project_id='fake-project',
+ updated_since='fake-updated-since', shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 4)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since',
+ 'project_id': 'fake-project'})
+ self.assertEqual(call_info['shuffle'], 2)
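
Read together, the four cases pin the helper down fairly completely; a sketch consistent with them (the real nova.cells.utils code may differ in detail, e.g. in exactly what each yielded item is):

    import random

    from nova import db

    def get_instances_to_sync(context, updated_since=None, project_id=None,
                              shuffle=False):
        filters = {}
        if updated_since is not None:
            filters['changes-since'] = updated_since
        if project_id is not None:
            filters['project_id'] = project_id
        instances = db.instance_get_all_by_filters(context, filters,
                                                   'deleted', 'asc')
        if shuffle:
            random.shuffle(instances)
        for instance in instances:
            yield instance
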
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 57c234734..3bd54cbba 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -6043,7 +6043,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.compute._spawn(mox.IgnoreArg(), self.instance, None, None, None,
False, None).AndRaise(test.TestingException("BuildError"))
self.compute._reschedule_or_reraise(mox.IgnoreArg(), self.instance,
- None, None, None, False, None, {})
+ mox.IgnoreArg(), None, None, None, False, None, {})
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
@@ -6061,6 +6061,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
@@ -6071,7 +6073,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# error:
self.assertRaises(InnerTestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_fail(self):
"""Test handling of exception from _reschedule"""
@@ -6093,9 +6095,10 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
raise test.TestingException("Original")
except Exception:
# not re-scheduling, should raise the original build error:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_false(self):
"""Test not-rescheduling, but no nested exception"""
@@ -6104,22 +6107,25 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
self.mox.StubOutWithMock(self.compute, '_reschedule')
- self.compute._deallocate_network(self.context,
- self.instance)
- self.compute._reschedule(self.context, None, instance_uuid,
- {}, self.compute.scheduler_rpcapi.run_instance, method_args,
- task_states.SCHEDULING).AndReturn(False)
-
- self.mox.ReplayAll()
-
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
+ self.compute._deallocate_network(self.context,
+ self.instance)
+ self.compute._reschedule(self.context, None, {}, instance_uuid,
+ self.compute.scheduler_rpcapi.run_instance, method_args,
+ task_states.SCHEDULING, exc_info).AndReturn(False)
+
+ self.mox.ReplayAll()
+
# re-scheduling is False, the original build error should be
# raised here:
self.assertRaises(test.TestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_true(self):
"""Test behavior when re-scheduling happens"""
@@ -6133,6 +6139,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
self.compute._reschedule(self.context, None, {}, instance_uuid,
@@ -6146,7 +6154,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# re-scheduling is True, original error is logged, but nothing
# is raised:
self.compute._reschedule_or_reraise(self.context, self.instance,
- None, None, None, False, None, {})
+ exc_info, None, None, None, False, None, {})
class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
@@ -6171,7 +6179,8 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
- self.instance, self.instance_type, None, None, None)
+ self.instance, mox.IgnoreArg(), self.instance_type, None, None,
+ None)
self.mox.ReplayAll()
@@ -6195,9 +6204,11 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- None, self.instance, self.instance_type, None, {}, {})
+ None, self.instance, exc_info, self.instance_type, None,
+ {}, {})
def test_reschedule_false(self):
"""Original exception should be raised if the resize is not
@@ -6215,9 +6226,11 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- None, self.instance, self.instance_type, None, {}, {})
+ None, self.instance, exc_info, self.instance_type, None,
+ {}, {})
def test_reschedule_true(self):
"""If rescheduled, the original resize exception should be logged"""
@@ -6238,7 +6251,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute._reschedule_resize_or_reraise(self.context, None,
- self.instance, self.instance_type, None, {}, {})
+ self.instance, exc_info, self.instance_type, None, {}, {})
class ComputeInactiveImageTestCase(BaseTestCase):
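
The common thread in these test changes is that the reschedule helpers now receive the sys.exc_info() triple captured in the except block, and an instance fault is recorded before the reschedule-or-reraise decision is made. A self-contained toy of why the triple is carried around (the names here are illustrative, not the manager's real signature):

    import sys
    import traceback

    def reschedule_or_reraise(exc_info, can_reschedule):
        if can_reschedule:
            # log the original error, then let the reschedule path run
            print("".join(traceback.format_exception(*exc_info)))
        else:
            # re-raise with the original traceback (Python 2 three-arg raise)
            raise exc_info[0], exc_info[1], exc_info[2]

    try:
        raise RuntimeError("Original")
    except Exception:
        reschedule_or_reraise(sys.exc_info(), can_reschedule=True)
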
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 454d5347a..86f47a79c 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -100,6 +100,17 @@ class _BaseTestCase(object):
self.conductor.migration_get(self.context,
migration['id']))
+ def test_migration_get_unconfirmed_by_dest_compute(self):
+ self.mox.StubOutWithMock(db,
+ 'migration_get_unconfirmed_by_dest_compute')
+ db.migration_get_unconfirmed_by_dest_compute(self.context,
+ 'fake-window',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
+ 'fake-window',
+ 'fake-host')
+
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
@@ -308,6 +319,30 @@ class _BaseTestCase(object):
result = self.conductor.instance_type_get(self.context, 'fake-id')
self.assertEqual(result, 'fake-type')
+ def test_vol_get_usage_by_time(self):
+ self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
+ db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
+ 'fake-usage')
+ self.mox.ReplayAll()
+ result = self.conductor.vol_get_usage_by_time(self.context,
+ 'fake-time')
+ self.assertEqual(result, 'fake-usage')
+
+ def test_vol_usage_update(self):
+ self.mox.StubOutWithMock(db, 'vol_usage_update')
+ db.vol_usage_update(self.context, 'fake-vol', 'rd-req', 'rd-bytes',
+ 'wr-req', 'wr-bytes', 'fake-id', 'fake-refr',
+ 'fake-bool')
+ self.mox.ReplayAll()
+ self.conductor.vol_usage_update(self.context, 'fake-vol', 'rd-req',
+ 'rd-bytes', 'wr-req', 'wr-bytes',
+ {'uuid': 'fake-id'}, 'fake-refr',
+ 'fake-bool')
+
+ def test_ping(self):
+ result = self.conductor.ping(self.context, 'foo')
+ self.assertEqual(result, {'service': 'conductor', 'arg': 'foo'})
+
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests"""
@@ -371,6 +406,36 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
+ def _test_stubbed(self, name, dbargs, condargs):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_get_all_by(self.context, **condargs)
+ self.assertEqual(result, 'fake-result')
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (), {})
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host'))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic'))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host'))
+
+ def test_service_get_all_compute_by_host(self):
+ self._test_stubbed('service_get_all_compute_by_host',
+ ('host',),
+ dict(topic='compute', host='host'))
+
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests"""
@@ -432,6 +497,36 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
+ def _test_stubbed(self, name, dbargs, condargs):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_get_all_by(self.context, **condargs)
+ self.assertEqual(result, 'fake-result')
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (), {})
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host'))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic'))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host'))
+
+ def test_service_get_all_compute_by_host(self):
+ self._test_stubbed('service_get_all_compute_by_host',
+ ('host',),
+ dict(topic='compute', host='host'))
+
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests"""
@@ -511,6 +606,28 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
{'name': 'fake-inst'},
'updated_at', 'asc')
+ def _test_stubbed(self, name, *args):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *args).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = getattr(self.conductor, name)(self.context, *args)
+ self.assertEqual(result, 'fake-result')
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all')
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic', 'topic')
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host', 'host')
+
+ def test_service_get_all_compute_by_host(self):
+ self._test_stubbed('service_get_all_compute_by_host', 'host')
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests"""
diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py
index c7ac01c64..33ca49c33 100644
--- a/nova/tests/fakeguestfs.py
+++ b/nova/tests/fakeguestfs.py
@@ -96,13 +96,13 @@ class GuestFS(object):
def stat(self, path):
if not path in self.files:
- raise Exception("No such file: " + path)
+ raise RuntimeError("No such file: " + path)
return self.files[path]["mode"]
def chown(self, uid, gid, path):
if not path in self.files:
- raise Exception("No such file: " + path)
+ raise RuntimeError("No such file: " + path)
if uid != -1:
self.files[path]["uid"] = uid
@@ -111,7 +111,7 @@ class GuestFS(object):
def chmod(self, mode, path):
if not path in self.files:
- raise Exception("No such file: " + path)
+ raise RuntimeError("No such file: " + path)
self.files[path]["mode"] = mode
@@ -123,7 +123,7 @@ class GuestFS(object):
def aug_get(self, cfgpath):
if not self.auginit:
- raise Exception("Augeus not initialized")
+ raise RuntimeError("Augeus not initialized")
if cfgpath == "/files/etc/passwd/root/uid":
return 0
@@ -137,4 +137,4 @@ class GuestFS(object):
return 500
elif cfgpath == "/files/etc/group/admins/gid":
return 600
- raise Exception("Unknown path %s", cfgpath)
+ raise RuntimeError("Unknown path %s", cfgpath)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 471b75308..8ac892b1f 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -145,8 +145,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
fake_network.set_stub_network_methods(self.stubs)
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(
- periodic_interval=0.3, periodic_fuzzy_delay=0)
+ self._restart_compute_service(periodic_interval_max=0.3,
+ periodic_fuzzy_delay=0)
# Create server
server = self._build_minimal_create_server_request()
diff --git a/nova/tests/matchers.py b/nova/tests/matchers.py
index a421cc056..be65da823 100644
--- a/nova/tests/matchers.py
+++ b/nova/tests/matchers.py
@@ -198,6 +198,21 @@ class IsSubDictOf(object):
return SubDictMismatch(k, sub_value, super_value)
+class FunctionCallMatcher(object):
+
+ def __init__(self, expected_func_calls):
+ self.expected_func_calls = expected_func_calls
+ self.actual_func_calls = []
+
+ def call(self, *args, **kwargs):
+ func_call = {'args': args, 'kwargs': kwargs}
+ self.actual_func_calls.append(func_call)
+
+ def match(self):
+ dict_list_matcher = DictListMatches(self.expected_func_calls)
+ return dict_list_matcher.match(self.actual_func_calls)
+
+
class XMLMismatch(object):
"""Superclass for XML mismatch."""
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 673e64997..4d7fb02ec 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -287,7 +287,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
- self.assertEqual((host, node), hosts[0])
+ self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
"""Test addition of certain filter props after a node is selected"""
@@ -300,7 +300,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched._post_select_populate_filter_properties(filter_properties,
host_state)
- self.assertEqual(('host', 'node'),
+ self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpus': 5}, host_state.limits)
@@ -337,5 +337,5 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched.schedule_prep_resize(self.context, image, request_spec,
filter_properties, instance, instance_type, reservations)
- self.assertEqual([('host', 'node')],
+ self.assertEqual([['host', 'node']],
filter_properties['retry']['hosts'])
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index dea764b94..07a1bc2b8 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -1254,8 +1254,8 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
- hosts=[('host1', 'node1'), # same host, different node
- ('host2', 'node2'), # different host and node
+ hosts=[['host1', 'node1'], # same host, different node
+ ['host2', 'node2'], # different host and node
])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1265,7 +1265,7 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
- hosts=[('host1', 'node1')])
+ hosts=[['host1', 'node1']])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index eae3c0151..cab877da9 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -26,6 +26,7 @@ import sys
import uuid
from nova.compute import power_state
+from nova.compute import task_states
from nova import context
from nova import db
from nova.image import glance
@@ -36,6 +37,7 @@ from nova.tests.hyperv import db_fakes
from nova.tests.hyperv import hypervutils
from nova.tests.hyperv import mockproxy
import nova.tests.image.fake as fake_image
+from nova.tests import matchers
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import vmutils
@@ -407,27 +409,55 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self._spawn_instance(True)
self._update_image_raise_exception = True
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
- self._context, self._instance_data, snapshot_name)
+ self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+
+ # assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
def test_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self._spawn_instance(True)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
- self._conn.snapshot(self._context, self._instance_data, snapshot_name)
+ self._conn.snapshot(self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
self.assertTrue(self._image_metadata and
"disk_format" in self._image_metadata and
self._image_metadata["disk_format"] == "vhd")
+ # assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index fc8e01728..6bc18251f 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -32,6 +32,7 @@ from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
@@ -1209,6 +1210,16 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1238,15 +1249,28 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1277,15 +1301,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1316,15 +1352,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1356,15 +1404,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./')
@@ -1391,15 +1451,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1427,15 +1499,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1465,14 +1549,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1503,14 +1599,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1536,14 +1644,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1570,9 +1690,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
@@ -4419,12 +4541,12 @@ class LibvirtVolumeUsageTestCase(test.TestCase):
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
expected_usage = [{'volume': 1,
- 'instance_id': 1,
+ 'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L},
{'volume': 2,
- 'instance_id': 1,
+ 'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L}]
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
new file mode 100644
index 000000000..5804ea49b
--- /dev/null
+++ b/nova/tests/test_periodic_tasks.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+
+from nova import manager
+from nova import test
+
+
+class ManagerMetaTestCase(test.TestCase):
+ """Tests for the meta class which manages the creation of periodic tasks.
+ """
+
+ def test_meta(self):
+ class Manager(object):
+ __metaclass__ = manager.ManagerMeta
+
+ @manager.periodic_task
+ def foo(self):
+ return 'foo'
+
+ @manager.periodic_task(spacing=4)
+ def bar(self):
+ return 'bar'
+
+ @manager.periodic_task(enabled=False)
+ def baz(self):
+ return 'baz'
+
+ m = Manager()
+ self.assertEqual(2, len(m._periodic_tasks))
+ self.assertEqual(None, m._periodic_spacing['foo'])
+ self.assertEqual(4, m._periodic_spacing['bar'])
+ self.assertFalse('baz' in m._periodic_spacing)
+
+
+class Manager(test.TestCase):
+ """Tests the periodic tasks portion of the manager class."""
+
+ def test_periodic_tasks_with_idle(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(1, len(m._periodic_tasks))
+ self.assertEqual(200, m._periodic_spacing['bar'])
+
+ # Now a single pass of the periodic tasks
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_periodic_tasks_constant(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=0)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_periodic_tasks_disabled(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=-1)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_external_running_here(self):
+ self.flags(run_external_periodic_tasks=True)
+
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200, external_process_ok=True)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(1, len(m._periodic_tasks))
+
+ def test_external_running_elsewhere(self):
+ self.flags(run_external_periodic_tasks=False)
+
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200, external_process_ok=True)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(0, len(m._periodic_tasks))
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index b0e25d095..9d9ebcad9 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -215,13 +215,15 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
- self.ctxt, instance_ref, img_ref['id'])
+ self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
- self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
@catch_notimplementederror
def test_reboot(self):
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 3a404a122..86b3a5730 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -20,11 +20,13 @@ Test suite for VMWareAPI.
"""
from nova.compute import power_state
+from nova.compute import task_states
from nova import context
from nova import db
from nova import exception
from nova import test
import nova.tests.image.fake
+from nova.tests import matchers
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
@@ -159,17 +161,29 @@ class VMWareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.context, self.instance, "Test-Snapshot")
+ self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
+ func_call_matcher.call)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
+ self.assertIsNone(func_call_matcher.match())
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
- self.context, self.instance, "Test-Snapshot")
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
def test_reboot(self):
self._create_vm()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 3ca69dc4c..8b57dfef4 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -397,6 +397,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
def test_instance_snapshot_fails_with_no_primary_vdi(self):
+
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
@@ -417,9 +418,20 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
- self.context, instance, image_id)
+ self.context, instance, image_id,
+ lambda *args, **kwargs: None)
def test_instance_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
@@ -428,7 +440,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance = self._create_instance()
image_id = "my_snapshot_id"
- self.conn.snapshot(self.context, instance, image_id)
+ self.conn.snapshot(self.context, instance, image_id,
+ func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
@@ -447,6 +460,9 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertEquals(vbd_labels, [instance['name']])
+ # Ensure task states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
diff --git a/nova/utils.py b/nova/utils.py
index 26468868a..1056a6e2d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -556,12 +556,23 @@ class LoopingCallDone(Exception):
self.retvalue = retvalue
-class LoopingCall(object):
+class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
+ self.done = None
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+ """A looping call which happens at a fixed interval."""
def start(self, interval, initial_delay=None):
self._running = True
@@ -581,7 +592,7 @@ class LoopingCall(object):
self.stop()
done.send(e.retvalue)
except Exception:
- LOG.exception(_('in looping call'))
+ LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
@@ -592,11 +603,47 @@ class LoopingCall(object):
greenthread.spawn(_inner)
return self.done
- def stop(self):
- self._running = False
- def wait(self):
- return self.done.wait()
+class DynamicLoopingCall(LoopingCallBase):
+ """A looping call which happens sleeps until the next known event.
+
+ The function called should return how long to sleep for before being
+ called again.
+ """
+
+ def start(self, initial_delay=None, periodic_interval_max=None):
+ self._running = True
+ done = event.Event()
+
+ def _inner():
+ if initial_delay:
+ greenthread.sleep(initial_delay)
+
+ try:
+ while self._running:
+ idle = self.f(*self.args, **self.kw)
+ if not self._running:
+ break
+
+                    if periodic_interval_max is not None:
+ idle = min(idle, periodic_interval_max)
+ LOG.debug(_('Periodic task processor sleeping for %.02f '
+ 'seconds'), idle)
+ greenthread.sleep(idle)
+ except LoopingCallDone, e:
+ self.stop()
+ done.send(e.retvalue)
+ except Exception:
+ LOG.exception(_('in dynamic looping call'))
+ done.send_exception(*sys.exc_info())
+ return
+ else:
+ done.send(True)
+
+ self.done = done
+
+ greenthread.spawn(_inner)
+ return self.done
def xhtml_escape(value):
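
The split leaves two call styles: FixedIntervalLoopingCall keeps the old behaviour (the IPMI driver below passes interval=0.5), while DynamicLoopingCall sleeps for whatever the callee returns, optionally capped by periodic_interval_max. A minimal usage sketch:

    from nova import utils

    class Poller(object):
        def __init__(self):
            self.retries = 3

        def __call__(self):
            self.retries -= 1
            if self.retries <= 0:
                # stop the loop from inside the callee
                raise utils.LoopingCallDone()

    # fixed interval: run the callable every 0.5 seconds until it raises
    utils.FixedIntervalLoopingCall(Poller()).start(interval=0.5).wait()

    def periodic_tasks():
        # dynamic interval: the return value is how long to sleep, in seconds,
        # capped at periodic_interval_max when one is given
        return 60

    utils.DynamicLoopingCall(periodic_tasks).start(periodic_interval_max=0.3)
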
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index 58899ebef..1111a1e40 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -154,7 +154,7 @@ class IPMI(base.PowerManager):
LOG.exception(_("IPMI power on failed"))
self.retries = 0
- timer = utils.LoopingCall(_wait_for_power_on)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=0.5).wait()
def _power_off(self):
@@ -176,7 +176,7 @@ class IPMI(base.PowerManager):
LOG.exception(_("IPMI power off failed"))
self.retries = 0
- timer = utils.LoopingCall(_wait_for_power_off)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=0.5).wait()
def _set_pxe_for_next_boot(self):
diff --git a/nova/virt/baremetal/net-dhcp.ubuntu.template b/nova/virt/baremetal/net-dhcp.ubuntu.template
new file mode 100644
index 000000000..e8824a88d
--- /dev/null
+++ b/nova/virt/baremetal/net-dhcp.ubuntu.template
@@ -0,0 +1,21 @@
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+#for $ifc in $interfaces
+auto ${ifc.name}
+iface ${ifc.name} inet dhcp
+#if $ifc.hwaddress
+ hwaddress ether ${ifc.hwaddress}
+#end if
+
+#if $use_ipv6
+iface ${ifc.name} inet6 dhcp
+#end if
+
+#end for
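
For one interface with a hypothetical MAC of 00:16:3e:aa:bb:cc and use_ipv6 enabled, the loop above renders roughly to:

    auto eth0
    iface eth0 inet dhcp
     hwaddress ether 00:16:3e:aa:bb:cc

    iface eth0 inet6 dhcp
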
diff --git a/nova/virt/baremetal/interfaces.template b/nova/virt/baremetal/net-static.ubuntu.template
index 94776ed49..f14f0ce8c 100644
--- a/nova/virt/baremetal/interfaces.template
+++ b/nova/virt/baremetal/net-static.ubuntu.template
@@ -12,7 +12,6 @@ auto ${ifc.name}
iface ${ifc.name} inet static
address ${ifc.address}
netmask ${ifc.netmask}
- broadcast ${ifc.broadcast}
gateway ${ifc.gateway}
#if $ifc.dns
dns-nameservers ${ifc.dns}
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
new file mode 100644
index 000000000..4bb61ad39
--- /dev/null
+++ b/nova/virt/baremetal/pxe.py
@@ -0,0 +1,460 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for PXE bare-metal nodes.
+"""
+
+import os
+import shutil
+
+from nova.compute import instance_types
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import fileutils
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import base
+from nova.virt.baremetal import db
+from nova.virt.baremetal import utils as bm_utils
+from nova.virt.disk import api as disk
+
+
+pxe_opts = [
+ cfg.StrOpt('dnsmasq_pid_dir',
+ default='$state_path/baremetal/dnsmasq',
+               help='path to directory that stores pidfiles of dnsmasq'),
+ cfg.StrOpt('dnsmasq_lease_dir',
+ default='$state_path/baremetal/dnsmasq',
+               help='path to directory that stores leasefiles of dnsmasq'),
+ cfg.StrOpt('deploy_kernel',
+ help='Default kernel image ID used in deployment phase'),
+ cfg.StrOpt('deploy_ramdisk',
+ help='Default ramdisk image ID used in deployment phase'),
+ cfg.StrOpt('net_config_template',
+ default='$pybasedir/nova/virt/baremetal/'
+ 'net-dhcp.ubuntu.template',
+ help='Template file for injected network config'),
+ cfg.StrOpt('pxe_append_params',
+ help='additional append parameters for baremetal PXE boot'),
+ cfg.StrOpt('pxe_config_template',
+ default='$pybasedir/nova/virt/baremetal/pxe_config.template',
+ help='Template file for PXE configuration'),
+ cfg.StrOpt('pxe_interface',
+ default='eth0'),
+ cfg.StrOpt('pxe_path',
+ default='/usr/lib/syslinux/pxelinux.0',
+ help='path to pxelinux.0'),
+ ]
+
+LOG = logging.getLogger(__name__)
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(pxe_opts, baremetal_group)
+
+
+CHEETAH = None
+
+
+def _get_cheetah():
+ global CHEETAH
+ if CHEETAH is None:
+ from Cheetah.Template import Template as CHEETAH
+ return CHEETAH
+
+
+def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
+ deployment_aki_path, deployment_ari_path,
+ aki_path, ari_path):
+ """Build the PXE config file for a node
+
+ This method builds the PXE boot configuration file for a node,
+ given all the required parameters.
+
+ The resulting file has both a "deploy" and "boot" label, which correspond
+ to the two phases of booting. This may be extended later.
+
+ """
+ LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
+ pxe_options = {
+ 'deployment_id': deployment_id,
+ 'deployment_key': deployment_key,
+ 'deployment_iscsi_iqn': deployment_iscsi_iqn,
+ 'deployment_aki_path': deployment_aki_path,
+ 'deployment_ari_path': deployment_ari_path,
+ 'aki_path': aki_path,
+ 'ari_path': ari_path,
+ 'pxe_append_params': CONF.baremetal.pxe_append_params,
+ }
+ cheetah = _get_cheetah()
+ pxe_config = str(cheetah(
+ open(CONF.baremetal.pxe_config_template).read(),
+ searchList=[{'pxe_options': pxe_options,
+ 'ROOT': '${ROOT}',
+ }]))
+ return pxe_config
+
+
+def build_network_config(network_info):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption about ordering
+ try:
+ assert isinstance(network_info, list)
+ except AssertionError:
+ network_info = [network_info]
+ interfaces = []
+ for id, (network, mapping) in enumerate(network_info):
+ address_v6 = None
+ gateway_v6 = None
+ netmask_v6 = None
+ if CONF.use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
+ gateway_v6 = mapping['gateway_v6']
+ interface = {
+ 'name': 'eth%d' % id,
+ 'hwaddress': mapping['mac'],
+ 'address': mapping['ips'][0]['ip'],
+ 'gateway': mapping['gateway'],
+ 'netmask': mapping['ips'][0]['netmask'],
+ 'dns': ' '.join(mapping['dns']),
+ 'address_v6': address_v6,
+ 'gateway_v6': gateway_v6,
+ 'netmask_v6': netmask_v6,
+ }
+ interfaces.append(interface)
+
+ cheetah = _get_cheetah()
+ network_config = str(cheetah(
+ open(CONF.baremetal.net_config_template).read(),
+ searchList=[
+ {'interfaces': interfaces,
+ 'use_ipv6': CONF.use_ipv6,
+ }
+ ]))
+ return network_config
+
+
+def get_deploy_aki_id(instance):
+ return instance.get('extra_specs', {}).\
+ get('deploy_kernel_id', CONF.baremetal.deploy_kernel)
+
+
+def get_deploy_ari_id(instance):
+ return instance.get('extra_specs', {}).\
+ get('deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
+
+
+def get_image_dir_path(instance):
+ """Generate the dir for an instances disk"""
+ return os.path.join(CONF.instances_path, instance['name'])
+
+
+def get_image_file_path(instance):
+ """Generate the full path for an instances disk"""
+ return os.path.join(CONF.instances_path, instance['name'], 'disk')
+
+
+def get_pxe_config_file_path(instance):
+ """Generate the path for an instances PXE config file"""
+ return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
+
+
+def get_partition_sizes(instance):
+ type_id = instance['instance_type_id']
+ root_mb = instance['root_gb'] * 1024
+
+ # NOTE(deva): is there a way to get swap_mb directly from instance?
+ swap_mb = instance_types.get_instance_type(type_id)['swap']
+
+ # NOTE(deva): For simpler code paths on the deployment side,
+ # we always create a swap partition. If the flavor
+ # does not specify any swap, we default to 1MB
+ if swap_mb < 1:
+ swap_mb = 1
+
+ return (root_mb, swap_mb)
+
+
+def get_pxe_mac_path(mac):
+ """Convert a MAC address into a PXE config file name"""
+ return os.path.join(
+ CONF.baremetal.tftp_root,
+ 'pxelinux.cfg',
+ "01-" + mac.replace(":", "-").lower()
+ )
+
+
+def get_tftp_image_info(instance):
+ """Generate the paths for tftp files for this instance
+
+ Raises NovaException if
+ - instance does not contain kernel_id or ramdisk_id
+ - deploy_kernel_id or deploy_ramdisk_id can not be read from
+ instance['extra_specs'] and defaults are not set
+
+ """
+ image_info = {
+ 'kernel': [None, None],
+ 'ramdisk': [None, None],
+ 'deploy_kernel': [None, None],
+ 'deploy_ramdisk': [None, None],
+ }
+ try:
+ image_info['kernel'][0] = str(instance['kernel_id'])
+ image_info['ramdisk'][0] = str(instance['ramdisk_id'])
+ image_info['deploy_kernel'][0] = get_deploy_aki_id(instance)
+ image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance)
+ except KeyError as e:
+ pass
+
+ missing_labels = []
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ if uuid is None:
+ missing_labels.append(label)
+ else:
+ image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
+ instance['uuid'], label)
+ if missing_labels:
+ raise exception.NovaException(_(
+ "Can not activate PXE bootloader. The following boot parameters "
+ "were not passed to baremetal driver: %s") % missing_labels)
+ return image_info
+
+
+class PXE(base.NodeDriver):
+ """PXE bare metal driver"""
+
+ def __init__(self):
+ super(PXE, self).__init__()
+
+ def _collect_mac_addresses(self, context, node):
+ macs = []
+ macs.append(db.bm_node_get(context, node['id'])['prov_mac_address'])
+ for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
+ if nic['address']:
+ macs.append(nic['address'])
+ macs.sort()
+ return macs
+
+ def _generate_udev_rules(self, context, node):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption of ordering
+ macs = self._collect_mac_addresses(context, node)
+ rules = ''
+ for (i, mac) in enumerate(macs):
+ rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \
+ 'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \
+ 'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \
+ % {'mac': mac.lower(),
+ 'name': 'eth%d' % i,
+ }
+ return rules
+
+ def _cache_tftp_images(self, context, instance, image_info):
+ """Fetch the necessary kernels and ramdisks for the instance."""
+ fileutils.ensure_tree(
+ os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+
+ LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
+ instance['name'])
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ bm_utils.cache_image(
+ context=context,
+ target=path,
+ image_id=uuid,
+ user_id=instance['user_id'],
+ project_id=instance['project_id'],
+ )
+
+ def _cache_image(self, context, instance, image_meta):
+ """Fetch the instance's image from Glance
+
+ This method pulls the relevant AMI and associated kernel and ramdisk,
+ and the deploy kernel and ramdisk from Glance, and writes them
+ to the appropriate places on local disk.
+
+ Both sets of kernel and ramdisk are needed for PXE booting, so these
+ are stored under CONF.baremetal.tftp_root.
+
+ At present, the AMI is cached and certain files are injected.
+ Debian/ubuntu-specific assumptions are made regarding the injected
+ files. In a future revision, this functionality will be replaced by a
+ more scalable and os-agnostic approach: the deployment ramdisk will
+ fetch from Glance directly, and write its own last-mile configuration.
+
+ """
+ fileutils.ensure_tree(get_image_dir_path(instance))
+ image_path = get_image_file_path(instance)
+
+ LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
+ {'ami': image_meta['id'], 'name': instance['name']})
+ bm_utils.cache_image(context=context,
+ target=image_path,
+ image_id=image_meta['id'],
+ user_id=instance['user_id'],
+ project_id=instance['project_id']
+ )
+
+ return [image_meta['id'], image_path]
+
+ def _inject_into_image(self, context, node, instance, network_info,
+ injected_files=None, admin_password=None):
+ """Inject last-mile configuration into instances image
+
+ Much of this method is a hack around DHCP and cloud-init
+ not working together with baremetal provisioning yet.
+
+ """
+ # NOTE(deva): We assume that if we're not using a kernel,
+ # then the target partition is the first partition
+ partition = None
+ if not instance['kernel_id']:
+ partition = "1"
+
+ ssh_key = None
+ if 'key_data' in instance and instance['key_data']:
+ ssh_key = str(instance['key_data'])
+
+ if injected_files is None:
+ injected_files = []
+
+ net_config = build_network_config(network_info)
+ udev_rules = self._generate_udev_rules(context, node)
+ injected_files.append(
+ ('/etc/udev/rules.d/70-persistent-net.rules', udev_rules))
+
+ if instance['hostname']:
+ injected_files.append(('/etc/hostname', instance['hostname']))
+
+ LOG.debug(_("Injecting files into image for instance %(name)s") %
+ {'name': instance['name']})
+
+ bm_utils.inject_into_image(
+ image=get_image_file_path(instance),
+ key=ssh_key,
+ net=net_config,
+ metadata=instance['metadata'],
+ admin_password=admin_password,
+ files=injected_files,
+ partition=partition,
+ )
+
+ def cache_images(self, context, node, instance,
+ admin_password, image_meta, injected_files, network_info):
+ """Prepare all the images for this instance"""
+ tftp_image_info = get_tftp_image_info(instance)
+ self._cache_tftp_images(context, instance, tftp_image_info)
+
+ self._cache_image(context, instance, image_meta)
+ self._inject_into_image(context, node, instance, network_info,
+ injected_files, admin_password)
+
+ def destroy_images(self, context, node, instance):
+ """Delete instance's image file"""
+ bm_utils.unlink_without_raise(get_image_file_path(instance))
+ bm_utils.unlink_without_raise(get_image_dir_path(instance))
+
+ def activate_bootloader(self, context, node, instance):
+ """Configure PXE boot loader for an instance
+
+        Kernel and ramdisk images are downloaded by _cache_tftp_images,
+        and stored in /tftpboot/{uuid}/.
+
+        This method writes the instance's config file, and then creates
+        a symlink to it for each of the instance's MAC addresses.
+
+ By default, the complete layout looks like this:
+
+ /tftpboot/
+ ./{uuid}/
+ kernel
+ ramdisk
+ deploy_kernel
+ deploy_ramdisk
+ config
+ ./pxelinux.cfg/
+ {mac} -> ../{uuid}/config
+
+ """
+ image_info = get_tftp_image_info(instance)
+ (root_mb, swap_mb) = get_partition_sizes(instance)
+ pxe_config_file_path = get_pxe_config_file_path(instance)
+ image_file_path = get_image_file_path(instance)
+
+ deployment_key = bm_utils.random_alnum(32)
+ deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
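+        # Persist the deployment parameters; the deployment_id and key are
+        # handed to the deploy ramdisk on the kernel command line (see
+        # pxe_config.template) so the deployment can be looked up later.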
+ deployment_id = db.bm_deployment_create(
+ context,
+ deployment_key,
+ image_file_path,
+ pxe_config_file_path,
+ root_mb,
+ swap_mb
+ )
+ pxe_config = build_pxe_config(
+ deployment_id,
+ deployment_key,
+ deployment_iscsi_iqn,
+ image_info['deploy_kernel'][1],
+ image_info['deploy_ramdisk'][1],
+ image_info['kernel'][1],
+ image_info['ramdisk'][1],
+ )
+ bm_utils.write_to_file(pxe_config_file_path, pxe_config)
+
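+        # pxelinux looks up a config file named after the client's MAC, so
+        # point each MAC entry at the per-instance config written above.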
+ macs = self._collect_mac_addresses(context, node)
+ for mac in macs:
+ mac_path = get_pxe_mac_path(mac)
+ bm_utils.unlink_without_raise(mac_path)
+ bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
+
+ def deactivate_bootloader(self, context, node, instance):
+ """Delete PXE bootloader images and config"""
+ try:
+ image_info = get_tftp_image_info(instance)
+ except exception.NovaException:
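+            # Boot parameters are missing for this instance, so skip the
+            # per-image cleanup below.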
+ pass
+ else:
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ bm_utils.unlink_without_raise(path)
+
+ bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
+ try:
+ macs = self._collect_mac_addresses(context, node)
+ except exception.DBError:
+ pass
+ else:
+ for mac in macs:
+ bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
+
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+
+ def activate_node(self, context, node, instance):
+ pass
+
+ def deactivate_node(self, context, node, instance):
+ pass
diff --git a/nova/virt/baremetal/pxe_config.template b/nova/virt/baremetal/pxe_config.template
new file mode 100644
index 000000000..f2fcc9b14
--- /dev/null
+++ b/nova/virt/baremetal/pxe_config.template
@@ -0,0 +1,11 @@
+default deploy
+
+label deploy
+kernel ${pxe_options.deployment_aki_path}
+append initrd=${pxe_options.deployment_ari_path} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=${pxe_options.deployment_iscsi_iqn} deployment_id=${pxe_options.deployment_id} deployment_key=${pxe_options.deployment_key} ${pxe_options.pxe_append_params}
+ipappend 3
+
+
+label boot
+kernel ${pxe_options.aki_path}
+append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params}
diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py
index 902dda9e8..0842ae201 100644
--- a/nova/virt/baremetal/utils.py
+++ b/nova/virt/baremetal/utils.py
@@ -58,3 +58,10 @@ def create_link_without_raise(source, link):
except OSError:
LOG.exception(_("Failed to create symlink from %(source)s to %(link)s")
% locals())
+
+
+def random_alnum(count):
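+    """Return a random string of uppercase letters and digits."""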
+ import random
+ import string
+ chars = string.ascii_uppercase + string.digits
+ return "".join(random.choice(chars) for _ in range(count))
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index 66c6849d4..acea8afdf 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -101,6 +101,7 @@ class VFSGuestFS(vfs.VFS):
self.handle.aug_init("/", 0)
except RuntimeError, e:
+ # dereference object and implicitly close()
self.handle = None
raise exception.NovaException(
_("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
@@ -111,19 +112,31 @@ class VFSGuestFS(vfs.VFS):
def teardown(self):
LOG.debug(_("Tearing down appliance"))
+
try:
- self.handle.aug_close()
- except Exception, e:
- LOG.debug(_("Failed to close augeas %s"), e)
- try:
- self.handle.shutdown()
- except Exception, e:
- LOG.debug(_("Failed to shutdown appliance %s"), e)
- try:
- self.handle.close()
- except Exception, e:
- LOG.debug(_("Failed to close guest handle %s"), e)
- self.handle = None
+ try:
+ self.handle.aug_close()
+ except RuntimeError, e:
+ LOG.warn(_("Failed to close augeas %s"), e)
+
+ try:
+ self.handle.shutdown()
+ except AttributeError:
+                # Older libguestfs versions don't have an explicit shutdown()
+ pass
+ except RuntimeError, e:
+ LOG.warn(_("Failed to shutdown appliance %s"), e)
+
+ try:
+ self.handle.close()
+ except AttributeError:
+                # Older libguestfs versions don't have an explicit close()
+ pass
+ except RuntimeError, e:
+ LOG.warn(_("Failed to close guest handle %s"), e)
+ finally:
+ # dereference object and implicitly close()
+ self.handle = None
@staticmethod
def _canonicalize_path(path):
@@ -157,7 +170,7 @@ class VFSGuestFS(vfs.VFS):
try:
self.handle.stat(path)
return True
- except Exception, e:
+ except RuntimeError:
return False
def set_permissions(self, path, mode):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 9291ac6f8..7d627e80c 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -280,7 +280,7 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
- def snapshot(self, context, instance, image_id):
+ def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index c9cd41680..5d3b3c926 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -26,6 +26,7 @@ semantics of real hypervisor connections.
"""
from nova.compute import power_state
+from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
@@ -122,9 +123,10 @@ class FakeDriver(driver.ComputeDriver):
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
if not instance['name'] in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
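+        # Exercise the task-state callback the way a real driver would.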
+ update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 4359b1007..2b57ba0b1 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -128,8 +128,8 @@ class HyperVDriver(driver.ComputeDriver):
def host_power_action(self, host, action):
return self._hostops.host_power_action(host, action)
- def snapshot(self, context, instance, name):
- self._snapshotops.snapshot(context, instance, name)
+ def snapshot(self, context, instance, name, update_task_state):
+ self._snapshotops.snapshot(context, instance, name, update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index 5dc19ebb1..cdc6e45a4 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -22,6 +22,7 @@ import os
import shutil
import sys
+from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common import cfg
@@ -45,7 +46,7 @@ class SnapshotOps(baseops.BaseOps):
super(SnapshotOps, self).__init__()
self._vmutils = vmutils.VMUtils()
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
vm = self._vmutils.lookup(self._conn, instance_name)
@@ -70,6 +71,8 @@ class SnapshotOps(baseops.BaseOps):
raise vmutils.HyperVException(
_('Failed to create snapshot for VM %s') %
instance_name)
+ else:
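+            # Snapshot taken; the VHDs still have to be exported and merged
+            # before they can be uploaded.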
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_folder = None
f = None
@@ -164,6 +167,8 @@ class SnapshotOps(baseops.BaseOps):
_("Updating Glance image %(image_id)s with content from "
"merged disk %(image_vhd_path)s"),
locals())
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
glance_image_service.update(context, image_id, image_metadata, f)
LOG.debug(_("Snapshot image %(image_id)s updated for VM "
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7b338353d..e3d95c62e 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -57,6 +57,7 @@ from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
@@ -512,7 +513,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_destroy)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_destroy)
timer.start(interval=0.5).wait()
def destroy(self, instance, network_info, block_device_info=None):
@@ -736,7 +737,7 @@ class LibvirtDriver(driver.ComputeDriver):
mount_device)
@exception.wrap_exception()
- def snapshot(self, context, instance, image_href):
+ def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
@@ -804,6 +805,7 @@ class LibvirtDriver(driver.ComputeDriver):
image_type=source_format)
snapshot.create()
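+        # The libvirt snapshot exists, but it still has to be extracted and
+        # uploaded to the image service.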
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# Export the snapshot to a raw image
snapshot_directory = CONF.libvirt_snapshots_directory
@@ -821,6 +823,9 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain(domain=virt_dom)
# Upload that image to the image service
+
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
@@ -872,8 +877,9 @@ class LibvirtDriver(driver.ComputeDriver):
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
- self._create_domain(domain=dom, inst_name=instance['name'])
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ self._create_domain(domain=dom)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
return True
greenthread.sleep(1)
@@ -910,7 +916,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_reboot)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
@@ -934,8 +940,9 @@ class LibvirtDriver(driver.ComputeDriver):
def power_on(self, instance):
"""Power on the specified instance"""
dom = self._lookup_by_name(instance['name'])
- self._create_domain(domain=dom, inst_name=instance['name'])
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ self._create_domain(domain=dom)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
@@ -1047,7 +1054,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_boot)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
@@ -1209,8 +1216,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_console_log_path(instance_name):
- return os.path.join(CONF.instances_path, instance_name,
- 'console.log')
+ return os.path.join(CONF.instances_path, instance_name, 'console.log')
def _chown_console_log_for_instance(self, instance_name):
console_log = self._get_console_log_path(instance_name)
@@ -1246,7 +1252,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._chown_console_log_for_instance(instance['name'])
# NOTE(vish): No need add the suffix to console.log
- libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
+ libvirt_utils.write_to_file(
+ self._get_console_log_path(instance['name']), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
@@ -1787,9 +1794,8 @@ class LibvirtDriver(driver.ComputeDriver):
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = os.path.join(CONF.instances_path,
- instance['name'],
- "console.log")
+ consolelog.source_path = self._get_console_log_path(
+ instance['name'])
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
@@ -2199,7 +2205,7 @@ class LibvirtDriver(driver.ComputeDriver):
if vol_stats:
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
vol_usage.append(dict(volume=bdm['volume_id'],
- instance_id=instance['id'],
+ instance=instance,
rd_req=rd_req,
rd_bytes=rd_bytes,
wr_req=wr_req,
@@ -2560,7 +2566,7 @@ class LibvirtDriver(driver.ComputeDriver):
recover_method(ctxt, instance_ref, dest, block_migration)
# Waiting for completion of live_migration.
- timer = utils.LoopingCall(f=None)
+ timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion"""
@@ -2615,7 +2621,7 @@ class LibvirtDriver(driver.ComputeDriver):
os.mkdir(instance_dir)
# Touch the console.log file, required by libvirt.
- console_file = os.path.join(instance_dir, 'console.log')
+ console_file = self._get_console_log_path(instance_ref['name'])
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
@@ -2969,7 +2975,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
@@ -2987,7 +2994,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 51df7a38a..50fc3e922 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -127,9 +127,9 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, network_info)
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(context, instance, name)
+ self._vmops.snapshot(context, instance, name, update_task_state)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
@@ -354,8 +354,8 @@ class VMWareAPISession(object):
The task is polled until it completes.
"""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
- done)
+ loop = utils.FixedIntervalLoopingCall(self._poll_task, instance_uuid,
+ task_ref, done)
loop.start(CONF.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 5d2685b18..b5b5d1fff 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,6 +27,7 @@ import urllib2
import uuid
from nova.compute import power_state
+from nova.compute import task_states
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -333,7 +334,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance"), instance=instance)
_power_on_vm()
- def snapshot(self, context, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
@@ -390,6 +391,7 @@ class VMWareVMOps(object):
instance=instance)
_create_vm_snapshot()
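+        # The VM snapshot has been taken; copying the disk and uploading it
+        # are still ahead.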
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
@@ -468,6 +470,8 @@ class VMWareVMOps(object):
LOG.debug(_("Uploaded image %s") % snapshot_name,
instance=instance)
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 10bc99fef..d3047d364 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -188,9 +188,9 @@ class XenAPIDriver(driver.ComputeDriver):
network_info, image_meta, resize_instance,
block_device_info)
- def snapshot(self, context, instance, image_id):
+ def snapshot(self, context, instance, image_id, update_task_state):
""" Create snapshot from a running VM instance """
- self._vmops.snapshot(context, instance, image_id)
+ self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ee36cea0b..adb43a743 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -36,6 +36,7 @@ from eventlet import greenthread
from nova import block_device
from nova.compute import power_state
+from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common import cfg
@@ -604,7 +605,11 @@ def get_vdi_for_vm_safely(session, vm_ref):
@contextlib.contextmanager
-def snapshot_attached_here(session, instance, vm_ref, label):
+def snapshot_attached_here(session, instance, vm_ref, label,
+                           update_task_state=None):
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
@@ -616,6 +621,8 @@ def snapshot_attached_here(session, instance, vm_ref, label):
sr_ref = vm_vdi_rec["SR"]
snapshot_ref = session.call_xenapi("VDI.snapshot", vm_vdi_ref, {})
+ if update_task_state is not None:
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
snapshot_rec = session.call_xenapi("VDI.get_record", snapshot_ref)
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index a96a90f0e..fbf3e0599 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -28,6 +28,7 @@ import netaddr
from nova.compute import api as compute
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
@@ -626,7 +627,7 @@ class VMOps(object):
vm,
"start")
- def snapshot(self, context, instance, image_id):
+ def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
@@ -654,7 +655,10 @@ class VMOps(object):
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
- self._session, instance, vm_ref, label) as vdi_uuids:
+ self._session, instance, vm_ref, label,
+ update_task_state) as vdi_uuids:
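+            # snapshot_attached_here has already reported
+            # IMAGE_PENDING_UPLOAD; move to IMAGE_UPLOADING before pushing
+            # the VHD chain to Glance.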
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
vm_utils.upload_image(
context, self._session, instance, vdi_uuids, image_id)