 doc/source/devref/scheduler.rst               |  10
 etc/nova/api-paste.ini                        |   2
 nova/api/ec2/cloud.py                         |   2
 nova/common/deprecated.py                     |  55
 nova/compute/api.py                           |  12
 nova/compute/manager.py                       | 129
 nova/compute/resource_tracker.py              |   2
 nova/compute/rpcapi.py                        |  13
 nova/compute/task_states.py                   |  12
 nova/db/sqlalchemy/api.py                     |   4
 nova/exception.py                             |   4
 nova/network/manager.py                       |   4
 nova/network/quantumv2/api.py                 |   6
 nova/openstack/common/log.py                  |  18
 nova/scheduler/chance.py                      |   8
 nova/scheduler/driver.py                      |   5
 nova/scheduler/filter_scheduler.py            |   7
 nova/scheduler/manager.py                     |  14
 nova/scheduler/multi.py                       |  15
 nova/scheduler/rpcapi.py                      |   8
 nova/scheduler/simple.py                      |  97
 nova/tests/baremetal/test_proxy_bare_metal.py |   2
 nova/tests/compute/test_compute.py            |  34
 nova/tests/compute/test_resource_tracker.py   |  10
 nova/tests/compute/test_rpcapi.py             |   8
 nova/tests/fake_flags.py                      |   2
 nova/tests/integrated/integrated_helpers.py   |   2
 nova/tests/scheduler/test_multi_scheduler.py  |  21
 nova/tests/scheduler/test_rpcapi.py           |   6
 nova/tests/test_deprecated.py                 |  46
 nova/tests/test_hypervapi.py                  |   2
 nova/tests/test_libvirt.py                    | 163
 nova/tests/test_powervm.py                    |   2
 nova/tests/test_virt_drivers.py               |  35
 nova/tests/test_vmwareapi.py                  |   2
 nova/tests/test_xenapi.py                     |  97
 nova/virt/baremetal/driver.py                 |  11
 nova/virt/driver.py                           |  11
 nova/virt/fake.py                             |  23
 nova/virt/hyperv/driver.py                    |   4
 nova/virt/libvirt/driver.py                   |  18
 nova/virt/powervm/driver.py                   |   5
 nova/virt/virtapi.py                          |  44
 nova/virt/vmwareapi/driver.py                 |   4
 nova/virt/xenapi/driver.py                    |  16
 nova/virt/xenapi/host.py                      |  17
 nova/virt/xenapi/vmops.py                     |  42
 smoketests/run_tests.py                       |   6
 48 files changed, 514 insertions(+), 546 deletions(-)
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index 066781514..6f0b8edf5 100644
--- a/doc/source/devref/scheduler.rst
+++ b/doc/source/devref/scheduler.rst
@@ -48,16 +48,6 @@ The :mod:`nova.scheduler.chance` Driver
:show-inheritance:
-The :mod:`nova.scheduler.simple` Driver
----------------------------------------
-
-.. automodule:: nova.scheduler.simple
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
Tests
-----
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 0d57e02a9..3970974c0 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -124,4 +124,4 @@ auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
-signing_dirname = /tmp/keystone-signing-nova
+signing_dir = /tmp/keystone-signing-nova
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 20e68030f..3446b5a8f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -457,7 +457,7 @@ class CloudController(object):
r['groups'] = []
r['ipRanges'] = []
if rule.group_id:
- source_group = db.security_group_get(context, rule.group_id)
+ source_group = rule.grantee_group
r['groups'] += [{'groupName': source_group.name,
'userId': source_group.project_id}]
if rule.protocol:
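
The hunk above removes a per-rule database round trip: the rule rows now arrive
with their grantee_group relationship already populated by the query changes in
nova/db/sqlalchemy/api.py further down. A rough sketch of the difference,
assuming a security group loaded through the updated query:

    # Before: one extra SELECT for every rule granting access to
    # another group.
    for rule in group.rules:
        source_group = db.security_group_get(context, rule.group_id)

    # After: 'rules.grantee_group' is eagerly joined at query time,
    # so the related group is already attached to each rule object.
    for rule in group.rules:
        source_group = rule.grantee_group
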
diff --git a/nova/common/deprecated.py b/nova/common/deprecated.py
deleted file mode 100644
index 6b1c587e8..000000000
--- a/nova/common/deprecated.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 IBM
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import warnings
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-deprecate_opts = [
- cfg.BoolOpt('fatal_deprecations',
- default=False,
- help='make deprecations fatal')
- ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(deprecate_opts)
-
-
-def _showwarning(message, category, filename, lineno, file=None, line=None):
- """
- Redirect warnings into logging.
- """
- LOG.warn(str(message))
-
-
-# Install our warnings handler
-warnings.showwarning = _showwarning
-
-
-def warn(msg=""):
- """
- Warn of a deprecated config option that an operator has specified.
- This should be added in the code where we've made a change in how
- we use some operator changeable parameter to indicate that it will
- go away in a future version of OpenStack.
- """
- warnings.warn(_("Deprecated Config: %s") % msg)
- if FLAGS.fatal_deprecations:
- raise exception.DeprecatedConfig(msg=msg)
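
The warnings-based helper deleted above is superseded by a deprecated() method
on the common logger (see the nova/openstack/common/log.py hunk below), which
keeps the fatal_deprecations behaviour. A minimal migration sketch for a
hypothetical caller:

    # before
    from nova.common import deprecated
    deprecated.warn('my_old_flag is deprecated')

    # after
    from nova.openstack.common import log as logging
    LOG = logging.getLogger(__name__)
    LOG.deprecated('my_old_flag is deprecated')
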
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9e5971862..1bbcdbda9 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -837,11 +837,11 @@ class API(base.Base):
# no daemon to reclaim, so delete it immediately.
if instance['host']:
instance = self.update(context, instance,
- task_state=task_states.POWERING_OFF,
+ task_state=task_states.SOFT_DELETING,
expected_task_state=None,
deleted_at=timeutils.utcnow())
- self.compute_rpcapi.power_off_instance(context, instance)
+ self.compute_rpcapi.soft_delete_instance(context, instance)
else:
LOG.warning(_('No host for instance, deleting immediately'),
instance=instance)
@@ -1001,10 +1001,10 @@ class API(base.Base):
"""Restore a previously deleted (but not reclaimed) instance."""
if instance['host']:
instance = self.update(context, instance,
- task_state=task_states.POWERING_ON,
+ task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
- self.compute_rpcapi.power_on_instance(context, instance)
+ self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
@@ -1030,7 +1030,7 @@ class API(base.Base):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance = self.update(context, instance,
- task_state=task_states.STOPPING,
+ task_state=task_states.POWERING_OFF,
expected_task_state=None,
progress=0)
@@ -1044,7 +1044,7 @@ class API(base.Base):
LOG.debug(_("Going to try to start instance"), instance=instance)
instance = self.update(context, instance,
- task_state=task_states.STARTING,
+ task_state=task_states.POWERING_ON,
expected_task_state=None)
# TODO(yamahata): injected_files isn't supported right now.
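
Each of these transitions follows the usual two-step pattern: persist the
intended task state, then cast to the compute host. The expected_task_state
argument is the guard; conceptually the update behaves like a compare-and-swap
(a sketch of the idea, not the actual DB API -- the error type is illustrative):

    # The update only applies when the current task_state matches what
    # the caller expects, so two racing operations cannot both claim
    # the instance.
    def guarded_update(instance, updates, expected_task_state):
        if instance['task_state'] != expected_task_state:
            raise UnexpectedTaskStateError(instance['task_state'])
        instance.update(updates)
        return instance
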
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7b2dc4d1d..39c3faddf 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -73,6 +73,7 @@ from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
+from nova.virt import virtapi
from nova import volume
@@ -209,10 +210,27 @@ def _get_image_meta(context, image_ref):
return image_service.show(context, image_id)
+class ComputeVirtAPI(virtapi.VirtAPI):
+ def __init__(self, compute):
+ super(ComputeVirtAPI, self).__init__()
+ self._compute = compute
+
+ def instance_update(self, context, instance_uuid, updates):
+ return self._compute.db.instance_update_and_get_original(context,
+ instance_uuid,
+ updates)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return self._compute.db.instance_get_by_uuid(context, instance_uuid)
+
+ def instance_get_all_by_host(self, context, host):
+ return self._compute.db.instance_get_all_by_host(context, host)
+
+
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.10'
+ RPC_API_VERSION = '2.11'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -225,10 +243,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Compute driver option required, but not specified"))
sys.exit(1)
+ self.virtapi = ComputeVirtAPI(self)
+
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
self.driver = utils.check_isinstance(
- importutils.import_object_ns('nova.virt', compute_driver),
+ importutils.import_object_ns('nova.virt', compute_driver,
+ self.virtapi),
driver.ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
@@ -979,48 +1000,34 @@ class ComputeManager(manager.SchedulerDependentManager):
do_terminate_instance(instance, bdms)
+ # NOTE(johannes): This is probably better named power_off_instance
+ # so it matches the driver method, but because of other issues, we
+ # can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def stop_instance(self, context, instance):
- """Stopping an instance on this host.
-
- Alias for power_off_instance for compatibility"""
- self.power_off_instance(context, instance=instance,
- final_state=vm_states.STOPPED)
-
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @reverts_task_state
- @wrap_instance_fault
- def start_instance(self, context, instance):
- """Starting an instance on this host.
-
- Alias for power_on_instance for compatibility"""
- self.power_on_instance(context, instance=instance)
-
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @reverts_task_state
- @wrap_instance_fault
- def power_off_instance(self, context, instance,
- final_state=vm_states.SOFT_DELETED):
- """Power off an instance on this host."""
+ """Stopping an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
- vm_state=final_state,
+ vm_state=vm_states.STOPPED,
expected_task_state=(task_states.POWERING_OFF,
task_states.STOPPING),
task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
+ # NOTE(johannes): This is probably better named power_on_instance
+ # so it matches the driver method, but because of other issues, we
+ # can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def power_on_instance(self, context, instance):
- """Power on an instance on this host."""
+ def start_instance(self, context, instance):
+ """Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
@@ -1036,6 +1043,71 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
+ def soft_delete_instance(self, context, instance):
+ """Soft delete an instance on this host."""
+ self._notify_about_instance_usage(context, instance,
+ "soft_delete.start")
+ try:
+ self.driver.soft_delete(instance)
+ except NotImplementedError:
+ # Fallback to just powering off the instance if the hypervisor
+ # doesn't implement the soft_delete method
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
+ self._notify_about_instance_usage(context, instance, "soft_delete.end")
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def restore_instance(self, context, instance):
+ """Restore a soft-deleted instance on this host."""
+ self._notify_about_instance_usage(context, instance, "restore.start")
+ try:
+ self.driver.restore(instance)
+ except NotImplementedError:
+ # Fallback to just powering on the instance if the hypervisor
+ # doesn't implement the restore method
+ self.driver.power_on(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ expected_task_state=task_states.RESTORING,
+ task_state=None)
+ self._notify_about_instance_usage(context, instance, "restore.end")
+
+ # NOTE(johannes): In the folsom release, power_off_instance was poorly
+ # named. It was the main entry point to soft delete an instance. That
+ # has been changed to soft_delete_instance now, but power_off_instance
+ # will need to stick around for compatibility in grizzly.
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def power_off_instance(self, context, instance):
+ """Power off an instance on this host."""
+ self.soft_delete_instance(context, instance)
+
+ # NOTE(johannes): In the folsom release, power_on_instance was poorly
+ # named. It was the main entry point to restore a soft deleted instance.
+ # That has been changed to restore_instance now, but power_on_instance
+ # will need to stick around for compatibility in grizzly.
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
+ def power_on_instance(self, context, instance):
+ """Power on an instance on this host."""
+ self.restore_instance(context, instance)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
+ @wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None):
"""Destroy and re-make this instance.
@@ -2078,7 +2150,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def reserve_block_device_name(self, context, instance, device, volume_id):
+ def reserve_block_device_name(self, context, instance, device,
+ volume_id=None):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_reserve():
@@ -2087,7 +2160,7 @@ class ComputeManager(manager.SchedulerDependentManager):
device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
- 'volume_id': volume_id,
+ 'volume_id': volume_id or 'reserved',
'device_name': result}
self.db.block_device_mapping_create(context, values)
return result
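
The ComputeVirtAPI indirection added above is what lets virt drivers stop
importing nova.db directly: the manager hands each driver a VirtAPI at
construction time and the driver calls back through it. A hedged sketch of a
driver using the new contract, assuming the base class stores the object as
self.virtapi (the driver and method names below are illustrative):

    class MyDriver(driver.ComputeDriver):
        def __init__(self, virtapi, read_only=False):
            # virtapi is now the first constructor argument for all
            # drivers, as the test changes later in this diff show.
            super(MyDriver, self).__init__(virtapi)

        def _record_power_change(self, context, instance, state):
            # DB access goes through the VirtAPI, not nova.db.
            self.virtapi.instance_update(context, instance['uuid'],
                                         {'power_state': state})
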
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 7acaa3dc1..5e3d745bb 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -474,7 +474,7 @@ class ResourceTracker(object):
self.tracked_instances[uuid] = 1
sign = 1
- if instance['vm_state'] == vm_states.DELETED:
+ if is_deleted_instance:
self.tracked_instances.pop(uuid)
sign = -1
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index c6ad3383e..e42e025dc 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -138,6 +138,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.8 - Remove migration_id, add migration to finish_resize
2.9 - Add publish_service_capabilities()
2.10 - Adds filter_properties and request_spec to prep_resize()
+ 2.11 - Adds soft_delete_instance() and restore_instance()
'''
#
@@ -539,6 +540,18 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
+ def soft_delete_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('soft_delete_instance',
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance))
+
+ def restore_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('restore_instance',
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance))
+
class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the security group rpc API.
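
The two new casts are fire-and-forget and route on the instance's host, the
same as the existing power_off/power_on plumbing. A condensed sketch of the
call path this enables:

    # API layer (see the nova/compute/api.py hunk above)
    self.compute_rpcapi.soft_delete_instance(context, instance)
    # -> cast('soft_delete_instance') on the compute.<host> topic
    # -> ComputeManager.soft_delete_instance(), available from
    #    RPC API version 2.11 on
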
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index d4df75e60..c2966d554 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -68,16 +68,18 @@ SUSPENDING = 'suspending'
# possible task states during resume()
RESUMING = 'resuming'
+# NOTE(johannes): STOPPING and STARTING need to stick around for the
+# grizzly release for compatibility, but can be removed afterwards.
# possible task states during stop()
STOPPING = 'stopping'
# possible task states during start()
STARTING = 'starting'
-# possible task states during soft_delete()
+# possible task states during power_off()
POWERING_OFF = 'powering-off'
-# possible task states during restore()
+# possible task states during power_on()
POWERING_ON = 'powering-on'
# possible task states during rescue()
@@ -96,3 +98,9 @@ MIGRATING = "migrating"
# possible task states during delete()
DELETING = 'deleting'
+
+# possible task states during soft_delete()
+SOFT_DELETING = 'soft-deleting'
+
+# possible task states during restore()
+RESTORING = 'restoring'
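
With the new states, the four lifecycle operations each get a distinct task
state, which is what the manager's expected_task_state checks key off. The
mapping after this change, as a sketch (start's final state follows the
pre-existing power-on path):

    # operation      task_state         resulting vm_state
    # stop()         POWERING_OFF    -> STOPPED
    # start()        POWERING_ON     -> ACTIVE
    # soft_delete()  SOFT_DELETING   -> SOFT_DELETED
    # restore()      RESTORING       -> ACTIVE
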
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index f5f7f3b15..a00895c57 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -3423,7 +3423,7 @@ def _security_group_get_query(context, session=None, read_deleted=None,
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
- query = query.options(joinedload_all('rules'))
+ query = query.options(joinedload_all('rules.grantee_group'))
return query
@@ -3480,7 +3480,7 @@ def security_group_get_by_name(context, project_id, group_name,
filter_by(name=group_name)
if columns_to_join is None:
- columns_to_join = ['instances', 'rules']
+ columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
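
joinedload_all('rules.grantee_group') is what makes the rule.grantee_group
access in the EC2 hunk free: the related group rows come back in the same
SELECT as the rules. A self-contained SQLAlchemy sketch of the idiom (the
session and model names are illustrative, not Nova code):

    from sqlalchemy.orm import joinedload_all

    # One query with JOINs across the relationship chain instead of
    # one query per rule (the classic N+1 pattern).
    groups = (session.query(SecurityGroup)
              .options(joinedload_all('rules.grantee_group'))
              .all())
    for group in groups:
        for rule in group.rules:
            rule.grantee_group  # already loaded; no further SQL
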
diff --git a/nova/exception.py b/nova/exception.py
index ab9265f5d..c3e1fe39e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -175,10 +175,6 @@ class DBError(NovaException):
super(DBError, self).__init__(str(inner_exception))
-class DeprecatedConfig(NovaException):
- message = _("Fatal call to deprecated config %(msg)s")
-
-
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text")
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a944ebd40..afe79a664 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -589,7 +589,7 @@ class FloatingIP(object):
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
service = self.db.service_get_by_host_and_topic(
- context, instance['host'], 'network')
+ context.elevated(), instance['host'], 'network')
if service and utils.service_is_up(service):
host = instance['host']
else:
@@ -840,7 +840,7 @@ class NetworkManager(manager.SchedulerDependentManager):
The one at a time part is to flatten the layout to help scale
"""
- RPC_API_VERSION = '1.1'
+ RPC_API_VERSION = '1.2'
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
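
context.elevated() returns a copy of the request context with admin rights;
the service-table lookup is admin-only, so the elevated copy is passed instead
of the caller's own context. The idiom, roughly:

    admin_ctxt = context.elevated()  # same request, is_admin=True
    service = self.db.service_get_by_host_and_topic(
        admin_ctxt, instance['host'], 'network')
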
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 241bb1a2d..e8d672835 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -508,15 +508,13 @@ class API(base.Base):
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
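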
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance"""
# NOTE(wenjianhn): just pass so that migrating an instance
# doesn't raise for now.
pass
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance"""
# NOTE(wenjianhn): just pass so that migrating an instance
# doesn't raise for now.
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 35c7972c8..67a06a7af 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -76,6 +76,9 @@ log_opts = [
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
@@ -170,6 +173,14 @@ class ContextAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
+ def deprecated(self, msg, *args, **kwargs):
+ stdmsg = _("Deprecated Config: %s") % msg
+ if CONF.fatal_deprecations:
+ self.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+ else:
+ self.warn(stdmsg, *args, **kwargs)
+
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
@@ -450,3 +461,10 @@ class ColorHandler(logging.StreamHandler):
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+ super(Exception, self).__init__(self.message % dict(msg=msg))
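
A short behaviour sketch for the new logger method (names and messages taken
from the hunk above):

    LOG.deprecated('old_flag is deprecated; use new_flag')
    # fatal_deprecations=False (default): logged as
    #   WARNING: Deprecated Config: old_flag is deprecated; use new_flag
    # fatal_deprecations=True: logged as CRITICAL, then DeprecatedConfig
    #   is raised with "Fatal call to deprecated config: ..."
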
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index e0f351a78..6d6288d83 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -92,11 +92,3 @@ class ChanceScheduler(driver.Scheduler):
filter_properties)
self.compute_rpcapi.prep_resize(context, image, instance,
instance_type, host, reservations)
-
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- """Picks a host that is up at random."""
- host = self._schedule(context, FLAGS.volume_topic, None, {})
- driver.cast_to_host(context, FLAGS.volume_topic, host, 'create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index cba1ed935..012ad09ed 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -181,11 +181,6 @@ class Scheduler(object):
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- msg = _("Driver must implement schedule_create_volune")
- raise NotImplementedError(msg)
-
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 4bddb949b..78bd49a96 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -42,13 +42,6 @@ class FilterScheduler(driver.Scheduler):
self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
- def schedule_create_volume(self, context, volume_id, snapshot_id, image_id,
- reservations):
- # NOTE: We're only focused on compute instances right now,
- # so this method will always raise NoValidHost().
- msg = _("No host selection for %s defined.") % FLAGS.volume_topic
- raise exception.NoValidHost(reason=msg)
-
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 0703e030a..531c54726 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -43,7 +43,7 @@ from nova import quota
LOG = logging.getLogger(__name__)
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
- default='nova.scheduler.multi.MultiScheduler',
+ default='nova.scheduler.filter_scheduler.FilterScheduler',
help='Default driver to use for the scheduler')
FLAGS = flags.FLAGS
@@ -55,7 +55,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '2.2'
+ RPC_API_VERSION = '2.3'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -80,14 +80,8 @@ class SchedulerManager(manager.Manager):
def create_volume(self, context, volume_id, snapshot_id,
reservations=None, image_id=None):
- try:
- self.driver.schedule_create_volume(
- context, volume_id, snapshot_id, image_id)
- except Exception as ex:
- with excutils.save_and_reraise_exception():
- LOG.warning(_("Failed to schedule create_volume: %(ex)s") %
- locals())
- db.volume_update(context, volume_id, {'status': 'error'})
+ # function removed in RPC API 2.3
+ pass
def live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
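
create_volume is kept as an empty handler rather than deleted outright because
2.x clients may still cast it; within a major RPC version the manager has to
keep accepting every message that version ever defined. Sketch of the
constraint:

    # A client pinned at scheduler RPC 2.2 can still send:
    #   self.cast(ctxt, self.make_msg('create_volume', ...))
    # A 2.3 manager with no create_volume method would fail to
    # dispatch that message, so the no-op stays until a 3.0 bump.
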
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index c589d6276..13e3c0e1a 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -19,6 +19,12 @@
"""
Scheduler that allows routing some calls to one driver and others to another.
+
+This scheduler was originally used to deal with both compute and volume. It is
+now used by OpenStack extensions that want to use nova-scheduler to schedule
+requests to compute nodes but provide their own manager and topic.
+
+https://bugs.launchpad.net/nova/+bug/1009681
"""
from nova import flags
@@ -32,9 +38,6 @@ multi_scheduler_opts = [
default='nova.scheduler.'
'filter_scheduler.FilterScheduler',
help='Driver to use for scheduling compute calls'),
- cfg.StrOpt('volume_scheduler_driver',
- default='nova.scheduler.chance.ChanceScheduler',
- help='Driver to use for scheduling volume calls'),
cfg.StrOpt('default_scheduler_driver',
default='nova.scheduler.chance.ChanceScheduler',
help='Default driver to use for scheduling calls'),
@@ -56,13 +59,10 @@ class MultiScheduler(driver.Scheduler):
super(MultiScheduler, self).__init__()
compute_driver = importutils.import_object(
FLAGS.compute_scheduler_driver)
- volume_driver = importutils.import_object(
- FLAGS.volume_scheduler_driver)
default_driver = importutils.import_object(
FLAGS.default_scheduler_driver)
self.drivers = {'compute': compute_driver,
- 'volume': volume_driver,
'default': default_driver}
def schedule_run_instance(self, *args, **kwargs):
@@ -71,9 +71,6 @@ class MultiScheduler(driver.Scheduler):
def schedule_prep_resize(self, *args, **kwargs):
return self.drivers['compute'].schedule_prep_resize(*args, **kwargs)
- def schedule_create_volume(self, *args, **kwargs):
- return self.drivers['volume'].schedule_create_volume(*args, **kwargs)
-
def update_service_capabilities(self, service_name, host, capabilities):
# Multi scheduler is only a holder of sub-schedulers, so
# pass the capabilities to the schedulers that matter
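
With the volume driver gone, MultiScheduler routes compute calls to the
compute driver and everything else to the default driver, which is how the
extension use case in the new docstring works. Condensed from the hunk above:

    # How MultiScheduler dispatches after this change:
    self.drivers = {'compute': compute_driver,
                    'default': default_driver}
    # schedule_run_instance / schedule_prep_resize -> drivers['compute']
    # calls from an extension's own topic           -> drivers['default']
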
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index 2c280be44..b41668733 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -46,6 +46,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.0 - Remove 1.x backwards compat
2.1 - Add image_id to create_volume()
2.2 - Remove reservations argument to create_volume()
+ 2.3 - Remove create_volume()
'''
#
@@ -95,13 +96,6 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
disk_over_commit=disk_over_commit, instance=instance_p,
dest=dest))
- def create_volume(self, ctxt, volume_id, snapshot_id, image_id):
- self.cast(ctxt,
- self.make_msg('create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id),
- version='2.2')
-
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
deleted file mode 100644
index 48e5ea37d..000000000
--- a/nova/scheduler/simple.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 OpenStack, LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple Scheduler - for Volumes
-
-Note: Deprecated in Folsom. Will be removed along with nova-volumes
-"""
-
-from nova.common import deprecated
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.scheduler import chance
-from nova.scheduler import driver
-from nova import utils
-
-
-simple_scheduler_opts = [
- cfg.IntOpt("max_gigabytes",
- default=10000,
- help="maximum number of volume gigabytes to allow per host"),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(simple_scheduler_opts)
-
-
-class SimpleScheduler(chance.ChanceScheduler):
- """Implements Naive Scheduler that tries to find least loaded host."""
-
- def schedule_run_instance(self, context, request_spec, admin_password,
- injected_files, requested_networks,
- is_first_time, filter_properties):
- deprecated.warn(_('SimpleScheduler now only covers volume scheduling '
- 'and is deprecated in Folsom. Non-volume functionality in '
- 'SimpleScheduler has been replaced by FilterScheduler'))
- super(SimpleScheduler, self).schedule_run_instance(context,
- request_spec, admin_password, injected_files,
- requested_networks, is_first_time, filter_properties)
-
- def schedule_create_volume(self, context, volume_id, snapshot_id,
- image_id):
- """Picks a host that is up and has the fewest volumes."""
- deprecated.warn(_('nova-volume functionality is deprecated in Folsom '
- 'and will be removed in Grizzly. Volumes are now handled '
- 'by Cinder'))
- elevated = context.elevated()
-
- volume_ref = db.volume_get(context, volume_id)
- availability_zone = volume_ref.get('availability_zone')
-
- zone, host = None, None
- if availability_zone:
- zone, _x, host = availability_zone.partition(':')
- if host and context.is_admin:
- service = db.service_get_by_args(elevated, host, 'nova-volume')
- if not utils.service_is_up(service):
- raise exception.WillNotSchedule(host=host)
- driver.cast_to_volume_host(context, host, 'create_volume',
- volume_id=volume_id, snapshot_id=snapshot_id,
- image_id=image_id)
- return None
-
- results = db.service_get_all_volume_sorted(elevated)
- if zone:
- results = [(service, gigs) for (service, gigs) in results
- if service['availability_zone'] == zone]
- for result in results:
- (service, volume_gigabytes) = result
- if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
- msg = _("Not enough allocatable volume gigabytes remaining")
- raise exception.NoValidHost(reason=msg)
- if utils.service_is_up(service) and not service['disabled']:
- driver.cast_to_volume_host(context, service['host'],
- 'create_volume', volume_id=volume_id,
- snapshot_id=snapshot_id, image_id=image_id)
- return None
- msg = _("Is the appropriate service running?")
- raise exception.NoValidHost(reason=msg)
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
index 4fe6722c3..e9184ee5d 100644
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ b/nova/tests/baremetal/test_proxy_bare_metal.py
@@ -257,7 +257,7 @@ class BareMetalTestCase(test.TestCase):
self.mox.ReplayAll()
# Code under test
- conn = driver.BareMetalDriver(True)
+ conn = driver.BareMetalDriver(None, True)
# TODO(mikalstill): this is not a very good fake instance
info = conn.get_info({'name': 'instance-00000001'})
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 8a5ced502..676f7c159 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -754,7 +754,7 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
@@ -763,10 +763,10 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STARTING})
+ {"task_state": task_states.POWERING_ON})
self.compute.start_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
@@ -818,7 +818,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_ON})
- self.compute.power_on_instance(self.context, instance=instance)
+ self.compute.start_instance(self.context, instance=instance)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, instance=instance)
@@ -837,7 +837,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
- self.compute.power_off_instance(self.context, instance=instance)
+ self.compute.stop_instance(self.context, instance=instance)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, instance=instance)
@@ -1590,12 +1590,14 @@ class ComputeTestCase(BaseTestCase):
"""ensure that task_state is reverted after a failed operation"""
actions = [
("reboot_instance", task_states.REBOOTING),
- ("stop_instance", task_states.STOPPING),
- ("start_instance", task_states.STARTING),
+ ("stop_instance", task_states.POWERING_OFF),
+ ("start_instance", task_states.POWERING_ON),
("terminate_instance", task_states.DELETING,
task_states.DELETING),
("power_off_instance", task_states.POWERING_OFF),
("power_on_instance", task_states.POWERING_ON),
+ ("soft_delete_instance", task_states.SOFT_DELETING),
+ ("restore_instance", task_states.RESTORING),
("rebuild_instance", task_states.REBUILDING, None,
{'orig_image_ref': None,
'image_ref': None,
@@ -2411,7 +2413,7 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(task_states.STOPPING, instances[0]['task_state'])
+ self.assertEqual(task_states.POWERING_OFF, instances[0]['task_state'])
def test_add_instance_fault(self):
exc_info = None
@@ -3091,7 +3093,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.STOPPING})
+ {"task_state": task_states.POWERING_OFF})
self.compute.stop_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -3100,7 +3102,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.start(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.STARTING)
+ self.assertEqual(instance['task_state'], task_states.POWERING_ON)
db.instance_destroy(self.context, instance['uuid'])
@@ -3115,7 +3117,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.stop(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.STOPPING)
+ self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
db.instance_destroy(self.context, instance['uuid'])
@@ -3147,7 +3149,7 @@ class ComputeAPITestCase(BaseTestCase):
None)
start_check_state(instance['uuid'], power_state.NOSTATE,
- vm_states.STOPPED, task_states.STARTING)
+ vm_states.STOPPED, task_states.POWERING_ON)
db.instance_destroy(self.context, instance['uuid'])
@@ -3264,7 +3266,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
db.instance_destroy(self.context, instance['uuid'])
@@ -3291,7 +3293,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
@@ -3382,7 +3384,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+ self.assertEqual(instance['task_state'], task_states.SOFT_DELETING)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
@@ -3392,7 +3394,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.restore(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.POWERING_ON)
+ self.assertEqual(instance['task_state'], task_states.RESTORING)
db.instance_destroy(self.context, instance['uuid'])
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 6276c47ac..dfeebf0d0 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -42,7 +42,7 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
class FakeVirtDriver(driver.ComputeDriver):
- def __init__(self):
+ def __init__(self, virtapi):
self.memory_mb = 5
self.local_gb = 6
self.vcpus = 1
@@ -148,9 +148,9 @@ class BaseTestCase(test.TestCase):
host = "fakehost"
if unsupported:
- driver = UnsupportedVirtDriver()
+ driver = UnsupportedVirtDriver(None)
else:
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
tracker = resource_tracker.ResourceTracker(host, driver)
return tracker
@@ -293,12 +293,12 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(1, self.tracker.compute_node['current_workload'])
def testFreeRamResourceValue(self):
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def testFreeDiskResourceValue(self):
- driver = FakeVirtDriver()
+ driver = FakeVirtDriver(None)
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 7b3d58909..f94cca857 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -204,6 +204,14 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('power_on_instance', 'cast',
instance=self.fake_instance)
+ def test_soft_delete_instance(self):
+ self._test_compute_api('soft_delete_instance', 'cast',
+ instance=self.fake_instance)
+
+ def test_restore_instance(self):
+ self._test_compute_api('restore_instance', 'cast',
+ instance=self.fake_instance)
+
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance, block_migration='block_migration',
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 70c166aa9..d1c166ba1 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,7 +20,7 @@ from nova import flags
FLAGS = flags.FLAGS
-flags.DECLARE('compute_scheduler_driver', 'nova.scheduler.multi')
+flags.DECLARE('scheduler_driver', 'nova.scheduler.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
flags.DECLARE('network_size', 'nova.network.manager')
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index f3bd944da..b1b2c076e 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -69,7 +69,7 @@ class _IntegratedTestBase(test.TestCase):
self.stub_module('crypto', fake_crypto)
nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.flags(compute_scheduler_driver='nova.scheduler.'
+ self.flags(scheduler_driver='nova.scheduler.'
'chance.ChanceScheduler')
# set up services
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index 04ab67675..ee9e0bbd3 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -36,14 +36,6 @@ class FakeComputeScheduler(driver.Scheduler):
pass
-class FakeVolumeScheduler(driver.Scheduler):
- is_fake_volume = True
-
- def __init__(self):
- super(FakeVolumeScheduler, self).__init__()
- self.is_update_caps_called = False
-
-
class FakeDefaultScheduler(driver.Scheduler):
is_fake_default = True
@@ -61,18 +53,15 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
super(MultiDriverTestCase, self).setUp()
base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
compute_cls_name = base_name % 'FakeComputeScheduler'
- volume_cls_name = base_name % 'FakeVolumeScheduler'
default_cls_name = base_name % 'FakeDefaultScheduler'
self.flags(compute_scheduler_driver=compute_cls_name,
- volume_scheduler_driver=volume_cls_name,
default_scheduler_driver=default_cls_name)
self._manager = multi.MultiScheduler()
def test_drivers_inited(self):
mgr = self._manager
- self.assertEqual(len(mgr.drivers), 3)
+ self.assertEqual(len(mgr.drivers), 2)
self.assertTrue(mgr.drivers['compute'].is_fake_compute)
- self.assertTrue(mgr.drivers['volume'].is_fake_volume)
self.assertTrue(mgr.drivers['default'].is_fake_default)
def test_update_service_capabilities(self):
@@ -84,10 +73,8 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
'update_service_capabilities',
fake_update_service_capabilities)
self.assertFalse(mgr.drivers['compute'].is_update_caps_called)
- self.assertFalse(mgr.drivers['volume'].is_update_caps_called)
mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
- self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
class SimpleSchedulerTestCase(MultiDriverTestCase):
@@ -99,10 +86,8 @@ class SimpleSchedulerTestCase(MultiDriverTestCase):
super(SimpleSchedulerTestCase, self).setUp()
base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
compute_cls_name = base_name % 'FakeComputeScheduler'
- volume_cls_name = 'nova.scheduler.simple.SimpleScheduler'
default_cls_name = base_name % 'FakeDefaultScheduler'
self.flags(compute_scheduler_driver=compute_cls_name,
- volume_scheduler_driver=volume_cls_name,
default_scheduler_driver=default_cls_name)
self._manager = multi.MultiScheduler()
@@ -117,11 +102,9 @@ class SimpleSchedulerTestCase(MultiDriverTestCase):
self.assertFalse(mgr.drivers['compute'].is_update_caps_called)
mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
- self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
def test_drivers_inited(self):
mgr = self._manager
- self.assertEqual(len(mgr.drivers), 3)
+ self.assertEqual(len(mgr.drivers), 2)
self.assertTrue(mgr.drivers['compute'].is_fake_compute)
- self.assertTrue(mgr.drivers['volume'] is not None)
self.assertTrue(mgr.drivers['default'].is_fake_default)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index 100812175..8cf741118 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -83,9 +83,3 @@ class SchedulerRpcAPITestCase(test.TestCase):
self._test_scheduler_api('update_service_capabilities',
rpc_method='fanout_cast', service_name='fake_name',
host='fake_host', capabilities='fake_capabilities')
-
- def test_create_volume(self):
- self._test_scheduler_api('create_volume',
- rpc_method='cast', volume_id="fake_volume",
- snapshot_id="fake_snapshots", image_id="fake_image",
- version='2.2')
diff --git a/nova/tests/test_deprecated.py b/nova/tests/test_deprecated.py
deleted file mode 100644
index ebc6fed93..000000000
--- a/nova/tests/test_deprecated.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2010 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.common import deprecated
-from nova import exception
-from nova import test
-
-
-class DeprecatedConfigTestCase(test.TestCase):
- def setUp(self):
- super(DeprecatedConfigTestCase, self).setUp()
- self.logbuffer = ""
-
- def local_log(msg):
- self.logbuffer = msg
-
- self.stubs.Set(deprecated.LOG, 'warn', local_log)
-
- def test_deprecated(self):
- deprecated.warn('test')
- self.assertEqual(self.logbuffer, 'Deprecated Config: test')
-
- def test_deprecated_fatal(self):
- self.flags(fatal_deprecations=True)
- self.assertRaises(exception.DeprecatedConfig,
- deprecated.warn, "test2")
- self.assertEqual(self.logbuffer, 'Deprecated Config: test2')
-
- def test_deprecated_logs_only_once(self):
- deprecated.warn('only once!')
- deprecated.warn('only once!')
- deprecated.warn('only once!')
- self.assertEqual(self.logbuffer, 'Deprecated Config: only once!')
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index e3f9fde4c..6d2396350 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -66,7 +66,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
vswitch_name='external')
self._hypervutils = hypervutils.HyperVUtils()
- self._conn = driver_hyperv.HyperVDriver()
+ self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 5a158c4b2..ea35ff29e 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -48,6 +48,7 @@ import nova.tests.image.fake
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import driver
+from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import config
@@ -147,7 +148,7 @@ class LibvirtVolumeTestCase(test.TestCase):
def get_all_block_devices(self):
return []
- self.fake_conn = FakeLibvirtDriver()
+ self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
self.connr = {
'ip': '127.0.0.1',
'initiator': 'fake_initiator',
@@ -619,7 +620,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(my_ip=ip)
self.flags(host=host)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
@@ -632,7 +633,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertDictMatch(expected, result)
def test_get_guest_config(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -676,7 +677,7 @@ class LibvirtConnTestCase(test.TestCase):
"catchup")
def test_get_guest_config_with_two_nics(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -708,7 +709,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None,
@@ -728,7 +729,7 @@ class LibvirtConnTestCase(test.TestCase):
config.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
@@ -746,7 +747,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -764,7 +765,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -779,7 +780,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -791,7 +792,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conf = conn.get_guest_config(instance_ref,
@@ -806,7 +807,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
@@ -825,7 +826,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
@@ -844,7 +845,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
@@ -863,7 +864,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect, "getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
@@ -894,7 +895,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
@@ -914,7 +915,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
@@ -1038,7 +1039,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
@@ -1054,7 +1055,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# None should be listed, since we fake deleted the last one
self.assertEquals(len(instances), 0)
@@ -1107,7 +1108,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
@@ -1168,7 +1169,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
@@ -1201,7 +1202,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1240,7 +1241,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1279,7 +1280,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1319,7 +1320,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1354,7 +1355,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1390,7 +1391,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1428,7 +1429,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1466,7 +1467,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1499,7 +1500,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -1545,7 +1546,7 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
@@ -1555,7 +1556,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, network_info, None, False)
tree = etree.fromstring(xml)
@@ -1572,7 +1573,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, 'lxc:///')
@@ -1615,7 +1616,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
@@ -1651,9 +1652,8 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1663,9 +1663,8 @@ class LibvirtConnTestCase(test.TestCase):
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1677,7 +1676,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref,
network_info,
image_meta,
@@ -1704,9 +1703,8 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
- network_info,
- image_meta)
+ xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
+ instance_ref, network_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@@ -1818,7 +1816,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, expected_uri)
@@ -1847,7 +1845,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri, testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
@@ -1879,7 +1877,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1907,7 +1905,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
@@ -1935,7 +1933,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
@@ -1961,7 +1959,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
@@ -1980,7 +1978,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
@@ -1995,7 +1993,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2015,7 +2013,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
@@ -2031,7 +2029,7 @@ class LibvirtConnTestCase(test.TestCase):
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2045,7 +2043,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
@@ -2100,7 +2098,7 @@ class LibvirtConnTestCase(test.TestCase):
        # Start test
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
@@ -2120,7 +2118,7 @@ class LibvirtConnTestCase(test.TestCase):
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
@@ -2174,7 +2172,7 @@ class LibvirtConnTestCase(test.TestCase):
user_id=None).AndReturn(None)
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson)
@@ -2227,7 +2225,7 @@ class LibvirtConnTestCase(test.TestCase):
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref.name)
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
@@ -2284,7 +2282,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -2343,7 +2341,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
@@ -2388,12 +2386,12 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
def test_get_host_ip_addr(self):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
@@ -2402,7 +2400,7 @@ class LibvirtConnTestCase(test.TestCase):
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities")
@@ -2422,7 +2420,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.UnsetStubs()
def test_volume_in_mapping(self):
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
@@ -2461,7 +2459,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
@@ -2480,7 +2478,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2501,7 +2499,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2524,7 +2522,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2546,7 +2544,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2564,7 +2562,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
raise exception.InstanceNotFound()
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -2574,7 +2572,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_available_least_handles_missing(self):
"""Ensure destroy calls managedSaveRemove for saved instance"""
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
return ['fake']
@@ -2589,7 +2587,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(result, space / 1024 ** 3)
def test_cpu_info(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = config.LibvirtConfigCPU()
@@ -2681,7 +2679,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'vda_read': 688640L,
'vda_read_req': 169L,
@@ -2761,7 +2759,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2835,7 +2833,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2911,7 +2909,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -2993,7 +2991,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- conn = libvirt_driver.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
@@ -3024,7 +3022,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(actual, expect)
def test_get_instance_capabilities(self):
- conn = libvirt_driver.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = config.LibvirtConfigCaps()
@@ -3107,11 +3105,13 @@ class HostStateTestCase(test.TestCase):
return HostStateTestCase.instance_caps
def test_update_status(self):
+ virtapi = fake.FakeVirtAPI()
self.mox.StubOutWithMock(libvirt_driver, 'LibvirtDriver')
- libvirt_driver.LibvirtDriver(True).AndReturn(self.FakeConnection())
+ libvirt_driver.LibvirtDriver(virtapi, True).AndReturn(
+ self.FakeConnection())
self.mox.ReplayAll()
- hs = libvirt_driver.HostState(True)
+ hs = libvirt_driver.HostState(virtapi, True)
stats = hs._stats
self.assertEquals(stats["vcpus"], 1)
self.assertEquals(stats["vcpus_used"], 0)
@@ -3891,7 +3891,8 @@ class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
- self.libvirtconnection = libvirt_driver.LibvirtDriver(read_only=True)
+ self.libvirtconnection = libvirt_driver.LibvirtDriver(
+ fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance"""
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index f40ef534c..83a7514db 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -117,7 +117,7 @@ class PowerVMDriverTestCase(test.TestCase):
super(PowerVMDriverTestCase, self).setUp()
self.stubs.Set(operator, 'get_powervm_operator',
fake_get_powervm_operator)
- self.powervm_connection = powervm_driver.PowerVMDriver()
+ self.powervm_connection = powervm_driver.PowerVMDriver(None)
self.instance = self._create_instance()
def _create_instance(self):
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index ec87c9111..1f30ee695 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -20,12 +20,14 @@ import sys
import traceback
from nova.compute.manager import ComputeManager
+from nova import db
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
+from nova.virt import fake
LOG = logging.getLogger(__name__)
@@ -171,7 +173,8 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
- self.connection = importutils.import_object(self.driver_module, '')
+ self.connection = importutils.import_object(self.driver_module,
+ fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
@@ -286,17 +289,33 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.power_off(instance_ref)
@catch_notimplementederror
- def test_test_power_on_running(self):
+ def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(instance_ref)
@catch_notimplementederror
- def test_test_power_on_powered_off(self):
+ def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(instance_ref)
@catch_notimplementederror
+ def test_soft_delete(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_soft_deleted(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@@ -507,17 +526,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
class AbstractDriverTestCase(_VirtDriverTestCase):
def setUp(self):
- from nova.virt.driver import ComputeDriver
-
self.driver_module = "nova.virt.driver.ComputeDriver"
-
- # TODO(sdague): the abstract driver doesn't have a constructor,
- # add one now that the loader loads classes directly
- def __new_init__(self, read_only=False):
- super(ComputeDriver, self).__init__()
-
- ComputeDriver.__init__ = __new_init__
-
super(AbstractDriverTestCase, self).setUp()
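
The new soft-delete tests above pass as long as a driver either implements the calls or inherits the NotImplementedError stubs added to nova/virt/driver.py later in this diff (the @catch_notimplementederror wrapper tolerates the latter). A self-contained toy of the round trip the tests exercise:

class ToyDriver(object):
    """Toy stand-in for a ComputeDriver subclass; illustration only."""
    def __init__(self):
        self._soft_deleted = set()

    def soft_delete(self, instance):
        # take the instance out of service but keep its storage
        self._soft_deleted.add(instance['uuid'])

    def restore(self, instance):
        self._soft_deleted.discard(instance['uuid'])


drv = ToyDriver()
inst = {'uuid': 'u-1'}
drv.soft_delete(inst)
drv.restore(inst)
assert 'u-1' not in drv._soft_deleted
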
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index aa28f2762..757ec2bf2 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -50,7 +50,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMWareESXDriver(False)
+ self.conn = driver.VMWareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index bd1bcd4f1..404c183a0 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -40,6 +40,7 @@ from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
from nova.tests.xenapi import stubs
+from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
@@ -231,7 +232,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
@@ -249,7 +250,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
@@ -283,7 +284,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
@@ -822,7 +823,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
@@ -839,7 +840,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_unrescue(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
@@ -847,7 +848,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
@@ -863,25 +864,25 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(instance, None, "HARD")
def test_reboot_soft(self):
instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(instance, None, "SOFT")
def test_reboot_halted(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance.name, 'Halted')
conn.reboot(instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance.name)
@@ -890,7 +891,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance.name, 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
@@ -1019,7 +1020,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(6, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
@@ -1038,7 +1039,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(1, 4, 99),
product_brand='XCP')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
@@ -1049,7 +1050,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
@@ -1062,7 +1063,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
@@ -1092,7 +1093,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
product_version=(4, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1127,7 +1128,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
product_version=(4, 0, 0),
product_brand='XenServer')
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1149,7 +1150,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -1165,7 +1166,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
@@ -1256,7 +1257,7 @@ class XenAPIHostTestCase(stubs.XenAPITestBase):
xenapi_connection_password='test_pass')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.create_local_srs()
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def test_host_state(self):
stats = self.conn.get_host_stats()
@@ -1347,7 +1348,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1440,7 +1441,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1487,6 +1488,11 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 5})
+ # NOTE(danms): because we're stubbing out the instance_types from
+ # the database, our instance['instance_type'] doesn't get properly
+ # filled out here, so put what we need into it
+ instance['instance_type']['swap'] = 1024
+
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
@@ -1499,6 +1505,11 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 4})
+ # NOTE(danms): because we're stubbing out the instance_types from
+ # the database, our instance['instance_type'] doesn't get properly
+ # filled out here, so put what we need into it
+ instance['instance_type']['ephemeral_gb'] = 160
+
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
@@ -1528,7 +1539,7 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
@@ -1661,7 +1672,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(FLAGS.network_manager)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
@@ -1987,7 +1998,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(FLAGS.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
@@ -2392,7 +2403,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
@@ -2406,18 +2417,18 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.post_live_migration_at_destination(None, None, None, None)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
@@ -2436,7 +2447,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
@@ -2445,7 +2456,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2470,7 +2481,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2497,7 +2508,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
@@ -2514,7 +2525,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
@@ -2532,7 +2543,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2554,7 +2565,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2581,7 +2592,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
@@ -2608,7 +2619,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2627,7 +2638,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2661,7 +2672,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, Session)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
@@ -2687,7 +2698,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- conn = xenapi_conn.XenAPIDriver(False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
@@ -2719,7 +2730,7 @@ class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(False)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
@@ -2890,7 +2901,7 @@ class VMOpsTestCase(test.TestCase):
def test_check_resize_func_name_defaults_to_VDI_resize(self):
session = self._get_mock_session(None, None)
- ops = vmops.VMOps(session)
+ ops = vmops.VMOps(session, fake.FakeVirtAPI())
self.assertEquals(
'VDI.resize',
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 166eacba6..c38b0f98b 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -37,7 +37,6 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as nova_context
-from nova import db
from nova import exception
from nova import flags
from nova import notifications
@@ -78,11 +77,11 @@ def _late_load_cheetah():
class BareMetalDriver(driver.ComputeDriver):
- def __init__(self, read_only):
+ def __init__(self, virtapi, read_only):
_late_load_cheetah()
# Note that baremetal doesn't have a read-only connection
# mode, so the read_only parameter is ignored
- super(BareMetalDriver, self).__init__()
+ super(BareMetalDriver, self).__init__(virtapi)
self.baremetal_nodes = nodes.get_baremetal_nodes()
self._wrapped_conn = None
self._host_state = None
@@ -230,7 +229,7 @@ class BareMetalDriver(driver.ComputeDriver):
try:
LOG.debug(_("Key is injected but instance is not running yet"),
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.BUILDING})
notifications.send_update(context, old_ref, new_ref)
@@ -239,7 +238,7 @@ class BareMetalDriver(driver.ComputeDriver):
if state == power_state.RUNNING:
LOG.debug(_('instance %s: booted'), instance['name'],
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.ACTIVE})
notifications.send_update(context, old_ref, new_ref)
@@ -254,7 +253,7 @@ class BareMetalDriver(driver.ComputeDriver):
except Exception:
LOG.exception(_("Baremetal assignment is overcommitted."),
instance=instance)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.virtapi.instance_update(
context, instance['uuid'],
{'vm_state': vm_states.ERROR,
'power_state': power_state.FAILED})
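
Each virtapi.instance_update call above keeps the (old_ref, new_ref) tuple shape of db.instance_update_and_get_original, so the notifications.send_update calls still receive both records. A standalone sketch of that contract, with toy names throughout:

def set_vm_state(virtapi, context, instance, vm_state, send_update):
    # mirrors the pattern above: update through virtapi, then notify
    # with both the old and the new record
    old_ref, new_ref = virtapi.instance_update(
        context, instance['uuid'], {'vm_state': vm_state})
    send_update(context, old_ref, new_ref)


class StubVirtAPI(object):
    def instance_update(self, context, instance_uuid, updates):
        old = {'uuid': instance_uuid, 'vm_state': 'building'}
        return old, dict(old, **updates)


events = []
set_vm_state(StubVirtAPI(), None, {'uuid': 'u-1'}, 'active',
             lambda ctxt, old, new: events.append((old['vm_state'],
                                                   new['vm_state'])))
assert events == [('building', 'active')]
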
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index c78d063f9..9c8a6448d 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -92,6 +92,9 @@ class ComputeDriver(object):
"has_imagecache": False,
}
+ def __init__(self, virtapi):
+ self.virtapi = virtapi
+
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
         including catching up with currently running VMs on the given host."""
@@ -338,6 +341,14 @@ class ComputeDriver(object):
"""Power on the specified instance"""
raise NotImplementedError()
+ def soft_delete(self, instance):
+ """Soft delete the specified instance."""
+ raise NotImplementedError()
+
+ def restore(self, instance):
+ """Restore the specified instance"""
+ raise NotImplementedError()
+
def get_available_resource(self):
"""Retrieve resource information.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 8c3253adc..03711fe98 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -30,6 +30,7 @@ from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
+from nova.virt import virtapi
LOG = logging.getLogger(__name__)
@@ -52,7 +53,8 @@ class FakeDriver(driver.ComputeDriver):
"""Fake hypervisor driver"""
- def __init__(self, read_only=False):
+ def __init__(self, virtapi, read_only=False):
+ super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status = {
'host_name-description': 'Fake Host',
@@ -142,6 +144,12 @@ class FakeDriver(driver.ComputeDriver):
def power_on(self, instance):
pass
+ def soft_delete(self, instance):
+ pass
+
+ def restore(self, instance):
+ pass
+
def pause(self, instance):
pass
@@ -329,3 +337,16 @@ class FakeDriver(driver.ComputeDriver):
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
+
+
+class FakeVirtAPI(virtapi.VirtAPI):
+ def instance_update(self, context, instance_uuid, updates):
+ return db.instance_update_and_get_original(context,
+ instance_uuid,
+ updates)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return db.instance_get_by_uuid(context, instance_uuid)
+
+ def instance_get_all_by_host(self, context, host):
+ return db.instance_get_all_by_host(context, host)
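
FakeVirtAPI is what keeps the DB-backed tests working: it satisfies the VirtAPI interface by delegating straight to the nova.db helpers the drivers used to call directly. A hedged usage sketch, assuming a nova test environment at this revision:

from nova.virt import fake

virtapi = fake.FakeVirtAPI()
conn = fake.FakeDriver(virtapi)
# Inside a driver method, DB access now looks like this (commented out
# here because it needs a live test database and a real instance uuid):
#     (old_ref, new_ref) = self.virtapi.instance_update(
#         context, instance['uuid'], {'vm_state': vm_states.ACTIVE})
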
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 3f43ca0a6..6d9f66ff8 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -73,8 +73,8 @@ LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
- def __init__(self):
- super(HyperVDriver, self).__init__()
+ def __init__(self, virtapi):
+ super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 132a4b744..97ce1710c 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -61,7 +61,6 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context as nova_context
-from nova import db
from nova import exception
from nova import flags
from nova.image import glance
@@ -257,8 +256,8 @@ class LibvirtDriver(driver.ComputeDriver):
"has_imagecache": True,
}
- def __init__(self, read_only=False):
- super(LibvirtDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False):
+ super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
@@ -311,7 +310,7 @@ class LibvirtDriver(driver.ComputeDriver):
@property
def host_state(self):
if not self._host_state:
- self._host_state = HostState(self.read_only)
+ self._host_state = HostState(self.virtapi, self.read_only)
return self._host_state
def has_min_version(self, ver):
@@ -1621,7 +1620,7 @@ class LibvirtDriver(driver.ComputeDriver):
if ephemeral_device is not None:
swap_device = self.default_third_device
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + self.default_second_device})
@@ -1646,7 +1645,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)):
diskswap = disk_info('disk.swap', swap_device)
devices.append(diskswap)
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + swap_device})
@@ -1700,7 +1699,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
root_device = self.default_root_device
- db.instance_update(
+ self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': '/dev/' + self.default_root_device})
@@ -3008,11 +3007,12 @@ class LibvirtDriver(driver.ComputeDriver):
class HostState(object):
"""Manages information about the compute node through libvirt"""
- def __init__(self, read_only):
+ def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
self._stats = {}
self.connection = None
+ self.virtapi = virtapi
self.update_status()
def get_host_stats(self, refresh=False):
@@ -3027,7 +3027,7 @@ class HostState(object):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
if self.connection is None:
- self.connection = LibvirtDriver(self.read_only)
+ self.connection = LibvirtDriver(self.virtapi, self.read_only)
data = {}
data["vcpus"] = self.connection.get_vcpu_total()
data["vcpus_used"] = self.connection.get_vcpu_used()
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index cd91a7299..f4f26045e 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -18,7 +18,6 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
-from nova import db
from nova import flags
from nova.openstack.common import cfg
@@ -59,8 +58,8 @@ class PowerVMDriver(driver.ComputeDriver):
"""PowerVM Implementation of Compute Driver."""
- def __init__(self):
- super(PowerVMDriver, self).__init__()
+ def __init__(self, virtapi):
+ super(PowerVMDriver, self).__init__(virtapi)
self._powervm = operator.PowerVMOperator()
@property
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
new file mode 100644
index 000000000..13aaa7e4d
--- /dev/null
+++ b/nova/virt/virtapi.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class VirtAPI(object):
+ def instance_update(self, context, instance_uuid, updates):
+ """Perform an instance update operation on behalf of a virt driver
+ :param context: security context
+ :param instance_uuid: uuid of the instance to be updated
+ :param updates: dict of attribute=value pairs to change
+
+ Returns: orig_instance, new_instance
+ """
+ raise NotImplementedError()
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ """Look up an instance by uuid
+ :param context: security context
+ :param instance_uuid: uuid of the instance to be fetched
+ """
+ raise NotImplementedError()
+
+ def instance_get_all_by_host(self, context, host):
+ """Find all instances on a given host
+ :param context: security context
+ :param host: host running instances to be returned
+ """
+ raise NotImplementedError()
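
The abstract class above is the entire DB surface a virt driver may touch. A hypothetical in-memory implementation, for illustration only (a real one would subclass nova.virt.virtapi.VirtAPI; the concrete implementation visible in this patch is FakeVirtAPI above):

class InMemoryVirtAPI(object):
    """Hypothetical VirtAPI implementation; illustration only."""
    def __init__(self):
        self._by_uuid = {}

    def instance_update(self, context, instance_uuid, updates):
        old = dict(self._by_uuid.get(instance_uuid,
                                     {'uuid': instance_uuid}))
        new = dict(old)
        new.update(updates)
        self._by_uuid[instance_uuid] = new
        return old, new

    def instance_get_by_uuid(self, context, instance_uuid):
        return self._by_uuid[instance_uuid]

    def instance_get_all_by_host(self, context, host):
        return [i for i in self._by_uuid.values()
                if i.get('host') == host]
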
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index ec8673418..e56f81213 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -100,8 +100,8 @@ class Failure(Exception):
class VMWareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
- def __init__(self, read_only=False, scheme="https"):
- super(VMWareESXDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False, scheme="https"):
+ super(VMWareESXDriver, self).__init__(virtapi)
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 2ae4c27e9..e4c4150a8 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -125,8 +125,8 @@ FLAGS.register_opts(xenapi_opts)
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
- def __init__(self, read_only=False):
- super(XenAPIDriver, self).__init__()
+ def __init__(self, virtapi, read_only=False):
+ super(XenAPIDriver, self).__init__(virtapi)
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
@@ -140,8 +140,8 @@ class XenAPIDriver(driver.ComputeDriver):
self._session = XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
- self._host = host.Host(self._session)
- self._vmops = vmops.VMOps(self._session)
+ self._host = host.Host(self._session, self.virtapi)
+ self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session)
@@ -282,6 +282,14 @@ class XenAPIDriver(driver.ComputeDriver):
"""Power on the specified instance"""
self._vmops.power_on(instance)
+ def soft_delete(self, instance):
+ """Soft delete the specified instance"""
+ self._vmops.soft_delete(instance)
+
+ def restore(self, instance):
+ """Restore the specified instance"""
+ self._vmops.restore(instance)
+
def poll_rebooting_instances(self, timeout):
"""Poll for rebooting instances"""
self._vmops.poll_rebooting_instances(timeout)
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 5186a40ce..8a69f7c54 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -38,8 +38,9 @@ class Host(object):
"""
Implements host related operations.
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
self._session = session
+ self._virtapi = virtapi
def host_power_action(self, _host, action):
"""Reboots or shuts down the host."""
@@ -65,7 +66,7 @@ class Host(object):
uuid = vm_rec['other_config'].get('nova_uuid')
if not uuid:
name = vm_rec['name_label']
- uuid = _uuid_find(ctxt, host, name)
+ uuid = _uuid_find(self._virtapi, ctxt, host, name)
if not uuid:
msg = _('Instance %(name)s running on %(host)s'
' could not be found in the database:'
@@ -73,11 +74,11 @@ class Host(object):
' ping migration to a new host')
LOG.info(msg % locals())
continue
- instance = db.instance_get_by_uuid(ctxt, uuid)
+ instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
dest = _host_find(ctxt, self._session, host, host_ref)
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'host': dest,
@@ -88,7 +89,7 @@ class Host(object):
vm_ref, host_ref, {})
migrations_counter = migrations_counter + 1
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'vm_state': vm_states.ACTIVE})
@@ -98,7 +99,7 @@ class Host(object):
except self._session.XenAPI.Failure:
LOG.exception('Unable to migrate VM %(vm_ref)s'
                              ' from %(host)s' % locals())
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
{'host': host,
@@ -212,9 +213,9 @@ def call_xenhost(session, method, arg_dict):
return e.details[1]
-def _uuid_find(context, host, name_label):
+def _uuid_find(virtapi, context, host, name_label):
"""Return instance uuid by name_label."""
- for i in db.instance_get_all_by_host(context, host):
+ for i in virtapi.instance_get_all_by_host(context, host):
if i.name == name_label:
return i['uuid']
return None
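
Threading virtapi through _uuid_find removes the last direct db call from host.py and makes the name-to-uuid lookup testable with any stand-in. A self-contained sketch of that lookup (toy dict data; the real code reads i.name from DB objects):

class OneHostVirtAPI(object):
    def __init__(self, instances):
        self._instances = instances

    def instance_get_all_by_host(self, context, host):
        return [i for i in self._instances if i['host'] == host]


def uuid_find(virtapi, context, host, name_label):
    # same shape as _uuid_find above
    for inst in virtapi.instance_get_all_by_host(context, host):
        if inst['name'] == name_label:
            return inst['uuid']
    return None


api = OneHostVirtAPI([{'host': 'h1', 'name': 'vm-1', 'uuid': 'u-1'}])
assert uuid_find(api, None, 'h1', 'vm-1') == 'u-1'
assert uuid_find(api, None, 'h1', 'vm-2') is None
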
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1f8cafa3b..5a295d194 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -93,7 +93,7 @@ def cmp_version(a, b):
return len(a) - len(b)
-def make_step_decorator(context, instance):
+def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
@@ -125,7 +125,7 @@ def make_step_decorator(context, instance):
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
- db.instance_update(context, instance['uuid'], {'progress': progress})
+ instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@@ -145,9 +145,10 @@ class VMOps(object):
"""
Management class for VM-related tasks
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
+ self._virtapi = virtapi
self.poll_rescue_last_ran = None
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER,
@@ -254,7 +255,8 @@ class VMOps(object):
if name_label is None:
name_label = instance['name']
- step = make_step_decorator(context, instance)
+ step = make_step_decorator(context, instance,
+ self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
@@ -454,7 +456,8 @@ class VMOps(object):
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
- db.instance_update(context, instance['uuid'], {'vm_mode': mode})
+ self._virtapi.instance_update(context,
+ instance['uuid'], {'vm_mode': mode})
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
@@ -463,7 +466,7 @@ class VMOps(object):
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type):
ctx = nova_context.get_admin_context()
- instance_type = db.instance_type_get(ctx, instance['instance_type_id'])
+ instance_type = instance['instance_type']
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
@@ -472,7 +475,7 @@ class VMOps(object):
cd_vdi = vdis.pop('root')
root_vdi = vm_utils.fetch_blank_disk(self._session,
- instance['instance_type_id'])
+ instance_type['id'])
vdis['root'] = root_vdi
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
@@ -567,8 +570,7 @@ class VMOps(object):
agent.resetnetwork()
# Set VCPU weight
- inst_type = db.instance_type_get(ctx, instance['instance_type_id'])
- vcpu_weight = inst_type['vcpu_weight']
+ vcpu_weight = instance['instance_type']['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
@@ -661,7 +663,8 @@ class VMOps(object):
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
- db.instance_update(context, instance['uuid'], {'progress': progress})
+ self._virtapi.instance_update(context, instance['uuid'],
+ {'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
@@ -1133,14 +1136,25 @@ class VMOps(object):
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
- def power_off(self, instance):
- """Power off the specified instance."""
+ def soft_delete(self, instance):
+ """Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
except exception.NotFound:
- LOG.warning(_("VM is not present, skipping power off..."),
+ LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
+ else:
+ vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+
+ def restore(self, instance):
+ """Restore the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._start(instance, vm_ref)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
def power_on(self, instance):
"""Power on the specified instance."""
diff --git a/smoketests/run_tests.py b/smoketests/run_tests.py
index 053acc09f..3c3ed0574 100644
--- a/smoketests/run_tests.py
+++ b/smoketests/run_tests.py
@@ -138,12 +138,12 @@ class _Win32Colorizer(object):
from win32console import FOREGROUND_INTENSITY
from win32console import FOREGROUND_RED
from win32console import GetStdHandle
- from win32console import STD_OUT_HANDLE
+ from win32console import STD_OUTPUT_HANDLE
red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
FOREGROUND_BLUE, FOREGROUND_INTENSITY)
self.stream = stream
- self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
+ self.screenBuffer = GetStdHandle(STD_OUTPUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
@@ -159,7 +159,7 @@ class _Win32Colorizer(object):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
- win32console.STD_OUT_HANDLE)
+ win32console.STD_OUTPUT_HANDLE)
except ImportError:
return False
import pywintypes
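
The constant rename above is a straight bug fix: pywin32's win32console module exports STD_OUTPUT_HANDLE (the Win32 API name), so the old STD_OUT_HANDLE spelling could only fail at import or attribute lookup. A minimal sketch of the corrected, guarded usage:

def get_stdout_handle():
    # Returns a console screen buffer handle on Windows with pywin32,
    # or None elsewhere.
    try:
        import win32console
    except ImportError:
        return None
    return win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
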