-rw-r--r--  Authors | 3
-rwxr-xr-x  bin/nova-clear-rabbit-queues (renamed from bin/clear_rabbit_queues) | 1
-rwxr-xr-x  bin/nova-dhcpbridge | 2
-rwxr-xr-x  bin/nova-instance-usage-audit (renamed from bin/instance-usage-audit) | 1
-rwxr-xr-x  bin/nova-manage | 1
-rwxr-xr-x  bin/nova-rpc-zmq-receiver | 77
-rwxr-xr-x  bin/nova-volume-usage-audit (renamed from bin/volume-usage-audit) | 1
-rwxr-xr-x  bin/nova-xvpvncproxy | 1
-rw-r--r--  doc/source/devref/addmethod.openstackapi.rst | 4
-rw-r--r--  nova/api/ec2/cloud.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/flavorextradata.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/security_groups.py | 2
-rw-r--r--  nova/api/openstack/compute/flavors.py | 5
-rw-r--r--  nova/api/openstack/compute/servers.py | 2
-rw-r--r--  nova/api/openstack/compute/views/flavors.py | 10
-rw-r--r--  nova/compute/api.py | 49
-rw-r--r--  nova/compute/instance_types.py | 7
-rw-r--r--  nova/compute/manager.py | 25
-rw-r--r--  nova/compute/power_state.py | 25
-rw-r--r--  nova/compute/rpcapi.py | 25
-rw-r--r--  nova/db/api.py | 42
-rw-r--r--  nova/db/sqlalchemy/api.py | 119
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py | 40
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py | 80
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql | 64
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql | 64
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py | 80
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql | 61
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql | 61
-rw-r--r--  nova/db/sqlalchemy/models.py | 13
-rw-r--r--  nova/exception.py | 11
-rw-r--r--  nova/flags.py | 5
-rw-r--r--  nova/network/quantum/manager.py | 1
-rw-r--r--  nova/network/quantum/nova_ipam_lib.py | 21
-rw-r--r--  nova/openstack/common/policy.py (renamed from nova/common/policy.py) | 28
-rw-r--r--  nova/policy.py | 8
-rw-r--r--  nova/rpc/__init__.py | 46
-rw-r--r--  nova/rpc/impl_fake.py | 4
-rw-r--r--  nova/rpc/impl_kombu.py | 9
-rw-r--r--  nova/rpc/impl_qpid.py | 6
-rw-r--r--  nova/rpc/impl_zmq.py | 713
-rw-r--r--  nova/scheduler/driver.py | 26
-rw-r--r--  nova/service.py | 3
-rw-r--r--  nova/test.py | 31
-rw-r--r--  nova/tests/__init__.py | 4
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 2
-rw-r--r--  nova/tests/api/ec2/test_ec2_validate.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/test_flavors.py | 110
-rw-r--r--  nova/tests/api/openstack/compute/test_server_metadata.py | 21
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py | 7
-rw-r--r--  nova/tests/baremetal/test_proxy_bare_metal.py | 16
-rw-r--r--  nova/tests/compute/test_compute.py | 180
-rw-r--r--  nova/tests/compute/test_rpcapi.py | 25
-rw-r--r--  nova/tests/fake_flags.py | 4
-rw-r--r--  nova/tests/integrated/test_servers.py | 2
-rw-r--r--  nova/tests/rpc/test_kombu.py | 1
-rw-r--r--  nova/tests/rpc/test_kombu_ssl.py | 1
-rw-r--r--  nova/tests/rpc/test_qpid.py | 1
-rw-r--r--  nova/tests/rpc/test_zmq.py | 128
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 127
-rw-r--r--  nova/tests/test_auth.py | 2
-rw-r--r--  nova/tests/test_compute_utils.py | 2
-rw-r--r--  nova/tests/test_db_api.py | 41
-rw-r--r--  nova/tests/test_libvirt.py | 171
-rw-r--r--  nova/tests/test_notifications.py | 2
-rw-r--r--  nova/tests/test_policy.py | 7
-rw-r--r--  nova/tests/test_quota.py | 2
-rw-r--r--  nova/tests/test_virt_drivers.py | 21
-rw-r--r--  nova/tests/test_vmwareapi.py | 2
-rw-r--r--  nova/tests/test_volume.py | 2
-rw-r--r--  nova/tests/test_volume_utils.py | 2
-rw-r--r--  nova/tests/test_xenapi.py | 120
-rw-r--r--  nova/tests/xenapi/stubs.py | 67
-rw-r--r--  nova/virt/baremetal/proxy.py | 10
-rw-r--r--  nova/virt/connection.py | 43
-rw-r--r--  nova/virt/fake.py | 15
-rw-r--r--  nova/virt/firewall.py | 4
-rw-r--r--  nova/virt/libvirt/connection.py | 75
-rw-r--r--  nova/virt/libvirt/utils.py | 3
-rw-r--r--  nova/virt/vmwareapi_conn.py | 33
-rw-r--r--  nova/virt/xenapi/__init__.py | 30
-rw-r--r--  nova/virt/xenapi/connection.py | 55
-rw-r--r--  nova/virt/xenapi/fake.py | 13
-rw-r--r--  nova/virt/xenapi/host.py | 4
-rw-r--r--  nova/virt/xenapi/network_utils.py | 53
-rw-r--r--  nova/virt/xenapi/vif.py | 14
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 1882
-rw-r--r--  nova/virt/xenapi/vmops.py | 248
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 549
-rw-r--r--  nova/virt/xenapi/volumeops.py | 59
-rw-r--r--  openstack-common.conf | 2
-rw-r--r--  setup.py | 9
93 files changed, 3857 insertions(+), 2100 deletions(-)
diff --git a/Authors b/Authors
index a78e0033b..3448eb055 100644
--- a/Authors
+++ b/Authors
@@ -68,6 +68,7 @@ Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Evan Callicoat <diopter@gmail.com>
Ewan Mellor <ewan.mellor@citrix.com>
+Florian Haas <florian@hastexo.com>
François Charlier <francois.charlier@enovance.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Gabriel Hurley <gabriel@strikeawe.com>
@@ -207,12 +208,14 @@ William Henry <whenry@redhat.com>
William Kelly <william.kelly@rackspace.com>
William Wolf <throughnothing@gmail.com>
Yaguang Tang <heut2008@gmail.com>
+Ying Chun Guo <daisy.ycguo@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Yun Mao <yunmao@gmail.com>
Yun Shen <Yun.Shen@hp.com>
Yuriy Taraday <yorik.sar@gmail.com>
Zed Shaw <zedshaw@zedshaw.com>
+Zhiteng Huang <zhiteng.huang@intel.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
Zhongyue Luo <lzyeval@gmail.com>
Ziad Sawalha <github@highbridgellc.com>
diff --git a/bin/clear_rabbit_queues b/bin/nova-clear-rabbit-queues
index c367c06c6..578681790 100755
--- a/bin/clear_rabbit_queues
+++ b/bin/nova-clear-rabbit-queues
@@ -71,7 +71,6 @@ def delete_queues(queues):
if __name__ == '__main__':
args = flags.parse_args(sys.argv)
logging.setup()
- rpc.register_opts(flags.FLAGS)
delete_queues(args[1:])
if FLAGS.delete_exchange:
delete_exchange(FLAGS.control_exchange)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 109d7101b..f30a0877e 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -99,8 +99,6 @@ def main():
argv = flags.parse_args(sys.argv)
logging.setup()
- rpc.register_opts(FLAGS)
-
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags
diff --git a/bin/instance-usage-audit b/bin/nova-instance-usage-audit
index 59fff1080..3d60edfec 100755
--- a/bin/instance-usage-audit
+++ b/bin/nova-instance-usage-audit
@@ -63,7 +63,6 @@ from nova import utils
FLAGS = flags.FLAGS
if __name__ == '__main__':
- rpc.register_opts(FLAGS)
admin_context = context.get_admin_context()
flags.parse_args(sys.argv)
logging.setup()
diff --git a/bin/nova-manage b/bin/nova-manage
index 466bd9846..605e76032 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -1664,7 +1664,6 @@ def methods_of(obj):
def main():
"""Parse options and call the appropriate class/method."""
- rpc.register_opts(FLAGS)
try:
argv = flags.parse_args(sys.argv)
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
new file mode 100755
index 000000000..76b67a840
--- /dev/null
+++ b/bin/nova-rpc-zmq-receiver
@@ -0,0 +1,77 @@
+#!/usr/bin/env python -d
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+eventlet.monkey_patch()
+
+import contextlib
+import logging
+import os
+import sys
+
+import zmq
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova import rpc
+from nova.rpc import impl_zmq
+from nova import utils
+
+CONF = cfg.CONF
+
+
+def main():
+ CONF.register_opts(rpc.rpc_opts)
+ impl_zmq.register_opts(CONF)
+ logging.setup()
+ utils.monkey_patch()
+
+ ipc_dir = CONF.rpc_zmq_ipc_dir
+
+ # Create the necessary directories/files for this service.
+ if not os.path.isdir(ipc_dir):
+ try:
+ utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
+ utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
+ ipc_dir, run_as_root=True)
+ utils.execute('chmod', '750', ipc_dir, run_as_root=True)
+ except exception.ProcessExecutionError:
+ logging.error(_("Could not create IPC socket directory."))
+ return
+
+ with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
+ consume_in = "tcp://%s:%s" % \
+ (CONF.rpc_zmq_bind_address,
+ CONF.rpc_zmq_port)
+ consumption_proxy = impl_zmq.InternalContext(None)
+
+ reactor.register(consumption_proxy,
+ consume_in, zmq.PULL, out_bind=True)
+
+ reactor.consume_in_thread()
+ reactor.wait()
+
+if __name__ == '__main__':
+ main()
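The receiver is only half of the picture; callers keep using the existing nova.rpc API and select the driver through configuration. A minimal caller-side sketch, assuming the existing rpc_backend flag is pointed at the new module (that flag value is an assumption of this example, not part of the hunk above):

    # assumed configuration: --rpc_backend=nova.rpc.impl_zmq
    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()
    # method and args mirror the compute rpcapi introduced in this change;
    # the filename value is hypothetical
    rpc.cast(ctxt, 'compute.somehost',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': '/tmp/probe'}})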
diff --git a/bin/volume-usage-audit b/bin/nova-volume-usage-audit
index 0329d184e..2c01da56f 100755
--- a/bin/volume-usage-audit
+++ b/bin/nova-volume-usage-audit
@@ -62,7 +62,6 @@ from nova.volume import utils as volume_utils
FLAGS = flags.FLAGS
if __name__ == '__main__':
- rpc.register_opts(FLAGS)
admin_context = context.get_admin_context()
utils.default_cfgfile()
flags.FLAGS(sys.argv)
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index d338d3a3e..bdbe20997 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -40,7 +40,6 @@ from nova.vnc import xvp_proxy
FLAGS = flags.FLAGS
if __name__ == "__main__":
- rpc.register_opts(FLAGS)
flags.parse_args(sys.argv)
logging.setup()
diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst
index f19d79d31..18123f8cc 100644
--- a/doc/source/devref/addmethod.openstackapi.rst
+++ b/doc/source/devref/addmethod.openstackapi.rst
@@ -35,7 +35,7 @@ Controllers and actions
Controllers live in ``nova/api/openstack``, and inherit from nova.wsgi.Controller.
-See ``nova/api/openstack/servers.py`` for an example.
+See ``nova/api/openstack/compute/servers.py`` for an example.
Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML
If you define a new controller, you'll need to define a ``_serialization_metadata`` attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. ``<servers>`` list contains ``<server>`` tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. ``<server id="4"/>`` instead of ``<server><id>4</id></server>``).
-See `nova/api/openstack/servers.py` for an example.
+See `nova/api/openstack/compute/servers.py` for an example.
Faults
------
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 43686aa12..723c7fca7 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -598,7 +598,7 @@ class CloudController(object):
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
- values['protocol'] = ip_protocol
+ values['protocol'] = ip_protocol.lower()
values['from_port'] = from_port
values['to_port'] = to_port
else:
diff --git a/nova/api/openstack/compute/contrib/flavorextradata.py b/nova/api/openstack/compute/contrib/flavorextradata.py
index 2864ada5e..d50734817 100644
--- a/nova/api/openstack/compute/contrib/flavorextradata.py
+++ b/nova/api/openstack/compute/contrib/flavorextradata.py
@@ -37,7 +37,8 @@ authorize = extensions.soft_extension_authorizer('compute', 'flavorextradata')
class FlavorextradataController(wsgi.Controller):
def _get_flavor_refs(self):
"""Return a dictionary mapping flavorid to flavor_ref."""
- flavor_refs = instance_types.get_all_types(True)
+
+ flavor_refs = instance_types.get_all_types(inactive=True)
rval = {}
for name, obj in flavor_refs.iteritems():
rval[obj['flavorid']] = obj
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index 7e3f93752..4a69d392e 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -524,7 +524,7 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
- values['protocol'] = ip_protocol
+ values['protocol'] = ip_protocol.lower()
values['from_port'] = from_port
values['to_port'] = to_port
else:
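With both the EC2 and OSAPI rule paths canonicalizing the protocol, mixed-case input no longer produces distinct-looking rules. An illustrative sketch (values assumed):

    # 'TCP', 'Tcp' and 'tcp' all normalize to one stored value now
    for ip_protocol in ('TCP', 'Tcp', 'tcp'):
        assert ip_protocol.lower() == 'tcp'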
diff --git a/nova/api/openstack/compute/flavors.py b/nova/api/openstack/compute/flavors.py
index 23c918924..56b2e18ab 100644
--- a/nova/api/openstack/compute/flavors.py
+++ b/nova/api/openstack/compute/flavors.py
@@ -94,6 +94,11 @@ class Controller(wsgi.Controller):
def _get_flavors(self, req):
"""Helper function that returns a list of flavor dicts."""
filters = {}
+
+ context = req.environ['nova.context']
+ if not context.is_admin:
+ filters['disabled'] = False
+
if 'minRam' in req.params:
try:
filters['min_memory_mb'] = int(req.params['minRam'])
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 7bf3d59d0..64af7222f 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -704,6 +704,8 @@ class Controller(wsgi.Controller):
headers={'Retry-After': 0})
except exception.InstanceTypeMemoryTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
+ except exception.InstanceTypeNotFound as error:
+ raise exc.HTTPBadRequest(explanation=unicode(error))
except exception.InstanceTypeDiskTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
except exception.InvalidMetadata as error:
diff --git a/nova/api/openstack/compute/views/flavors.py b/nova/api/openstack/compute/views/flavors.py
index 7406e8066..c299170b0 100644
--- a/nova/api/openstack/compute/views/flavors.py
+++ b/nova/api/openstack/compute/views/flavors.py
@@ -34,7 +34,7 @@ class ViewBuilder(common.ViewBuilder):
}
def show(self, request, flavor):
- return {
+ flavor_dict = {
"flavor": {
"id": flavor["flavorid"],
"name": flavor["name"],
@@ -49,6 +49,14 @@ class ViewBuilder(common.ViewBuilder):
},
}
+ # NOTE(sirp): disabled attribute is namespaced for now for
+ # compatibility with the OpenStack API. This should ultimately be made
+ # a first class attribute.
+ flavor_dict["flavor"]["OS-FLV-DISABLED:disabled"] =\
+ flavor.get("disabled", "")
+
+ return flavor_dict
+
def index(self, request, flavors):
"""Return the 'index' view of flavors."""
return self._list_view(self.basic, request, flavors)
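With the namespaced attribute in place, a flavor 'show' response carries the flag alongside the standard fields. An illustrative payload as a Python dict (values assumed; the other view-builder fields are omitted for brevity):

    {
        "flavor": {
            "id": "1",
            "name": "m1.tiny",
            "OS-FLV-DISABLED:disabled": False,
        }
    }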
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 868ff1b7a..a0672cfff 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -170,9 +170,13 @@ class API(base.Base):
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
+ overs = exc.kwargs['overs']
+
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
+
+ # Reduce 'allowed' to the minimum supported
allowed = headroom['instances']
if instance_type['vcpus']:
allowed = min(allowed,
@@ -185,7 +189,7 @@ class API(base.Base):
pid = context.project_id
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
- used = max_count
+ allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
@@ -193,10 +197,15 @@ class API(base.Base):
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
- used = max_count - allowed
- LOG.warn(_("Quota exceeded for %(pid)s,"
+
+ used = quotas['instances'] - headroom['instances']
+ total_allowed = used + allowed
+ overs = ','.join(overs)
+
+ LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"), locals())
- raise exception.TooManyInstances(used=used, allowed=max_count)
+ raise exception.TooManyInstances(overs=overs, req=min_count,
+ used=used, allowed=total_allowed)
return max_count, reservations
@@ -345,6 +354,10 @@ class API(base.Base):
block_device_mapping = block_device_mapping or []
+ if instance_type['disabled']:
+ raise exception.InstanceTypeNotFound(
+ instance_type_id=instance_type['id'])
+
# Check quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
@@ -977,9 +990,15 @@ class API(base.Base):
try:
if not instance['host']:
# Just update database, nothing else we can do
- result = self.db.instance_destroy(context, instance['id'])
- QUOTAS.commit(context, reservations)
- return result
+ constraint = self.db.constraint(host=self.db.equal_any(host))
+ try:
+ result = self.db.instance_destroy(
+ context, instance['uuid'], constraint)
+ QUOTAS.commit(context, reservations)
+ return result
+ except exception.ConstraintNotMet:
+ # Refresh to get new host information
+ instance = self.get(context, instance['uuid'])
self.update(context,
instance,
@@ -1494,9 +1513,19 @@ class API(base.Base):
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s") % locals())
+
+ # FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
+ same_instance_type = (current_instance_type['id'] ==
+ new_instance_type['id'])
+
+ # NOTE(sirp): We don't want to force a customer to change their flavor
+ # when Ops is migrating off of a failed host.
+ if new_instance_type['disabled'] and not same_instance_type:
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
+
# NOTE(markwash): look up the image early to avoid auth problems later
image = self.image_service.show(context, instance['image_ref'])
@@ -1750,13 +1779,13 @@ class API(base.Base):
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
- rv = self.db.instance_metadata_get(context, instance['id'])
+ rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
- self.db.instance_metadata_delete(context, instance['id'], key)
+ self.db.instance_metadata_delete(context, instance['uuid'], key)
@wrap_check_policy
def update_instance_metadata(self, context, instance,
@@ -1774,7 +1803,7 @@ class API(base.Base):
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
- self.db.instance_metadata_update(context, instance['id'],
+ self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
return _metadata
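To make the headroom arithmetic in the quota hunk above concrete, a worked sketch; the two truncated min() terms are filled in here as assumptions, since the hunk elides them:

    quotas = {'instances': 10, 'cores': 20, 'ram': 51200}
    usages = {'instances': {'in_use': 8, 'reserved': 0},
              'cores': {'in_use': 18, 'reserved': 0},
              'ram': {'in_use': 40960, 'reserved': 0}}
    headroom = dict((res, quotas[res] -
                    (usages[res]['in_use'] + usages[res]['reserved']))
                    for res in quotas.keys())
    # headroom == {'instances': 2, 'cores': 2, 'ram': 10240}
    instance_type = {'vcpus': 2, 'memory_mb': 2048}  # assumed flavor
    allowed = headroom['instances']                                     # 2
    allowed = min(allowed, headroom['cores'] // instance_type['vcpus'])     # 1
    allowed = min(allowed, headroom['ram'] // instance_type['memory_mb'])   # 1
    used = quotas['instances'] - headroom['instances']  # 8
    total_allowed = used + allowed                      # 9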
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 66c73c624..69eef7f83 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -97,14 +97,15 @@ def destroy(name):
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
-def get_all_types(inactive=0, filters=None):
+def get_all_types(inactive=False, filters=None):
"""Get all non-deleted instance_types.
Pass true as argument if you want deleted instance types returned also.
-
"""
ctxt = context.get_admin_context()
- inst_types = db.instance_type_get_all(ctxt, inactive, filters)
+ inst_types = db.instance_type_get_all(
+ ctxt, inactive=inactive, filters=filters)
+
inst_type_dict = {}
for inst_type in inst_types:
inst_type_dict[inst_type['name']] = inst_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d9f3ee14f..a110401bc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -308,7 +308,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
return self.driver.get_info(instance)["state"]
except exception.NotFound:
- return power_state.FAILED
+ return power_state.NOSTATE
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -674,10 +674,15 @@ class ComputeManager(manager.SchedulerDependentManager):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
block_device_mapping = []
for bdm in bdms:
- cinfo = jsonutils.loads(bdm['connection_info'])
- block_device_mapping.append({'connection_info': cinfo,
- 'mount_device':
- bdm['device_name']})
+ try:
+ cinfo = jsonutils.loads(bdm['connection_info'])
+ block_device_mapping.append({'connection_info': cinfo,
+ 'mount_device':
+ bdm['device_name']})
+ except TypeError:
+ # if the block_device_mapping has no value in connection_info
+ # (returned as None), don't include in the mapping
+ pass
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
return {'block_device_mapping': block_device_mapping}
@@ -1855,7 +1860,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
instance_ref = self.db.instance_get(context, instance_id)
bdm = self._get_instance_volume_bdm(context,
- instance_id,
+ instance_ref['uuid'],
volume_id)
self._detach_volume(context, instance_ref, bdm)
volume = self.volume_api.get(context, volume_id)
@@ -2197,11 +2202,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref['uuid']):
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
- rpc.call(context,
- rpc.queue_get_for(context, FLAGS.compute_topic, dest),
- {"method": "remove_volume_connection",
- "args": {'instance_id': instance_ref['id'],
- 'volume_id': volume['id']}})
+ self.compute_rpcapi.remove_volume_connection(context, instance_ref,
+ volume['id'], dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
@@ -2467,7 +2469,6 @@ class ComputeManager(manager.SchedulerDependentManager):
continue
if (vm_power_state in (power_state.NOSTATE,
- power_state.SHUTOFF,
power_state.SHUTDOWN,
power_state.CRASHED)
and db_instance['vm_state'] == vm_states.ACTIVE):
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index c468fe6b3..6d1c00b98 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -18,18 +18,26 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""The various power states that a VM can be in."""
-
-#NOTE(justinsb): These are the virDomainState values from libvirt
+"""Power state is the state we get by calling virt driver on a particular
+domain. The hypervisor is always considered the authority on the status
+of a particular VM, and the power_state in the DB should be viewed as a
+snapshot of the VM's state in the (recent) past. It can be periodically
+updated, and should also be updated at the end of a task if the task is
+supposed to affect power_state.
+"""
+
+# NOTE(maoy): These are *not* virDomainState values from libvirt.
+# The hex value happens to match virDomainState for backward-compatibility
+# reasons.
NOSTATE = 0x00
RUNNING = 0x01
-BLOCKED = 0x02
PAUSED = 0x03
-SHUTDOWN = 0x04
-SHUTOFF = 0x05
+SHUTDOWN = 0x04 # the VM is powered off
CRASHED = 0x06
SUSPENDED = 0x07
-FAILED = 0x08
+
+# TODO(maoy): BUILDING state is only used in bare metal case and should
+# eventually be removed/cleaned up. NOSTATE is probably enough.
BUILDING = 0x09
# TODO(justinsb): Power state really needs to be a proper class,
@@ -38,13 +46,10 @@ BUILDING = 0x09
_STATE_MAP = {
NOSTATE: 'pending',
RUNNING: 'running',
- BLOCKED: 'blocked',
PAUSED: 'paused',
SHUTDOWN: 'shutdown',
- SHUTOFF: 'shutdown',
CRASHED: 'crashed',
SUSPENDED: 'suspended',
- FAILED: 'failed to spawn',
BUILDING: 'building',
}
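Since BLOCKED, SHUTOFF, and FAILED are gone, lookups go through the trimmed map; a quick sanity sketch using only the module data shown above:

    from nova.compute import power_state

    assert power_state._STATE_MAP[power_state.SHUTDOWN] == 'shutdown'
    assert power_state._STATE_MAP[power_state.NOSTATE] == 'pending'
    # FAILED no longer exists; callers such as _get_power_state() in the
    # manager.py hunk above now return NOSTATE when the driver cannot
    # find the domain.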
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 7d7b57151..8c25906c9 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -84,6 +84,16 @@ class ComputeAPI(nova.rpc.proxy.RpcProxy):
mountpoint=mountpoint),
topic=self._compute_topic(ctxt, None, instance))
+ def check_shared_storage_test_file(self, ctxt, filename, host):
+ return self.call(ctxt, self.make_msg('check_shared_storage_test_file',
+ filename=filename),
+ topic=self._compute_topic(ctxt, host, None))
+
+ def cleanup_shared_storage_test_file(self, ctxt, filename, host):
+ self.cast(ctxt, self.make_msg('cleanup_shared_storage_test_file',
+ filename=filename),
+ topic=self._compute_topic(ctxt, host, None))
+
def compare_cpu(self, ctxt, cpu_info, host):
return self.call(ctxt, self.make_msg('compare_cpu', cpu_info=cpu_info),
topic=self._compute_topic(ctxt, host, None))
@@ -95,6 +105,11 @@ class ComputeAPI(nova.rpc.proxy.RpcProxy):
instance_uuid=instance['uuid'], migration_id=migration_id),
topic=self._compute_topic(ctxt, host, instance))
+ def create_shared_storage_test_file(self, ctxt, host):
+ return self.call(ctxt,
+ self.make_msg('create_shared_storage_test_file'),
+ topic=self._compute_topic(ctxt, host, None))
+
def detach_volume(self, ctxt, instance, volume_id):
self.cast(ctxt, self.make_msg('detach_volume',
instance_uuid=instance['uuid'], volume_id=volume_id),
@@ -131,6 +146,11 @@ class ComputeAPI(nova.rpc.proxy.RpcProxy):
instance_uuid=instance['uuid']),
topic=self._compute_topic(ctxt, None, instance))
+ def get_instance_disk_info(self, ctxt, instance):
+ return self.call(ctxt, self.make_msg('get_instance_disk_info',
+ instance_name=instance['name']),
+ topic=self._compute_topic(ctxt, None, instance))
+
def get_vnc_console(self, ctxt, instance, console_type):
return self.call(ctxt, self.make_msg('get_vnc_console',
instance_uuid=instance['uuid'], console_type=console_type),
@@ -243,6 +263,11 @@ class ComputeAPI(nova.rpc.proxy.RpcProxy):
instance_uuid=instance['uuid'], address=address),
topic=self._compute_topic(ctxt, None, instance))
+ def remove_volume_connection(self, ctxt, instance, volume_id, host):
+ return self.call(ctxt, self.make_msg('remove_volume_connection',
+ instance_id=instance['id'], volume_id=volume_id),
+ topic=self._compute_topic(ctxt, host, None))
+
def rescue_instance(self, ctxt, instance, rescue_password):
self.cast(ctxt, self.make_msg('rescue_instance',
instance_uuid=instance['uuid'],
diff --git a/nova/db/api.py b/nova/db/api.py
index 63a586dea..93f2b060b 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -87,6 +87,32 @@ class NoMoreTargets(exception.NovaException):
###################
+def constraint(**conditions):
+ """Return a constraint object suitable for use with some updates."""
+ return IMPL.constraint(**conditions)
+
+
+def equal_any(*values):
+ """Return an equality condition object suitable for use in a constraint.
+
+ Equal_any conditions require that a model object's attribute equal any
+ one of the given values.
+ """
+ return IMPL.equal_any(*values)
+
+
+def not_equal(*values):
+ """Return an inequality condition object suitable for use in a constraint.
+
+ Not_equal conditions require that a model object's attribute differs from
+ all of the given values.
+ """
+ return IMPL.not_equal(*values)
+
+
+###################
+
+
def service_destroy(context, instance_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, instance_id)
@@ -527,9 +553,9 @@ def instance_data_get_for_project(context, project_id, session=None):
session=session)
-def instance_destroy(context, instance_id):
+def instance_destroy(context, instance_id, constraint=None):
"""Destroy the instance or raise if it does not exist."""
- return IMPL.instance_destroy(context, instance_id)
+ return IMPL.instance_destroy(context, instance_id, constraint)
def instance_get_by_uuid(context, uuid):
@@ -1559,19 +1585,19 @@ def cell_get_all(context):
####################
-def instance_metadata_get(context, instance_id):
+def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
- return IMPL.instance_metadata_get(context, instance_id)
+ return IMPL.instance_metadata_get(context, instance_uuid)
-def instance_metadata_delete(context, instance_id, key):
+def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
- IMPL.instance_metadata_delete(context, instance_id, key)
+ IMPL.instance_metadata_delete(context, instance_uuid, key)
-def instance_metadata_update(context, instance_id, metadata, delete):
+def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
- IMPL.instance_metadata_update(context, instance_id, metadata, delete)
+ IMPL.instance_metadata_update(context, instance_uuid, metadata, delete)
####################
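Putting the three helpers together, the caller-side pattern (taken from the compute/api.py hunk above) is a conditional delete that fails loudly when a concurrent update invalidates the read:

    # only destroy the row if the instance's host still matches what we read
    constraint = self.db.constraint(host=self.db.equal_any(host))
    try:
        result = self.db.instance_destroy(context, instance['uuid'],
                                          constraint)
        QUOTAS.commit(context, reservations)
    except exception.ConstraintNotMet:
        # lost the race: the instance gained a host concurrently; re-read
        instance = self.get(context, instance['uuid'])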
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 30c556b4d..f7a6b59a6 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -43,6 +43,7 @@ from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql.expression import or_
from sqlalchemy.sql import func
FLAGS = flags.FLAGS
@@ -263,6 +264,52 @@ def exact_filter(query, model, filters, legal_keys):
###################
+def constraint(**conditions):
+ return Constraint(conditions)
+
+
+def equal_any(*values):
+ return EqualityCondition(values)
+
+
+def not_equal(*values):
+ return InequalityCondition(values)
+
+
+class Constraint(object):
+
+ def __init__(self, conditions):
+ self.conditions = conditions
+
+ def apply(self, model, query):
+ clauses = []
+ for key, condition in self.conditions.iteritems():
+ for clause in condition.clauses(getattr(model, key)):
+ query = query.filter(clause)
+ return query
+
+
+class EqualityCondition(object):
+
+ def __init__(self, values):
+ self.values = values
+
+ def clauses(self, field):
+ return or_([field == value for value in self.values])
+
+
+class InequalityCondition(object):
+
+ def __init__(self, values):
+ self.values = values
+
+ def clauses(self, field):
+ return [field != value for value in self.values]
+
+
+###################
+
+
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
@@ -1311,7 +1358,7 @@ def instance_data_get_for_project(context, project_id, session=None):
@require_context
-def instance_destroy(context, instance_id):
+def instance_destroy(context, instance_id, constraint=None):
session = get_session()
with session.begin():
if utils.is_uuid_like(instance_id):
@@ -1321,18 +1368,21 @@ def instance_destroy(context, instance_id):
else:
instance_ref = instance_get(context, instance_id,
session=session)
- session.query(models.Instance).\
- filter_by(id=instance_id).\
- update({'deleted': True,
- 'deleted_at': utils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ query = session.query(models.Instance).filter_by(id=instance_id)
+ if constraint is not None:
+ query = constraint.apply(models.Instance, query)
+ count = query.update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ if count == 0:
+ raise exception.ConstraintNotMet()
session.query(models.SecurityGroupInstanceAssociation).\
- filter_by(instance_id=instance_id).\
+ filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
- filter_by(instance_id=instance_id).\
+ filter_by(instance_uuid=instance_ref['uuid']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1680,7 +1730,7 @@ def _instance_update(context, instance_id, values, copy_old_instance=False):
metadata = values.get('metadata')
if metadata is not None:
instance_metadata_update(
- context, instance_ref['id'], values.pop('metadata'), delete=True)
+ context, instance_ref['uuid'], values.pop('metadata'), delete=True)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
@@ -1715,7 +1765,7 @@ def instance_remove_security_group(context, instance_uuid, security_group_id):
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
session.query(models.SecurityGroupInstanceAssociation).\
- filter_by(instance_id=instance_ref['id']).\
+ filter_by(instance_uuid=instance_ref['uuid']).\
filter_by(security_group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
@@ -3360,7 +3410,7 @@ def security_group_in_use(context, group_id):
for ia in inst_assoc:
num_instances = session.query(models.Instance).\
filter_by(deleted=False).\
- filter_by(id=ia.instance_id).\
+ filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
@@ -3957,16 +4007,27 @@ def instance_type_get_all(context, inactive=False, filters=None):
Returns all instance types.
"""
filters = filters or {}
+
+ # FIXME(sirp): now that we have the `disabled` field for instance-types, we
+ # should probably remove the use of `deleted` to mark inactive. `deleted`
+ # should mean truly deleted, e.g. we can safely purge the record out of the
+ # database.
read_deleted = "yes" if inactive else "no"
+
query = _instance_type_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
+
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
+ if 'disabled' in filters:
+ query = query.filter(
+ models.InstanceTypes.disabled == filters['disabled'])
+
inst_types = query.order_by("name").all()
return [_dict_with_extra_specs(i) for i in inst_types]
@@ -4081,16 +4142,16 @@ def cell_get_all(context):
########################
# User-provided metadata
-def _instance_metadata_get_query(context, instance_id, session=None):
+def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
- filter_by(instance_id=instance_id)
+ filter_by(instance_uuid=instance_uuid)
@require_context
-@require_instance_exists
-def instance_metadata_get(context, instance_id):
- rows = _instance_metadata_get_query(context, instance_id).all()
+@require_instance_exists_using_uuid
+def instance_metadata_get(context, instance_uuid):
+ rows = _instance_metadata_get_query(context, instance_uuid).all()
result = {}
for row in rows:
@@ -4100,9 +4161,9 @@ def instance_metadata_get(context, instance_id):
@require_context
-@require_instance_exists
-def instance_metadata_delete(context, instance_id, key):
- _instance_metadata_get_query(context, instance_id).\
+@require_instance_exists_using_uuid
+def instance_metadata_delete(context, instance_uuid, key):
+ _instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
@@ -4110,31 +4171,31 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
-@require_instance_exists
-def instance_metadata_get_item(context, instance_id, key, session=None):
+@require_instance_exists_using_uuid
+def instance_metadata_get_item(context, instance_uuid, key, session=None):
result = _instance_metadata_get_query(
- context, instance_id, session=session).\
+ context, instance_uuid, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.InstanceMetadataNotFound(metadata_key=key,
- instance_id=instance_id)
+ instance_uuid=instance_uuid)
return result
@require_context
-@require_instance_exists
-def instance_metadata_update(context, instance_id, metadata, delete):
+@require_instance_exists_using_uuid
+def instance_metadata_update(context, instance_uuid, metadata, delete):
session = get_session()
# Set existing metadata to deleted if delete argument is True
if delete:
- original_metadata = instance_metadata_get(context, instance_id)
+ original_metadata = instance_metadata_get(context, instance_uuid)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
- meta_ref = instance_metadata_get_item(context, instance_id,
+ meta_ref = instance_metadata_get_item(context, instance_uuid,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
@@ -4148,11 +4209,11 @@ def instance_metadata_update(context, instance_id, metadata, delete):
item = {"value": meta_value}
try:
- meta_ref = instance_metadata_get_item(context, instance_id,
+ meta_ref = instance_metadata_get_item(context, instance_uuid,
meta_key, session)
except exception.InstanceMetadataNotFound, e:
meta_ref = models.InstanceMetadata()
- item.update({"key": meta_key, "instance_id": instance_id})
+ item.update({"key": meta_key, "instance_uuid": instance_uuid})
meta_ref.update(item)
meta_ref.save(session=session)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py
new file mode 100644
index 000000000..166ca98cb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py
@@ -0,0 +1,40 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+
+from nova import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_types = Table('instance_types', meta, autoload=True)
+ disabled = Column('disabled', Boolean)
+
+ instance_types.create_column(disabled)
+ instance_types.update().values(disabled=False).execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_types = Table('instance_types', meta, autoload=True)
+ disabled = Column('disabled', Boolean)
+
+ instance_types.drop_column(disabled)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
new file mode 100644
index 000000000..fc08bc124
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import select, Column, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
+from migrate import ForeignKeyConstraint
+
+from nova import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ instance_metadata = Table('instance_metadata', meta, autoload=True)
+ instances = Table('instances', meta, autoload=True)
+ uuid_column = Column('instance_uuid', String(36))
+ uuid_column.create(instance_metadata)
+
+ try:
+ instance_metadata.update().values(
+ instance_uuid=select(
+ [instances.c.uuid],
+ instances.c.id == instance_metadata.c.instance_id)
+ ).execute()
+ except Exception:
+ uuid_column.drop()
+ raise
+
+ fkeys = list(instance_metadata.c.instance_id.foreign_keys)
+ if fkeys:
+ try:
+ fkey_name = fkeys[0].constraint.name
+ ForeignKeyConstraint(
+ columns=[instance_metadata.c.instance_id],
+ refcolumns=[instances.c.id],
+ name=fkey_name).drop()
+ except Exception:
+ LOG.error(_("foreign key constraint couldn't be removed"))
+ raise
+
+ instance_metadata.c.instance_id.drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ instance_metadata = Table('instance_metadata', meta, autoload=True)
+ instances = Table('instances', meta, autoload=True)
+ id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
+ id_column.create(instance_metadata)
+
+ try:
+ instance_metadata.update().values(
+ instance_id=select(
+ [instances.c.id],
+ instances.c.uuid == instance_metadata.c.instance_uuid)
+ ).execute()
+ except Exception:
+ id_column.drop()
+ raise
+
+ instance_metadata.c.instance_uuid.drop()
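The matching *_sqlite_*.sql scripts below exist because SQLite's ALTER TABLE cannot drop columns or rewrite foreign keys, so the same transform has to be expressed as a copy into a temporary table followed by a rebuild.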
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
new file mode 100644
index 000000000..97b628c6e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
@@ -0,0 +1,64 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instance_metadata_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36),
+ key VARCHAR(255) NOT NULL,
+ value VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instance_metadata_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ NULL,
+ instance_uuid,
+ key,
+ value
+ FROM instance_metadata;
+
+ UPDATE instance_metadata_backup
+ SET instance_id=
+ (SELECT id
+ FROM instances
+ WHERE instance_metadata_backup.instance_uuid = instances.uuid
+ );
+
+ DROP TABLE instance_metadata;
+
+ CREATE TABLE instance_metadata (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ key VARCHAR(255) NOT NULL,
+ value VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_id) REFERENCES instances (id)
+ );
+
+ CREATE INDEX instance_metadata_instance_id_idx ON instance_metadata(instance_id);
+
+ INSERT INTO instance_metadata
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ instance_id,
+ key,
+ value
+ FROM instance_metadata_backup;
+
+ DROP TABLE instance_metadata_backup;
+
+COMMIT;
\ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
new file mode 100644
index 000000000..0d1e1ca8b
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
@@ -0,0 +1,64 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instance_metadata_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36),
+ key VARCHAR(255) NOT NULL,
+ value VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instance_metadata_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ instance_id,
+ NULL,
+ key,
+ value
+ FROM instance_metadata;
+
+ UPDATE instance_metadata_backup
+ SET instance_uuid=
+ (SELECT uuid
+ FROM instances
+ WHERE instance_metadata_backup.instance_id = instances.id
+ );
+
+ DROP TABLE instance_metadata;
+
+ CREATE TABLE instance_metadata (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36) NOT NULL,
+ key VARCHAR(255) NOT NULL,
+ value VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
+ );
+
+ CREATE INDEX instance_metadata_instance_uuid_idx ON instance_metadata(instance_uuid);
+
+ INSERT INTO instance_metadata
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ instance_uuid,
+ key,
+ value
+ FROM instance_metadata_backup;
+
+ DROP TABLE instance_metadata_backup;
+
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
new file mode 100644
index 000000000..3a04895cc
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import select, Column, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
+from migrate import ForeignKeyConstraint
+
+from nova import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ sgia = Table('security_group_instance_association', meta, autoload=True)
+ instances = Table('instances', meta, autoload=True)
+ uuid_column = Column('instance_uuid', String(36))
+ uuid_column.create(sgia)
+
+ try:
+ sgia.update().values(
+ instance_uuid=select(
+ [instances.c.uuid],
+ instances.c.id == sgia.c.instance_id)
+ ).execute()
+ except Exception:
+ uuid_column.drop()
+ raise
+
+ fkeys = list(sgia.c.instance_id.foreign_keys)
+ if fkeys:
+ try:
+ fkey_name = fkeys[0].constraint.name
+ ForeignKeyConstraint(
+ columns=[sgia.c.instance_id],
+ refcolumns=[instances.c.id],
+ name=fkey_name).drop()
+ except Exception:
+ LOG.error(_("foreign key constraint couldn't be removed"))
+ raise
+
+ sgia.c.instance_id.drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ sgia = Table('security_group_instance_association', meta, autoload=True)
+ instances = Table('instances', meta, autoload=True)
+ id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
+ id_column.create(sgia)
+
+ try:
+ sgia.update().values(
+ instance_id=select(
+ [instances.c.id],
+ instances.c.uuid == sgia.c.instance_uuid)
+ ).execute()
+ except Exception:
+ id_column.drop()
+ raise
+
+ sgia.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
new file mode 100644
index 000000000..5f13b8997
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
@@ -0,0 +1,61 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE security_group_instance_association_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ security_group_id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO security_group_instance_association_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ security_group_id,
+ NULL,
+ instance_uuid
+ FROM security_group_instance_association;
+
+ UPDATE security_group_instance_association_backup
+ SET instance_id=
+ (SELECT id
+ FROM instances
+ WHERE security_group_instance_association_backup.instance_uuid = instances.uuid
+ );
+
+ DROP TABLE security_group_instance_association;
+
+ CREATE TABLE security_group_instance_association (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ security_group_id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_id) REFERENCES instances (id)
+ );
+
+ CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
+ CREATE INDEX security_group_instance_association_instance_id_idx ON security_group_instance_association(instance_id);
+
+ INSERT INTO security_group_instance_association
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ security_group_id,
+ instance_id
+ FROM security_group_instance_association_backup;
+
+ DROP TABLE security_group_instance_association_backup;
+
+COMMIT;
\ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
new file mode 100644
index 000000000..f39d5b1fc
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
@@ -0,0 +1,61 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE security_group_instance_association_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ security_group_id INTEGER NOT NULL,
+ instance_id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO security_group_instance_association_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ security_group_id,
+ instance_id,
+ NULL
+ FROM security_group_instance_association;
+
+ UPDATE security_group_instance_association_backup
+ SET instance_uuid=
+ (SELECT uuid
+ FROM instances
+ WHERE security_group_instance_association_backup.instance_id = instances.id
+ );
+
+ DROP TABLE security_group_instance_association;
+
+ CREATE TABLE security_group_instance_association (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ security_group_id INTEGER NOT NULL,
+ instance_uuid VARCHAR(36),
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
+ );
+
+ CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
+ CREATE INDEX security_group_instance_association_instance_uuid_idx ON security_group_instance_association(instance_uuid);
+
+ INSERT INTO security_group_instance_association
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ security_group_id,
+ instance_uuid
+ FROM security_group_instance_association_backup;
+
+ DROP TABLE security_group_instance_association_backup;
+
+COMMIT;
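Operators pick up versions 099 through 101 with the usual nova-manage db sync; sqlalchemy-migrate substitutes the dialect-specific *_sqlite_upgrade.sql / *_sqlite_downgrade.sql scripts automatically when the backend is SQLite.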
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 452816bb7..2576b237c 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -313,6 +313,7 @@ class InstanceTypes(BASE, NovaBase):
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, nullable=False, default=1)
vcpu_weight = Column(Integer, nullable=True)
+ disabled = Column(Boolean, default=False)
instances = relationship(Instance,
backref=backref('instance_type', uselist=False),
@@ -563,7 +564,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
id = Column(Integer, primary_key=True)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
- instance_id = Column(Integer, ForeignKey('instances.id'))
+ instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
@@ -584,7 +585,7 @@ class SecurityGroup(BASE, NovaBase):
'SecurityGroupInstanceAssociation.deleted == False,'
'SecurityGroup.deleted == False)',
secondaryjoin='and_('
- 'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
+ 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
@@ -858,11 +859,13 @@ class InstanceMetadata(BASE, NovaBase):
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
- instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
+ nullable=False)
instance = relationship(Instance, backref="metadata",
- foreign_keys=instance_id,
+ foreign_keys=instance_uuid,
primaryjoin='and_('
- 'InstanceMetadata.instance_id == Instance.id,'
+ 'InstanceMetadata.instance_uuid == '
+ 'Instance.uuid,'
'InstanceMetadata.deleted == False)')
diff --git a/nova/exception.py b/nova/exception.py
index 3f3988d2d..bf9318fd2 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -419,6 +419,11 @@ class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid).")
+class ConstraintNotMet(NovaException):
+ message = _("Constraint not met.")
+ code = 412
+
+
class NotFound(NovaException):
message = _("Resource could not be found.")
code = 404
@@ -816,7 +821,7 @@ class SchedulerWeightFlagNotFound(NotFound):
class InstanceMetadataNotFound(NotFound):
- message = _("Instance %(instance_id)s has no metadata with "
+ message = _("Instance %(instance_uuid)s has no metadata with "
"key %(metadata_key)s.")
@@ -988,8 +993,8 @@ class QuotaError(NovaException):
class TooManyInstances(QuotaError):
- message = _("Quota exceeded: already used %(used)d of %(allowed)d"
- " instances")
+ message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
+ " but already used %(used)d of %(allowed)d instances")
class VolumeSizeTooLarge(QuotaError):
diff --git a/nova/flags.py b/nova/flags.py
index 34d5fc814..25d5e0be7 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -325,7 +325,10 @@ global_opts = [
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
- 'It is not necessarily a hostname, FQDN, or IP address.'),
+ 'It is not necessarily a hostname, FQDN, or IP address. '
+ 'However, the node name must be valid within '
+ 'an AMQP key, and if using ZeroMQ, a valid '
+ 'hostname, FQDN, or IP address'),
cfg.StrOpt('node_availability_zone',
default='nova',
help='availability zone of this node'),
diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py
index 689c787fd..73a484570 100644
--- a/nova/network/quantum/manager.py
+++ b/nova/network/quantum/manager.py
@@ -28,6 +28,7 @@ from nova.network import manager
from nova.network.quantum import melange_ipam_lib
from nova.network.quantum import quantum_connection
from nova.openstack.common import cfg
+from nova import rpc
from nova import utils
LOG = logging.getLogger(__name__)
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
index 88c0ed95c..55a5718ed 100644
--- a/nova/network/quantum/nova_ipam_lib.py
+++ b/nova/network/quantum/nova_ipam_lib.py
@@ -228,10 +228,31 @@ class QuantumNovaIPAMLib(object):
# - otherwise, _disassociate_stale_fixed_ips is called periodically
# to disassociate all fixed ips that are unallocated
# but still associated with an instance-id.
+
+ read_deleted_context = admin_context.elevated(read_deleted='yes')
for fixed_ip in fixed_ips:
db.fixed_ip_update(admin_context, fixed_ip['address'],
{'allocated': False,
'virtual_interface_id': None})
+ fixed_id = fixed_ip['id']
+ floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
+ admin_context,
+ fixed_id)
+ # disassociate floating ips related to fixed_ip
+ for floating_ip in floating_ips:
+ address = floating_ip['address']
+ manager.FloatingIP.disassociate_floating_ip(
+ self.net_manager,
+ read_deleted_context,
+ address,
+ affect_auto_assigned=True)
+ # deallocate if auto_assigned
+ if floating_ip['auto_assigned']:
+ manager.FloatingIP.deallocate_floating_ip(
+ read_deleted_context,
+ address,
+ affect_auto_assigned=True)
+
if len(fixed_ips) == 0:
LOG.error(_('No fixed IPs to deallocate for vif %s'),
vif_ref['id'])
diff --git a/nova/common/policy.py b/nova/openstack/common/policy.py
index ec944a1cc..203995a3d 100644
--- a/nova/common/policy.py
+++ b/nova/openstack/common/policy.py
@@ -18,12 +18,12 @@
"""Common Policy Engine Implementation"""
import json
+import logging
import urllib
import urllib2
-class NotAuthorized(Exception):
- pass
+LOG = logging.getLogger(__name__)
_BRAIN = None
@@ -45,7 +45,8 @@ def reset():
_BRAIN = None
-def enforce(match_list, target_dict, credentials_dict):
+def enforce(match_list, target_dict, credentials_dict, exc=None,
+ *args, **kwargs):
"""Enforces authorization of some rules against credentials.
:param match_list: nested tuples of data to match against
@@ -106,14 +107,24 @@ def enforce(match_list, target_dict, credentials_dict):
Credentials dicts contain as much information as we can about the user
performing the action.
- :raises NotAuthorized: if the check fails
+ :param exc: exception to raise
+ Class of the exception to raise if the check fails. Any remaining
+ arguments passed to enforce() (both positional and keyword arguments)
+ will be passed to the exception class. If exc is not provided, returns
+ False.
+
+ :return: True if the policy allows the action
+ :return: False if the policy does not allow the action and exc is not set
"""
global _BRAIN
if not _BRAIN:
_BRAIN = Brain()
if not _BRAIN.check(match_list, target_dict, credentials_dict):
- raise NotAuthorized()
+ if exc:
+ raise exc(*args, **kwargs)
+ return False
+ return True
class Brain(object):
@@ -132,7 +143,12 @@ class Brain(object):
self.rules[key] = match
def _check(self, match, target_dict, cred_dict):
- match_kind, match_value = match.split(':', 1)
+ try:
+ match_kind, match_value = match.split(':', 1)
+ except Exception:
+ LOG.exception(_("Failed to understand rule %(match)r") % locals())
+ # If the rule is invalid, fail closed
+ return False
try:
f = getattr(self, '_check_%s' % match_kind)
except AttributeError:
diff --git a/nova/policy.py b/nova/policy.py
index e976831f3..8c501da9e 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -19,10 +19,10 @@
import os.path
-from nova.common import policy
from nova import exception
from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import policy
from nova import utils
@@ -90,7 +90,5 @@ def enforce(context, action, target):
match_list = ('rule:%s' % action,)
credentials = context.to_dict()
- try:
- policy.enforce(match_list, target, credentials)
- except policy.NotAuthorized:
- raise exception.PolicyNotAuthorized(action=action)
+ policy.enforce(match_list, target, credentials,
+ exception.PolicyNotAuthorized, action=action)
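The two hunks above change the enforce() contract: instead of always raising its own NotAuthorized, the common policy module now returns a boolean and raises only when the caller supplies an exception class. A minimal sketch of both modes (the rule name, target, credentials, and exception class below are illustrative, not from this commit):

    from nova.openstack.common import policy

    class NotAllowed(Exception):
        pass

    match_list = ('rule:compute:delete',)
    target = {'project_id': 'p1'}
    creds = {'roles': ['member'], 'project_id': 'p1'}

    # Boolean mode: with no exc argument, a failed check returns False.
    if not policy.enforce(match_list, target, creds):
        pass  # deny quietly

    # Exception mode: a failed check raises exc(*args, **kwargs), which
    # is how nova/policy.py now raises PolicyNotAuthorized.
    policy.enforce(match_list, target, creds, NotAllowed)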
diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py
index 1980f9679..1ce43d650 100644
--- a/nova/rpc/__init__.py
+++ b/nova/rpc/__init__.py
@@ -42,6 +42,10 @@ rpc_opts = [
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
+ cfg.IntOpt('rpc_cast_timeout',
+ default=30,
+ help='Seconds to wait before a cast expires (TTL). '
+ 'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception'],
help='Modules of exceptions that are permitted to be recreated'
@@ -54,14 +58,7 @@ rpc_opts = [
help='If passed, use a fake RabbitMQ provider'),
]
-_CONF = None
-
-
-def register_opts(conf):
- global _CONF
- _CONF = conf
- _CONF.register_opts(rpc_opts)
- _get_impl().register_opts(_CONF)
+cfg.CONF.register_opts(rpc_opts)
def create_connection(new=True):
@@ -77,7 +74,7 @@ def create_connection(new=True):
:returns: An instance of nova.rpc.common.Connection
"""
- return _get_impl().create_connection(_CONF, new=new)
+ return _get_impl().create_connection(cfg.CONF, new=new)
def call(context, topic, msg, timeout=None):
@@ -99,7 +96,7 @@ def call(context, topic, msg, timeout=None):
:raises: nova.rpc.common.Timeout if a complete response is not received
before the timeout is reached.
"""
- return _get_impl().call(_CONF, context, topic, msg, timeout)
+ return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
def cast(context, topic, msg):
@@ -116,7 +113,7 @@ def cast(context, topic, msg):
:returns: None
"""
- return _get_impl().cast(_CONF, context, topic, msg)
+ return _get_impl().cast(cfg.CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
@@ -136,7 +133,7 @@ def fanout_cast(context, topic, msg):
:returns: None
"""
- return _get_impl().fanout_cast(_CONF, context, topic, msg)
+ return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
@@ -165,7 +162,7 @@ def multicall(context, topic, msg, timeout=None):
:raises: nova.rpc.common.Timeout if a complete response is not received
before the timeout is reached.
"""
- return _get_impl().multicall(_CONF, context, topic, msg, timeout)
+ return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
def notify(context, topic, msg):
@@ -178,7 +175,7 @@ def notify(context, topic, msg):
:returns: None
"""
- return _get_impl().notify(_CONF, context, topic, msg)
+ return _get_impl().notify(cfg.CONF, context, topic, msg)
def cleanup():
@@ -206,7 +203,7 @@ def cast_to_server(context, server_params, topic, msg):
:returns: None
"""
- return _get_impl().cast_to_server(_CONF, context, server_params, topic,
+ return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
msg)
@@ -222,12 +219,25 @@ def fanout_cast_to_server(context, server_params, topic, msg):
:returns: None
"""
- return _get_impl().fanout_cast_to_server(_CONF, context, server_params,
+ return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
- """Get a queue name for a given topic + host."""
+ """Get a queue name for a given topic + host.
+
+ This function only works if this naming convention is followed on the
+    consumer side as well. For example, in nova, every instance of the
+ nova-foo service calls create_consumer() for two topics:
+
+ foo
+ foo.<host>
+
+ Messages sent to the 'foo' topic are distributed to exactly one instance of
+ the nova-foo service. The services are chosen in a round-robin fashion.
+ Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
+ <host>.
+ """
return '%s.%s' % (topic, host)
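Because queue_get_for() is pure string formatting, the convention above is easy to show end to end. A sketch, assuming a 'compute' service on host 'node-1' (the cast method name here is made up for illustration):

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()

    # Service side (via Connection.create_consumer in the backends):
    #   conn.create_consumer('compute', proxy)         # round-robin
    #   conn.create_consumer('compute.node-1', proxy)  # host-directed

    # Caller side: derive the host-directed queue name.
    topic = rpc.queue_get_for(ctxt, 'compute', 'node-1')  # 'compute.node-1'
    rpc.cast(ctxt, topic, {'method': 'refresh_status', 'args': {}})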
@@ -238,5 +248,5 @@ def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
- _RPCIMPL = importutils.import_module(_CONF.rpc_backend)
+ _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
return _RPCIMPL
diff --git a/nova/rpc/impl_fake.py b/nova/rpc/impl_fake.py
index 24ef0e7c1..22bf04939 100644
--- a/nova/rpc/impl_fake.py
+++ b/nova/rpc/impl_fake.py
@@ -182,7 +182,3 @@ def fanout_cast(conf, context, topic, msg):
consumer.call(context, version, method, args, None)
except Exception:
pass
-
-
-def register_opts(conf):
- pass
diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
index b9fb081bd..a9d887323 100644
--- a/nova/rpc/impl_kombu.py
+++ b/nova/rpc/impl_kombu.py
@@ -82,6 +82,8 @@ kombu_opts = [
]
+cfg.CONF.register_opts(kombu_opts)
+
LOG = rpc_common.LOG
@@ -137,9 +139,10 @@ class ConsumerBase(object):
message = self.channel.message_to_python(raw_message)
try:
callback(message.payload)
- message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
+ finally:
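+            # Ack unconditionally: a message whose callback raised is
+            # logged and skipped rather than redelivered forever.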
+ message.ack()
self.queue.consume(*args, callback=_callback, **options)
@@ -754,7 +757,3 @@ def notify(conf, context, topic, msg):
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
-
-
-def register_opts(conf):
- conf.register_opts(kombu_opts)
diff --git a/nova/rpc/impl_qpid.py b/nova/rpc/impl_qpid.py
index 388b99542..ed850339e 100644
--- a/nova/rpc/impl_qpid.py
+++ b/nova/rpc/impl_qpid.py
@@ -78,6 +78,8 @@ qpid_opts = [
help='Disable Nagle algorithm'),
]
+cfg.CONF.register_opts(qpid_opts)
+
class ConsumerBase(object):
"""Consumer base class."""
@@ -575,7 +577,3 @@ def notify(conf, context, topic, msg):
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
-
-
-def register_opts(conf):
- conf.register_opts(qpid_opts)
diff --git a/nova/rpc/impl_zmq.py b/nova/rpc/impl_zmq.py
new file mode 100644
index 000000000..f540e75c4
--- /dev/null
+++ b/nova/rpc/impl_zmq.py
@@ -0,0 +1,713 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import pprint
+import string
+import sys
+import types
+import uuid
+
+import eventlet
+from eventlet.green import zmq
+import greenlet
+
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.rpc import common as rpc_common
+
+
+# Convenience aliases; these names are not modified.
+pformat = pprint.pformat
+Timeout = eventlet.timeout.Timeout
+LOG = rpc_common.LOG
+RemoteError = rpc_common.RemoteError
+RPCException = rpc_common.RPCException
+
+zmq_opts = [
+ cfg.StrOpt('rpc_zmq_bind_address', default='*',
+ help='ZeroMQ bind address. Should be a wildcard (*), '
+ 'an ethernet interface, or IP. '
+ 'The "host" option should point or resolve to this address.'),
+
+ # The module.Class to use for matchmaking.
+ cfg.StrOpt('rpc_zmq_matchmaker',
+ default='nova.rpc.matchmaker.MatchMakerLocalhost',
+ help='MatchMaker driver'),
+
+ # The following port is unassigned by IANA as of 2012-05-21
+ cfg.IntOpt('rpc_zmq_port', default=9501,
+ help='ZeroMQ receiver listening port'),
+
+ cfg.IntOpt('rpc_zmq_contexts', default=1,
+ help='Number of ZeroMQ contexts, defaults to 1'),
+
+ cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/nova',
+ help='Directory for holding IPC sockets'),
+ ]
+
+
+# These globals are defined in register_opts(conf),
+# a mandatory initialization call
+FLAGS = None
+ZMQ_CTX = None # ZeroMQ Context, must be global.
+matchmaker = None # memoized matchmaker object
+
+
+def _serialize(data):
+ """
+ Serialization wrapper
+ We prefer using JSON, but it cannot encode all types.
+    Raise an error if a developer passes us bad data.
+ """
+ try:
+ return str(json.dumps(data, ensure_ascii=True))
+ except TypeError:
+ LOG.error(_("JSON serialization failed."))
+ raise
+
+
+def _deserialize(data):
+ """
+ Deserialization wrapper
+ """
+ LOG.debug(_("Deserializing: %s"), data)
+ return json.loads(data)
+
+
+class ZmqSocket(object):
+ """
+ A tiny wrapper around ZeroMQ to simplify the send/recv protocol
+ and connection management.
+    """
+
+ def __init__(self, addr, zmq_type, bind=True, subscribe=None):
+ self.sock = ZMQ_CTX.socket(zmq_type)
+ self.addr = addr
+ self.type = zmq_type
+ self.subscriptions = []
+
+ # Support failures on sending/receiving on wrong socket type.
+ self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
+ self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
+ self.can_sub = zmq_type in (zmq.SUB, )
+
+ # Support list, str, & None for subscribe arg (cast to list)
+ do_sub = {
+ list: subscribe,
+ str: [subscribe],
+ type(None): []
+ }[type(subscribe)]
+
+ for f in do_sub:
+ self.subscribe(f)
+
+ LOG.debug(_("Connecting to %{addr}s with %{type}s"
+ "\n-> Subscribed to %{subscribe}s"
+ "\n-> bind: %{bind}s"),
+ {'addr': addr, 'type': self.socket_s(),
+ 'subscribe': subscribe, 'bind': bind})
+
+ try:
+ if bind:
+ self.sock.bind(addr)
+ else:
+ self.sock.connect(addr)
+ except Exception:
+ raise RPCException(_("Could not open socket."))
+
+ def socket_s(self):
+ """Get socket type as string."""
+ t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
+ 'DEALER')
+ return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
+
+ def subscribe(self, msg_filter):
+ """Subscribe."""
+ if not self.can_sub:
+ raise RPCException("Cannot subscribe on this socket.")
+ LOG.debug(_("Subscribing to %s"), msg_filter)
+
+ try:
+ self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
+ except Exception:
+ return
+
+ self.subscriptions.append(msg_filter)
+
+ def unsubscribe(self, msg_filter):
+ """Unsubscribe."""
+ if msg_filter not in self.subscriptions:
+ return
+ self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
+ self.subscriptions.remove(msg_filter)
+
+ def close(self):
+ if self.sock is None or self.sock.closed:
+ return
+
+ # We must unsubscribe, or we'll leak descriptors.
+ if len(self.subscriptions) > 0:
+ for f in self.subscriptions:
+ try:
+ self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
+ except Exception:
+ pass
+ self.subscriptions = []
+
+ # Linger -1 prevents lost/dropped messages
+ try:
+ self.sock.close(linger=-1)
+ except Exception:
+ pass
+ self.sock = None
+
+ def recv(self):
+ if not self.can_recv:
+ raise RPCException(_("You cannot recv on this socket."))
+ return self.sock.recv_multipart()
+
+ def send(self, data):
+ if not self.can_send:
+ raise RPCException(_("You cannot send on this socket."))
+ self.sock.send_multipart(data)
+
+
+class ZmqClient(object):
+ """Client for ZMQ sockets."""
+
+ def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
+ self.outq = ZmqSocket(addr, socket_type, bind=bind)
+
+ def cast(self, msg_id, topic, data):
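+        # On the wire every message is a four-part multipart frame:
+        # [msg_id, topic, style, serialized payload] -- the same shape
+        # that ZmqProxy.consume() and ZmqReactor.consume() unpack.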
+ self.outq.send([str(msg_id), str(topic), str('cast'),
+ _serialize(data)])
+
+ def close(self):
+ self.outq.close()
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+ """Context that supports replying to a rpc.call."""
+ def __init__(self, **kwargs):
+ self.replies = []
+ super(RpcContext, self).__init__(**kwargs)
+
+ def deepcopy(self):
+ values = self.to_dict()
+ values['replies'] = self.replies
+ return self.__class__(**values)
+
+ def reply(self, reply=None, failure=None, ending=False):
+ if ending:
+ return
+ self.replies.append(reply)
+
+ @classmethod
+    def marshal(cls, ctx):
+ ctx_data = ctx.to_dict()
+ return _serialize(ctx_data)
+
+ @classmethod
+    def unmarshal(cls, data):
+ return RpcContext.from_dict(_deserialize(data))
+
+
+class InternalContext(object):
+ """Used by ConsumerBase as a private context for - methods."""
+
+ def __init__(self, proxy):
+ self.proxy = proxy
+ self.msg_waiter = None
+
+ def _get_response(self, ctx, proxy, topic, data):
+ """Process a curried message and cast the result to topic."""
+ LOG.debug(_("Running func with context: %s"), ctx.to_dict())
+ data.setdefault('version', None)
+ data.setdefault('args', [])
+
+ try:
+ result = proxy.dispatch(
+ ctx, data['version'], data['method'], **data['args'])
+ return ConsumerBase.normalize_reply(result, ctx.replies)
+ except greenlet.GreenletExit:
+ # ignore these since they are just from shutdowns
+ pass
+ except Exception:
+ return {'exc':
+ rpc_common.serialize_remote_exception(sys.exc_info())}
+
+ def reply(self, ctx, proxy,
+ msg_id=None, context=None, topic=None, msg=None):
+ """Reply to a casted call."""
+ # Our real method is curried into msg['args']
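+        # msg[0] is the marshalled caller context and msg[1] the
+        # original request, exactly as packed into 'args' by _call().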
+
+ child_ctx = RpcContext.unmarshal(msg[0])
+ response = ConsumerBase.normalize_reply(
+ self._get_response(child_ctx, proxy, topic, msg[1]),
+ ctx.replies)
+
+ LOG.debug(_("Sending reply"))
+ cast(FLAGS, ctx, topic, {
+ 'method': '-process_reply',
+ 'args': {
+ 'msg_id': msg_id,
+ 'response': response
+ }
+ })
+
+
+class ConsumerBase(object):
+ """Base Consumer."""
+
+ def __init__(self):
+ self.private_ctx = InternalContext(None)
+
+ @classmethod
+    def normalize_reply(cls, result, replies):
+ #TODO(ewindisch): re-evaluate and document this method.
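+        # Replies are always normalized to a list:
+        #   generator result    -> list(result)
+        #   accumulated replies -> the ctx.replies built up by reply()
+        #   plain value         -> a single-element list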
+ if isinstance(result, types.GeneratorType):
+ return list(result)
+ elif replies:
+ return replies
+ else:
+ return [result]
+
+ def process(self, style, target, proxy, ctx, data):
+        # Methods starting with '-' are processed internally
+        # (they are not valid public method names).
+ method = data['method']
+
+ # Internal method
+ # uses internal context for safety.
+ if data['method'][0] == '-':
+ # For reply / process_reply
+ method = method[1:]
+ if method == 'reply':
+ self.private_ctx.reply(ctx, proxy, **data['args'])
+ return
+
+ data.setdefault('version', None)
+ data.setdefault('args', [])
+ proxy.dispatch(ctx, data['version'],
+ data['method'], **data['args'])
+
+
+class ZmqBaseReactor(ConsumerBase):
+ """
+ A consumer class implementing a
+ centralized casting broker (PULL-PUSH)
+ for RoundRobin requests.
+ """
+
+ def __init__(self, conf):
+ super(ZmqBaseReactor, self).__init__()
+
+ self.conf = conf
+ self.mapping = {}
+ self.proxies = {}
+ self.threads = []
+ self.sockets = []
+ self.subscribe = {}
+
+ self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
+
+ def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
+ zmq_type_out=None, in_bind=True, out_bind=True,
+ subscribe=None):
+
+ LOG.info(_("Registering reactor"))
+
+ if zmq_type_in not in (zmq.PULL, zmq.SUB):
+ raise RPCException("Bad input socktype")
+
+ # Items push in.
+ inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
+ subscribe=subscribe)
+
+ self.proxies[inq] = proxy
+ self.sockets.append(inq)
+
+ LOG.info(_("In reactor registered"))
+
+ if not out_addr:
+ return
+
+ if zmq_type_out not in (zmq.PUSH, zmq.PUB):
+ raise RPCException("Bad output socktype")
+
+ # Items push out.
+ outq = ZmqSocket(out_addr, zmq_type_out,
+ bind=out_bind)
+
+ self.mapping[inq] = outq
+ self.mapping[outq] = inq
+ self.sockets.append(outq)
+
+ LOG.info(_("Out reactor registered"))
+
+ def consume_in_thread(self):
+ def _consume(sock):
+ LOG.info(_("Consuming socket"))
+ while True:
+ self.consume(sock)
+
+ for k in self.proxies.keys():
+ self.threads.append(
+ self.pool.spawn(_consume, k)
+ )
+
+ def wait(self):
+ for t in self.threads:
+ t.wait()
+
+ def close(self):
+ for s in self.sockets:
+ s.close()
+
+ for t in self.threads:
+ t.kill()
+
+
+class ZmqProxy(ZmqBaseReactor):
+ """
+ A consumer class implementing a
+ topic-based proxy, forwarding to
+ IPC sockets.
+ """
+
+ def __init__(self, conf):
+ super(ZmqProxy, self).__init__(conf)
+
+ self.topic_proxy = {}
+ ipc_dir = conf.rpc_zmq_ipc_dir
+
+ self.topic_proxy['zmq_replies'] = \
+ ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
+ zmq.PUB, bind=True)
+ self.sockets.append(self.topic_proxy['zmq_replies'])
+
+ def consume(self, sock):
+ ipc_dir = self.conf.rpc_zmq_ipc_dir
+
+ #TODO(ewindisch): use zero-copy (i.e. references, not copying)
+ data = sock.recv()
+ msg_id, topic, style, in_msg = data
+ topic = topic.split('.', 1)[0]
+
+ LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
+
+ # Handle zmq_replies magic
+ if topic.startswith('fanout~'):
+ sock_type = zmq.PUB
+ elif topic.startswith('zmq_replies'):
+ sock_type = zmq.PUB
+ inside = _deserialize(in_msg)
+ msg_id = inside[-1]['args']['msg_id']
+ response = inside[-1]['args']['response']
+ LOG.debug(_("->response->%s"), response)
+ data = [str(msg_id), _serialize(response)]
+ else:
+ sock_type = zmq.PUSH
+
+        if topic not in self.topic_proxy:
+ outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
+ sock_type, bind=True)
+ self.topic_proxy[topic] = outq
+ self.sockets.append(outq)
+ LOG.info(_("Created topic proxy: %s"), topic)
+
+ # It takes some time for a pub socket to open,
+ # before we can have any faith in doing a send() to it.
+ if sock_type == zmq.PUB:
+ eventlet.sleep(.5)
+
+ LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
+ self.topic_proxy[topic].send(data)
+ LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})
+
+
+class ZmqReactor(ZmqBaseReactor):
+ """
+ A consumer class implementing a
+ consumer for messages. Can also be
+ used as a 1:1 proxy
+ """
+
+ def __init__(self, conf):
+ super(ZmqReactor, self).__init__(conf)
+
+ def consume(self, sock):
+ #TODO(ewindisch): use zero-copy (i.e. references, not copying)
+ data = sock.recv()
+ LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
+ if sock in self.mapping:
+ LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
+ 'data': data})
+ self.mapping[sock].send(data)
+ return
+
+ msg_id, topic, style, in_msg = data
+
+ ctx, request = _deserialize(in_msg)
+ ctx = RpcContext.unmarshal(ctx)
+
+ proxy = self.proxies[sock]
+
+ self.pool.spawn_n(self.process, style, topic,
+ proxy, ctx, request)
+
+
+class Connection(rpc_common.Connection):
+ """Manages connections and threads."""
+
+ def __init__(self, conf):
+ self.conf = conf
+ self.reactor = ZmqReactor(conf)
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ # Only consume on the base topic name.
+ topic = topic.split('.', 1)[0]
+
+ LOG.info(_("Create Consumer for topic (%(topic)s)") %
+ {'topic': topic})
+
+ # Subscription scenarios
+ if fanout:
+            subscribe = fanout if isinstance(fanout, str) else ''
+ sock_type = zmq.SUB
+ topic = 'fanout~' + topic
+ else:
+ sock_type = zmq.PULL
+ subscribe = None
+
+ # Receive messages from (local) proxy
+ inaddr = "ipc://%s/zmq_topic_%s" % \
+ (self.conf.rpc_zmq_ipc_dir, topic)
+
+ LOG.debug(_("Consumer is a zmq.%s"),
+ ['PULL', 'SUB'][sock_type == zmq.SUB])
+
+ self.reactor.register(proxy, inaddr, sock_type,
+ subscribe=subscribe, in_bind=False)
+
+ def close(self):
+ self.reactor.close()
+
+ def wait(self):
+ self.reactor.wait()
+
+ def consume_in_thread(self):
+ self.reactor.consume_in_thread()
+
+
+def _cast(addr, context, msg_id, topic, msg, timeout=None):
+ timeout_cast = timeout or FLAGS.rpc_cast_timeout
+ payload = [RpcContext.marshal(context), msg]
+
+ with Timeout(timeout_cast, exception=rpc_common.Timeout):
+ try:
+ conn = ZmqClient(addr)
+
+ # assumes cast can't return an exception
+ conn.cast(msg_id, topic, payload)
+ except zmq.ZMQError:
+ raise RPCException("Cast failed. ZMQ Socket Exception")
+ finally:
+ if 'conn' in vars():
+ conn.close()
+
+
+def _call(addr, context, msg_id, topic, msg, timeout=None):
+ # timeout_response is how long we wait for a response
+ timeout = timeout or FLAGS.rpc_response_timeout
+
+ # The msg_id is used to track replies.
+ msg_id = str(uuid.uuid4().hex)
+
+ # Replies always come into the reply service.
+ # We require that FLAGS.host is a FQDN, IP, or resolvable hostname.
+ reply_topic = "zmq_replies.%s" % FLAGS.host
+
+ LOG.debug(_("Creating payload"))
+ # Curry the original request into a reply method.
+ mcontext = RpcContext.marshal(context)
+ payload = {
+ 'method': '-reply',
+ 'args': {
+ 'msg_id': msg_id,
+ 'context': mcontext,
+ 'topic': reply_topic,
+ 'msg': [mcontext, msg]
+ }
+ }
+
+ LOG.debug(_("Creating queue socket for reply waiter"))
+
+ # Messages arriving async.
+ # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
+ with Timeout(timeout, exception=rpc_common.Timeout):
+ try:
+ msg_waiter = ZmqSocket(
+ "ipc://%s/zmq_topic_zmq_replies" % FLAGS.rpc_zmq_ipc_dir,
+ zmq.SUB, subscribe=msg_id, bind=False
+ )
+
+ LOG.debug(_("Sending cast"))
+ _cast(addr, context, msg_id, topic, payload)
+
+ LOG.debug(_("Cast sent; Waiting reply"))
+ # Blocks until receives reply
+ msg = msg_waiter.recv()
+ LOG.debug(_("Received message: %s"), msg)
+ LOG.debug(_("Unpacking response"))
+ responses = _deserialize(msg[-1])
+ # ZMQError trumps the Timeout error.
+ except zmq.ZMQError:
+ raise RPCException("ZMQ Socket Error")
+ finally:
+ if 'msg_waiter' in vars():
+ msg_waiter.close()
+
+ # It seems we don't need to do all of the following,
+ # but perhaps it would be useful for multicall?
+ # One effect of this is that we're checking all
+ # responses for Exceptions.
+ for resp in responses:
+ if isinstance(resp, types.DictType) and 'exc' in resp:
+ raise rpc_common.deserialize_remote_exception(FLAGS, resp['exc'])
+
+ return responses[-1]
+
+
+def _multi_send(method, context, topic, msg, timeout=None):
+ """
+ Wraps the sending of messages,
+ dispatches to the matchmaker and sends
+ message to all relevant hosts.
+ """
+ conf = FLAGS
+ LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
+
+ queues = matchmaker.queues(topic)
+ LOG.debug(_("Sending message(s) to: %s"), queues)
+
+ # Don't stack if we have no matchmaker results
+ if len(queues) == 0:
+ LOG.warn(_("No matchmaker results. Not casting."))
+ # While not strictly a timeout, callers know how to handle
+ # this exception and a timeout isn't too big a lie.
+ raise rpc_common.Timeout, "No match from matchmaker."
+
+ # This supports brokerless fanout (addresses > 1)
+ for queue in queues:
+ (_topic, ip_addr) = queue
+ _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
+
+ if method.__name__ == '_cast':
+ eventlet.spawn_n(method, _addr, context,
+ _topic, _topic, msg, timeout)
+ return
+ return method(_addr, context, _topic, _topic, msg, timeout)
+
+
+def create_connection(conf, new=True):
+ return Connection(conf)
+
+
+def multicall(conf, *args, **kwargs):
+ """Multiple calls."""
+ register_opts(conf)
+ return _multi_send(_call, *args, **kwargs)
+
+
+def call(conf, *args, **kwargs):
+ """Send a message, expect a response."""
+ register_opts(conf)
+ data = _multi_send(_call, *args, **kwargs)
+ return data[-1]
+
+
+def cast(conf, *args, **kwargs):
+ """Send a message expecting no reply."""
+ register_opts(conf)
+ _multi_send(_cast, *args, **kwargs)
+
+
+def fanout_cast(conf, context, topic, msg, **kwargs):
+ """Send a message to all listening and expect no reply."""
+ register_opts(conf)
+    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
+ # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
+ _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
+
+
+def notify(conf, context, topic, msg, **kwargs):
+ """
+ Send notification event.
+ Notifications are sent to topic-priority.
+ This differs from the AMQP drivers which send to topic.priority.
+ """
+ register_opts(conf)
+ # NOTE(ewindisch): dot-priority in rpc notifier does not
+ # work with our assumptions.
+    topic = topic.replace('.', '-')
+ cast(conf, context, topic, msg, **kwargs)
+
+
+def cleanup():
+ """Clean up resources in use by implementation."""
+ global ZMQ_CTX
+ global matchmaker
+ matchmaker = None
+ ZMQ_CTX.destroy()
+ ZMQ_CTX = None
+
+
+def register_opts(conf):
+ """Registration of options for this driver."""
+ #NOTE(ewindisch): ZMQ_CTX and matchmaker
+ # are initialized here as this is as good
+ # an initialization method as any.
+
+ # We memoize through these globals
+ global ZMQ_CTX
+ global matchmaker
+ global FLAGS
+
+ if not FLAGS:
+ conf.register_opts(zmq_opts)
+ FLAGS = conf
+ # Don't re-set, if this method is called twice.
+ if not ZMQ_CTX:
+ ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
+ if not matchmaker:
+ # rpc_zmq_matchmaker should be set to a 'module.Class'
+ mm_path = conf.rpc_zmq_matchmaker.split('.')
+ mm_module = '.'.join(mm_path[:-1])
+ mm_class = mm_path[-1]
+
+ # Only initialize a class.
+ if mm_path[-1][0] not in string.ascii_uppercase:
+ LOG.error(_("Matchmaker could not be loaded.\n"
+ "rpc_zmq_matchmaker is not a class."))
+            raise RPCException(_("rpc_zmq_matchmaker is not a class."))
+
+ mm_impl = importutils.import_module(mm_module)
+ mm_constructor = getattr(mm_impl, mm_class)
+ matchmaker = mm_constructor()
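Note that _multi_send() assumes nothing about the matchmaker beyond a queues(topic) method returning (topic, host) pairs, with fanout~ topics expanding to every host. A minimal static sketch of that interface (the real nova.rpc.matchmaker module is not part of this diff, so this is illustrative only):

    class StaticMatchMaker(object):
        """Toy matchmaker: a fixed map of base topic -> IP addresses."""

        def __init__(self, hosts):
            self.hosts = hosts

        def queues(self, topic):
            # fanout~ topics go to every host; other topics to one host.
            base = topic.split('.', 1)[0].replace('fanout~', '')
            addrs = self.hosts.get(base, [])
            if topic.startswith('fanout~'):
                return [(topic, ip) for ip in addrs]
            return [(topic, addrs[0])] if addrs else []

    # e.g. StaticMatchMaker({'compute': ['10.0.0.2', '10.0.0.3']})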
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index bc0d7112e..98bf5099b 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -251,9 +251,7 @@ class Scheduler(object):
"""
# Checking instance is running.
- if instance_ref['power_state'] != power_state.RUNNING and not (
- FLAGS.libvirt_type == 'xen' and
- instance_ref['power_state'] == power_state.BLOCKED):
+ if instance_ref['power_state'] != power_state.RUNNING:
raise exception.InstanceNotRunning(
instance_id=instance_ref['uuid'])
@@ -443,11 +441,7 @@ class Scheduler(object):
available = available_gb * (1024 ** 3)
# Getting necessary disk size
- topic = rpc.queue_get_for(context, FLAGS.compute_topic,
- instance_ref['host'])
- ret = rpc.call(context, topic,
- {"method": 'get_instance_disk_info',
- "args": {'instance_name': instance_ref['name']}})
+ ret = self.compute_rpcapi.get_instance_disk_info(context, instance_ref)
disk_infos = jsonutils.loads(ret)
necessary = 0
@@ -492,21 +486,17 @@ class Scheduler(object):
"""
src = instance_ref['host']
- dst_t = rpc.queue_get_for(context, FLAGS.compute_topic, dest)
- src_t = rpc.queue_get_for(context, FLAGS.compute_topic, src)
- filename = rpc.call(context, dst_t,
- {"method": 'create_shared_storage_test_file'})
+ filename = self.compute_rpcapi.create_shared_storage_test_file(context,
+ dest)
try:
# make sure existence at src host.
- ret = rpc.call(context, src_t,
- {"method": 'check_shared_storage_test_file',
- "args": {'filename': filename}})
+ ret = self.compute_rpcapi.check_shared_storage_test_file(context,
+ filename, src)
finally:
- rpc.cast(context, dst_t,
- {"method": 'cleanup_shared_storage_test_file',
- "args": {'filename': filename}})
+ self.compute_rpcapi.cleanup_shared_storage_test_file(context,
+ filename, dest)
return ret
diff --git a/nova/service.py b/nova/service.py
index b179cda6c..ab1fd339a 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -179,7 +179,6 @@ class Service(object):
LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
{'topic': self.topic, 'vcs_string': vcs_string})
utils.cleanup_file_locks()
- rpc.register_opts(FLAGS)
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
@@ -398,7 +397,6 @@ class WSGIService(object):
"""
utils.cleanup_file_locks()
- rpc.register_opts(FLAGS)
if self.manager:
self.manager.init_host()
self.server.start()
@@ -439,7 +437,6 @@ def serve(*servers):
def wait():
LOG.debug(_('Full set of FLAGS:'))
- rpc.register_opts(FLAGS)
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
# hide flag contents from log if contains a password
diff --git a/nova/test.py b/nova/test.py
index a9a095562..0108958b8 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -150,10 +150,6 @@ class TestCase(unittest.TestCase):
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
- if FLAGS.connection_type == 'fake':
- if hasattr(fake.FakeConnection, '_instance'):
- del fake.FakeConnection._instance
-
if FLAGS.image_service == 'nova.image.fake.FakeImageService':
nova.image.fake.FakeImageService_reset()
@@ -293,3 +289,30 @@ class TestCase(unittest.TestCase):
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
+
+ def assertNotRaises(self, exc_class, func, *args, **kwargs):
+ """Assert that a particular exception is not raised.
+
+ If exc_class is None, then we assert that *no* error is raised.
+
+ Otherwise, we assert that only a particular error wasn't raised;
+ if any different exceptions were raised, we just silently capture
+ them and return.
+ """
+ exc_msg = kwargs.pop('exc_msg', '')
+
+        if exc_class is None:
+            # Ensure no errors at all were raised; any exception
+            # fails the assertion.
+            try:
+                return func(*args, **kwargs)
+            except Exception:
+                raise AssertionError(exc_msg)
+ else:
+ # Ensure a specific error wasn't raised
+ try:
+ return func(*args, **kwargs)
+ except exc_class:
+ raise AssertionError(exc_msg)
+ except Exception:
+ pass # Any other errors are fine
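A quick usage sketch of assertNotRaises(), mirroring how the new DisabledInstanceTypesTestCase below calls it:

    # Fail the test if this specific exception is raised; any other
    # exception is silently swallowed.
    self.assertNotRaises(exception.InstanceTypeNotFound,
        self.compute_api.create, self.context, inst_type, None,
        exc_msg="Visible instance-types can be built from")

    # With exc_class=None, fail the test if *any* exception is raised.
    self.assertNotRaises(None, self.compute_api.delete,
        self.context, instance, exc_msg="delete should succeed")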
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index d04681935..d1fa17185 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -60,16 +60,12 @@ def reset_db():
def setup():
import mox # Fail fast if you don't have mox. Workaround for bug 810424
- from nova import rpc # Register rpc_backend before fake_flags sets it
- FLAGS.register_opts(rpc.rpc_opts)
-
from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.tests import fake_flags
fake_flags.set_defaults(FLAGS)
- rpc.register_opts(FLAGS)
if FLAGS.sql_connection == "sqlite://":
if migration.db_version() > migration.INIT_VERSION:
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index c9080c0e3..58cba6d15 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -90,7 +90,7 @@ def get_instances_with_cached_ips(orig_func, *args, **kwargs):
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True)
def dumb(*args, **kwargs):
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index f18c78021..ee3d5bba0 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -35,7 +35,7 @@ FLAGS = flags.FLAGS
class EC2ValidateTestCase(test.TestCase):
def setUp(self):
super(EC2ValidateTestCase, self).setUp()
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True)
def dumb(*args, **kwargs):
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index c3c28e9e4..cc5475de1 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -22,10 +22,10 @@ from lxml import etree
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
-from nova.common import policy as common_policy
from nova.compute import api
from nova import context
from nova import flags
+from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index a94dc1cdc..a5cd2bbef 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 OpenStack LLC.
+# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,6 +23,8 @@ import urlparse
from nova.api.openstack.compute import flavors
from nova.api.openstack import xmlutil
import nova.compute.instance_types
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import test
@@ -41,13 +43,15 @@ FAKE_FLAVORS = {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
- "root_gb": '10'
+ "root_gb": '10',
+ "disabled": False,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
- "root_gb": '20'
+ "root_gb": '20',
+ "disabled": False,
},
}
@@ -109,6 +113,7 @@ class FlavorsTest(test.TestCase):
expected = {
"flavor": {
"id": "1",
+ "OS-FLV-DISABLED:disabled": False,
"name": "flavor 1",
"ram": "256",
"disk": "10",
@@ -137,6 +142,7 @@ class FlavorsTest(test.TestCase):
expected = {
"flavor": {
"id": "1",
+ "OS-FLV-DISABLED:disabled": False,
"name": "flavor 1",
"ram": "256",
"disk": "10",
@@ -307,6 +313,7 @@ class FlavorsTest(test.TestCase):
"flavors": [
{
"id": "1",
+ "OS-FLV-DISABLED:disabled": False,
"name": "flavor 1",
"ram": "256",
"disk": "10",
@@ -326,6 +333,7 @@ class FlavorsTest(test.TestCase):
},
{
"id": "2",
+ "OS-FLV-DISABLED:disabled": False,
"name": "flavor 2",
"ram": "512",
"disk": "20",
@@ -427,6 +435,7 @@ class FlavorsTest(test.TestCase):
"flavors": [
{
"id": "2",
+ "OS-FLV-DISABLED:disabled": False,
"name": "flavor 2",
"ram": "512",
"disk": "20",
@@ -702,3 +711,98 @@ class FlavorsXMLSerializationTest(test.TestCase):
xmlutil.validate_schema(root, 'flavors_index')
flavor_elems = root.findall('{0}flavor'.format(NS))
self.assertEqual(len(flavor_elems), 0)
+
+
+class DisabledFlavorsWithRealDBTest(test.TestCase):
+ """
+ Tests that disabled flavors should not be shown nor listed.
+ """
+ def setUp(self):
+ super(DisabledFlavorsWithRealDBTest, self).setUp()
+ self.controller = flavors.Controller()
+
+ # Add a new disabled type to the list of instance_types/flavors
+ self.req = fakes.HTTPRequest.blank('/v2/fake/flavors')
+ self.context = self.req.environ['nova.context']
+ self.admin_context = context.get_admin_context()
+
+ self.disabled_type = self._create_disabled_instance_type()
+ self.inst_types = db.api.instance_type_get_all(
+ self.admin_context)
+
+ def tearDown(self):
+ db.api.instance_type_destroy(
+ self.admin_context, self.disabled_type['name'])
+
+ super(DisabledFlavorsWithRealDBTest, self).tearDown()
+
+ def _create_disabled_instance_type(self):
+ inst_types = db.api.instance_type_get_all(
+ self.admin_context)
+
+ inst_type = inst_types[0]
+
+ del inst_type['id']
+ inst_type['name'] += '.disabled'
+ inst_type['flavorid'] = unicode(max(
+ [int(flavor['flavorid']) for flavor in inst_types]) + 1)
+ inst_type['disabled'] = True
+
+ disabled_type = db.api.instance_type_create(
+ self.admin_context, inst_type)
+
+ return disabled_type
+
+ def test_index_should_not_list_disabled_flavors_to_user(self):
+ self.context.is_admin = False
+
+ flavor_list = self.controller.index(self.req)['flavors']
+ api_flavorids = set(f['id'] for f in flavor_list)
+
+ db_flavorids = set(i['flavorid'] for i in self.inst_types)
+ disabled_flavorid = str(self.disabled_type['flavorid'])
+
+ self.assert_(disabled_flavorid in db_flavorids)
+ self.assertEqual(db_flavorids - set([disabled_flavorid]),
+ api_flavorids)
+
+ def test_index_should_list_disabled_flavors_to_admin(self):
+ self.context.is_admin = True
+
+ flavor_list = self.controller.index(self.req)['flavors']
+ api_flavorids = set(f['id'] for f in flavor_list)
+
+ db_flavorids = set(i['flavorid'] for i in self.inst_types)
+ disabled_flavorid = str(self.disabled_type['flavorid'])
+
+ self.assert_(disabled_flavorid in db_flavorids)
+ self.assertEqual(db_flavorids, api_flavorids)
+
+ def test_show_should_include_disabled_flavor_for_user(self):
+ """
+        Counterintuitively, we should show disabled flavors to all users, not
+        just admins. The reason is that, when a user performs a server-show
+        request, we want to be able to display the pretty flavor name ('512 MB
+        Instance') and not just the flavor id, even if that flavor has been
+        marked disabled.
+ """
+ self.context.is_admin = False
+
+ flavor = self.controller.show(
+ self.req, self.disabled_type['flavorid'])['flavor']
+
+ self.assertEqual(flavor['name'], self.disabled_type['name'])
+
+ # FIXME(sirp): the disabled field is currently namespaced so that we
+ # don't impact the Openstack API. Eventually this should probably be
+ # made a first-class attribute in the next OSAPI version.
+ self.assert_('OS-FLV-DISABLED:disabled' in flavor)
+
+ def test_show_should_include_disabled_flavor_for_admin(self):
+ self.context.is_admin = True
+
+ flavor = self.controller.show(
+ self.req, self.disabled_type['flavorid'])['flavor']
+
+ self.assertEqual(flavor['name'], self.disabled_type['name'])
+ self.assert_('OS-FLV-DISABLED:disabled' in flavor)
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index 87fcec57b..577fb84ff 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -39,8 +39,8 @@ def return_create_instance_metadata(context, server_id, metadata, delete):
def return_server_metadata(context, server_id):
- if not isinstance(server_id, int):
- msg = 'id %s must be int in return server metadata' % server_id
+    if not isinstance(server_id, str) or len(server_id) != 36:
+ msg = 'id %s must be a uuid in return server metadata' % server_id
raise Exception(msg)
return stub_server_metadata()
@@ -70,11 +70,15 @@ def stub_max_server_metadata():
def return_server(context, server_id):
- return {'id': server_id, 'name': 'fake'}
+ return {'id': server_id,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake'}
def return_server_by_uuid(context, server_uuid):
- return {'id': 1, 'name': 'fake'}
+ return {'id': 1,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake'}
def return_server_nonexistant(context, server_id):
@@ -157,7 +161,8 @@ class ServerMetaDataTest(test.TestCase):
self.assertEqual(None, res)
def test_delete_nonexistant_server(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistant)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
@@ -226,7 +231,8 @@ class ServerMetaDataTest(test.TestCase):
req, self.uuid, body)
def test_create_nonexistant_server(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistant)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
@@ -313,7 +319,8 @@ class ServerMetaDataTest(test.TestCase):
self.assertEqual(expected, res_dict)
def test_update_item_nonexistant_server(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistant)
req = fakes.HTTPRequest.blank('/v1.1/fake/servers/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 83a8963a5..44741596b 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -1298,7 +1298,7 @@ class ServersControllerTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- def instance_destroy_mock(context, id):
+ def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
self.stubs.Set(nova.db, 'instance_destroy', instance_destroy_mock)
@@ -1313,7 +1313,7 @@ class ServersControllerTest(test.TestCase):
self.server_delete_called = False
- def instance_destroy_mock(context, id):
+ def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
self.stubs.Set(nova.db, 'instance_destroy', instance_destroy_mock)
@@ -2348,7 +2348,8 @@ class ServersControllerCreateTest(test.TestCase):
self.fail('expected quota to be exceeded')
except webob.exc.HTTPRequestEntityTooLarge as e:
self.assertEquals(e.explanation,
- _('Quota exceeded: already used 1 of 1 instances'))
+ _('Quota exceeded for instances: Requested 1, but'
+ ' already used 0 of 0 instances'))
class TestServerCreateRequestXMLDeserializer(test.TestCase):
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
index 5b9900265..74353ce3c 100644
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ b/nova/tests/baremetal/test_proxy_bare_metal.py
@@ -174,18 +174,14 @@ class BareMetalDomTestCase(test.TestCase):
"""Check to see that all entries in the domain list are removed
except for the one that is in the running state"""
- fake_file = StringIO.StringIO()
-
domains = [dict(node_id=1, name='i-00000001',
status=power_state.NOSTATE),
dict(node_id=2, name='i-00000002', status=power_state.RUNNING),
- dict(node_id=3, name='i-00000003', status=power_state.BLOCKED),
- dict(node_id=4, name='i-00000004', status=power_state.PAUSED),
- dict(node_id=5, name='i-00000005', status=power_state.SHUTDOWN),
- dict(node_id=6, name='i-00000006', status=power_state.SHUTOFF),
- dict(node_id=7, name='i-00000007', status=power_state.CRASHED),
- dict(node_id=8, name='i-00000008', status=power_state.SUSPENDED),
- dict(node_id=9, name='i-00000009', status=power_state.FAILED)]
+ dict(node_id=3, name='i-00000003', status=power_state.PAUSED),
+ dict(node_id=5, name='i-00000004', status=power_state.SHUTDOWN),
+ dict(node_id=7, name='i-00000005', status=power_state.CRASHED),
+ dict(node_id=8, name='i-00000006', status=power_state.SUSPENDED),
+ dict(node_id=9, name='i-00000007', status=power_state.NOSTATE)]
# Create the mock objects
self.mox.StubOutWithMock(dom, 'read_domains')
@@ -260,7 +256,7 @@ class ProxyBareMetalTestCase(test.TestCase):
self.mox.ReplayAll()
# Code under test
- conn = proxy.get_connection(True)
+ conn = proxy.ProxyConnection(True)
# TODO(mikalstill): this is not a very good fake instance
info = conn.get_info({'name': 'instance-00000001'})
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 6e9457062..6f03e9c84 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -26,7 +26,6 @@ import time
import mox
import nova
-import nova.common.policy
from nova import compute
from nova.compute import aggregate_states
from nova.compute import api as compute_api
@@ -44,6 +43,7 @@ from nova.image import fake as fake_image
from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
+from nova.openstack.common import policy as common_policy
import nova.policy
from nova import quota
from nova import rpc
@@ -106,7 +106,7 @@ class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
@@ -420,12 +420,12 @@ class ComputeTestCase(BaseTestCase):
def fake_rescue(self, context, instance_ref, network_info, image_meta):
called['rescued'] = True
- self.stubs.Set(nova.virt.fake.FakeConnection, 'rescue', fake_rescue)
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
- self.stubs.Set(nova.virt.fake.FakeConnection, 'unrescue',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = self._create_fake_instance()
@@ -593,7 +593,7 @@ class ComputeTestCase(BaseTestCase):
'num_cpu': 2,
'cpu_time': 0}
- self.stubs.Set(nova.virt.fake.FakeConnection, 'get_info',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'get_info',
fake_driver_get_info)
self.assertRaises(exception.Invalid,
@@ -613,7 +613,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_set_pass(self2, _instance, _pwd):
raise exception.NotAuthorized(_('Internal error'))
- self.stubs.Set(nova.virt.fake.FakeConnection, 'set_admin_password',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'set_admin_password',
fake_driver_set_pass)
instance = self._create_fake_instance()
@@ -647,7 +647,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(contents, "File Contents")
called['inject'] = True
- self.stubs.Set(nova.virt.fake.FakeConnection, 'inject_file',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_file',
fake_driver_inject_file)
instance = self._create_fake_instance()
@@ -664,7 +664,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_inject_network(self, instance, network_info):
called['inject'] = True
- self.stubs.Set(nova.virt.fake.FakeConnection, 'inject_network_info',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
fake_driver_inject_network)
instance = self._create_fake_instance()
@@ -681,7 +681,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_reset_network(self, instance):
called['reset'] = True
- self.stubs.Set(nova.virt.fake.FakeConnection, 'reset_network',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
fake_driver_reset_network)
instance = self._create_fake_instance()
@@ -700,7 +700,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(url, 'http://fake/url/')
self.assertEqual(md5hash, 'fakehash')
- self.stubs.Set(nova.virt.fake.FakeConnection, 'agent_update',
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'agent_update',
fake_driver_agent_update)
instance = self._create_fake_instance()
@@ -1456,9 +1456,11 @@ class ComputeTestCase(BaseTestCase):
'args': {'instance_id': inst_ref['id'],
'host': self.compute.host,
'teardown': False}})
- rpc.call(c, topic, {"method": "remove_volume_connection",
- "args": {'instance_id': inst_ref['id'],
- 'volume_id': volume_id}})
+ rpc.call(c, topic,
+ {"method": "remove_volume_connection",
+ "args": {'instance_id': inst_ref['id'],
+ 'volume_id': volume_id},
+ "version": compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None)
rpc.cast(c, topic, {"method": "rollback_live_migration_at_destination",
"args": {'instance_id': inst_ref['id']}})
@@ -2274,6 +2276,22 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['id'])
+ def test_delete_fast_if_host_not_set(self):
+ instance = self._create_fake_instance({'host': None})
+ self.compute_api.delete(self.context, instance)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ self.context, instance['uuid'])
+
+ def test_delete_handles_host_setting_race_condition(self):
+ instance, instance_uuid = self._run_instance()
+ instance['host'] = None # make it think host was never set
+ self.compute_api.delete(self.context, instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.DELETING)
+
+ db.instance_destroy(self.context, instance['id'])
+
def test_delete_fail(self):
instance, instance_uuid = self._run_instance()
@@ -3945,7 +3963,7 @@ class ComputePolicyTestCase(BaseTestCase):
nova.policy.reset()
def _set_rules(self, rules):
- nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
+ common_policy.set_brain(common_policy.HttpBrain(rules))
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
@@ -3954,12 +3972,7 @@ class ComputePolicyTestCase(BaseTestCase):
nova.compute.api.check_policy(self.context, 'reboot', {})
def test_wrapped_method(self):
- instance = self._create_fake_instance()
- # Reset this to None for this policy check. If it's set, it
- # tries to do a compute_api.update() and we're not testing for
- # that here.
- instance['host'] = None
- self.compute.run_instance(self.context, instance['uuid'])
+ instance = self._create_fake_instance(params={'host': None})
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
@@ -4191,3 +4204,130 @@ class KeypairAPITestCase(BaseTestCase):
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', self.pub_key)
+
+
+class DisabledInstanceTypesTestCase(BaseTestCase):
+ """
+    Some instance-types are marked 'disabled', which means that they will not
+    show up in customer-facing listings. We do, however, want those
+    instance-types to be available for emergency migrations and for rebuilding
+    of existing instances.
+
+    One legitimate use of the 'disabled' field would be when phasing out a
+    particular instance-type. We still want customers to be able to use an
+    instance of the old type, and we want Ops to be able to perform
+    migrations against it, but we *don't* want customers building new slices
+    with this phased-out instance-type.
+ """
+ def setUp(self):
+ super(DisabledInstanceTypesTestCase, self).setUp()
+ self.compute_api = compute.API()
+ self.inst_type = instance_types.get_default_instance_type()
+
+ def test_can_build_instance_from_visible_instance_type(self):
+ self.inst_type['disabled'] = False
+
+ self.assertNotRaises(exception.InstanceTypeNotFound,
+ self.compute_api.create, self.context, self.inst_type, None,
+ exc_msg="Visible instance-types can be built from")
+
+ def test_cannot_build_instance_from_disabled_instance_type(self):
+ self.inst_type['disabled'] = True
+ self.assertRaises(exception.InstanceTypeNotFound,
+ self.compute_api.create, self.context, self.inst_type, None)
+
+ def test_can_rebuild_instance_from_visible_instance_type(self):
+ instance = self._create_fake_instance()
+ image_href = None
+ admin_password = 'blah'
+
+        instance['instance_type']['disabled'] = False
+
+ # Assert no errors were raised
+ self.assertNotRaises(None,
+ self.compute_api.rebuild, self.context, instance, image_href,
+ admin_password,
+ exc_msg="Visible instance-types can be rebuilt from")
+
+ def test_can_rebuild_instance_from_disabled_instance_type(self):
+ """
+ A rebuild or a restore should only change the 'image',
+ not the 'instance_type'. Therefore, should be allowed even
+ when the slice is on disabled type already.
+ """
+ instance = self._create_fake_instance()
+ image_href = None
+ admin_password = 'blah'
+
+ instance['instance_type']['disabled'] = True
+
+ # Assert no errors were raised
+ self.assertNotRaises(None,
+ self.compute_api.rebuild, self.context, instance, image_href,
+ admin_password,
+ exc_msg="Disabled instance-types can be rebuilt from")
+
+ def test_can_resize_to_visible_instance_type(self):
+ instance = self._create_fake_instance()
+        orig_get_instance_type_by_flavor_id = \
+ instance_types.get_instance_type_by_flavor_id
+
+ def fake_get_instance_type_by_flavor_id(flavor_id):
+ instance_type = orig_get_instance_type_by_flavor_id(flavor_id)
+ instance_type['disabled'] = False
+ return instance_type
+
+ self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
+ fake_get_instance_type_by_flavor_id)
+
+ # FIXME(sirp): for legacy this raises FlavorNotFound instead of
+        # InstanceTypeNotFound; we should eventually make it raise
+ # InstanceTypeNotFound for consistency.
+ self.assertNotRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context, instance, '4',
+ exc_msg="Visible flavors can be resized to")
+
+ def test_cannot_resize_to_disabled_instance_type(self):
+ instance = self._create_fake_instance()
+ orig_get_instance_type_by_flavor_id = \
+ instance_types.get_instance_type_by_flavor_id
+
+ def fake_get_instance_type_by_flavor_id(flavor_id):
+ instance_type = orig_get_instance_type_by_flavor_id(flavor_id)
+ instance_type['disabled'] = True
+ return instance_type
+
+ self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
+ fake_get_instance_type_by_flavor_id)
+
+ # FIXME(sirp): for legacy this raises FlavorNotFound instead of
+        # InstanceTypeNotFound; we should eventually make it raise
+ # InstanceTypeNotFound for consistency.
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context, instance, '4')
+
+ def test_can_migrate_to_visible_instance_type(self):
+ instance = self._create_fake_instance()
+ instance['instance_type']['disabled'] = False
+
+ # FIXME(sirp): for legacy this raises FlavorNotFound instead of
+        # InstanceTypeNotFound; we should eventually make it raise
+ # InstanceTypeNotFound for consistency.
+ self.assertNotRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context, instance, None,
+ exc_msg="Visible flavors can be migrated to")
+
+ def test_can_migrate_to_disabled_instance_type(self):
+ """
+        We don't want to require a customer's instance-type to change when ops
+ is migrating a failed server.
+ """
+ instance = self._create_fake_instance()
+ instance['instance_type']['disabled'] = True
+
+ # FIXME(sirp): for legacy this raises FlavorNotFound instead of
+        # InstanceTypeNotFound; we should eventually make it raise
+ # InstanceTypeNotFound for consistency.
+ self.assertNotRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context, instance, None,
+ exc_msg="Disabled flavors can be migrated to")
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 23ac55fc3..47fb10645 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -58,12 +58,13 @@ class ComputeRpcAPITestCase(test.TestCase):
instance = expected_msg['args']['instance']
del expected_msg['args']['instance']
if method in ['rollback_live_migration_at_destination',
- 'pre_live_migration',
+ 'pre_live_migration', 'remove_volume_connection',
'post_live_migration_at_destination']:
expected_msg['args']['instance_id'] = instance['id']
+ elif method == 'get_instance_disk_info':
+ expected_msg['args']['instance_name'] = instance['name']
else:
expected_msg['args']['instance_uuid'] = instance['uuid']
-
expected_msg['version'] = rpcapi.RPC_API_VERSION
cast_and_call = ['confirm_resize', 'stop_instance']
@@ -105,6 +106,14 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance, volume_id='id', mountpoint='mp')
+ def test_check_shared_storage_test_file(self):
+ self._test_compute_api('check_shared_storage_test_file', 'call',
+ filename='fn', host='host')
+
+ def test_cleanup_shared_storage_test_file(self):
+ self._test_compute_api('cleanup_shared_storage_test_file', 'cast',
+ filename='fn', host='host')
+
def test_compare_cpu(self):
self._test_compute_api('compare_cpu', 'call', cpu_info='info',
host='host')
@@ -117,6 +126,10 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('confirm_resize', 'call',
instance=self.fake_instance, migration_id='id', host='host')
+ def test_create_shared_storage_test_file(self):
+ self._test_compute_api('create_shared_storage_test_file', 'call',
+ host='host')
+
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance, volume_id='id')
@@ -145,6 +158,10 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('get_diagnostics', 'call',
instance=self.fake_instance)
+ def test_get_instance_disk_info(self):
+ self._test_compute_api('get_instance_disk_info', 'call',
+ instance=self.fake_instance)
+
def test_get_vnc_console(self):
self._test_compute_api('get_vnc_console', 'call',
instance=self.fake_instance, console_type='type')
@@ -221,6 +238,10 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
instance=self.fake_instance, address='addr')
+ def test_remove_volume_connection(self):
+ self._test_compute_api('remove_volume_connection', 'call',
+ instance=self.fake_instance, volume_id='id', host='host')
+
def test_rescue_instance(self):
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance, rescue_password='pw')
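
For reference, the messages these helpers assert against follow nova's standard RPC envelope; a remove_volume_connection call, for example, is expected to look roughly like this (a sketch assembled from the assertions in this hunk):

    # Expected RPC message shape (sketch). Note remove_volume_connection
    # is one of the methods that sends instance_id rather than
    # instance_uuid.
    expected_msg = {
        'method': 'remove_volume_connection',
        'args': {'instance_id': instance['id'], 'volume_id': 'id'},
        'version': rpcapi.RPC_API_VERSION,
    }
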
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index f25a9be45..2f5bc2a3b 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -33,6 +33,7 @@ flags.DECLARE('volume_driver', 'nova.volume.manager')
def set_defaults(conf):
conf.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini')
conf.set_default('auth_driver', 'nova.auth.dbdriver.DbDriver')
+ conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
conf.set_default('connection_type', 'fake')
conf.set_default('fake_network', True)
conf.set_default('fake_rabbit', True)
@@ -47,3 +48,5 @@ def set_defaults(conf):
conf.set_default('use_ipv6', True)
conf.set_default('verbose', True)
conf.set_default('volume_driver', 'nova.volume.driver.FakeISCSIDriver')
+ conf.set_default('rpc_response_timeout', 5)
+ conf.set_default('rpc_cast_timeout', 5)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 2d59c49e3..dccc879a7 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -56,7 +56,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def throw_error(*_):
raise Exception()
- self.stubs.Set(nova.virt.fake.FakeConnection, 'spawn', throw_error)
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
diff --git a/nova/tests/rpc/test_kombu.py b/nova/tests/rpc/test_kombu.py
index a69dcdfe9..0055f253c 100644
--- a/nova/tests/rpc/test_kombu.py
+++ b/nova/tests/rpc/test_kombu.py
@@ -61,7 +61,6 @@ class RpcKombuTestCase(common.BaseRpcAMQPTestCase):
def setUp(self):
if kombu:
self.rpc = impl_kombu
- impl_kombu.register_opts(FLAGS)
else:
self.rpc = None
super(RpcKombuTestCase, self).setUp()
diff --git a/nova/tests/rpc/test_kombu_ssl.py b/nova/tests/rpc/test_kombu_ssl.py
index e17521981..f74d38492 100644
--- a/nova/tests/rpc/test_kombu_ssl.py
+++ b/nova/tests/rpc/test_kombu_ssl.py
@@ -44,7 +44,6 @@ class RpcKombuSslTestCase(test.TestCase):
def setUp(self):
super(RpcKombuSslTestCase, self).setUp()
if kombu:
- impl_kombu.register_opts(FLAGS)
self.flags(kombu_ssl_keyfile=SSL_KEYFILE,
kombu_ssl_ca_certs=SSL_CA_CERT,
kombu_ssl_certfile=SSL_CERT,
diff --git a/nova/tests/rpc/test_qpid.py b/nova/tests/rpc/test_qpid.py
index c523f3fe3..1b21158f8 100644
--- a/nova/tests/rpc/test_qpid.py
+++ b/nova/tests/rpc/test_qpid.py
@@ -66,7 +66,6 @@ class RpcQpidTestCase(test.TestCase):
self.mock_receiver = None
if qpid:
- impl_qpid.register_opts(FLAGS)
self.orig_connection = qpid.messaging.Connection
self.orig_session = qpid.messaging.Session
self.orig_sender = qpid.messaging.Sender
diff --git a/nova/tests/rpc/test_zmq.py b/nova/tests/rpc/test_zmq.py
new file mode 100644
index 000000000..27ae4e6fb
--- /dev/null
+++ b/nova/tests/rpc/test_zmq.py
@@ -0,0 +1,128 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudscaling Group, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using zeromq
+"""
+
+import os
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import test
+from nova.tests.rpc import common
+from nova import utils
+
+try:
+ from eventlet.green import zmq
+ from nova.rpc import impl_zmq
+except ImportError:
+ zmq = None
+ impl_zmq = None
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
+class _RpcZmqBaseTestCase(common.BaseRpcTestCase):
+ @test.skip_if(zmq is None, "Test requires zmq")
+ def setUp(self, topic='test', topic_nested='nested'):
+ if not impl_zmq:
+ return None
+
+ self.reactor = None
+ FLAGS.register_opts(rpc.rpc_opts)
+ self.rpc = impl_zmq
+ self.rpc.register_opts(FLAGS)
+ FLAGS.set_default('rpc_zmq_matchmaker',
+ 'mod_matchmaker.MatchMakerLocalhost')
+
+ # We'll change this if we detect no daemon running.
+ ipc_dir = FLAGS.rpc_zmq_ipc_dir
+
+ # Only launch the router if it isn't running independently.
+ if not os.path.exists(os.path.join(ipc_dir, "zmq_topic_zmq_replies")):
+ LOG.info(_("Running internal zmq receiver."))
+ # The normal ipc_dir default needs to run as root,
+ # /tmp is easier within a testing environment.
+ FLAGS.set_default('rpc_zmq_ipc_dir', '/tmp/nova-zmq.ipc.test')
+
+ # Value has changed.
+ ipc_dir = FLAGS.rpc_zmq_ipc_dir
+
+ try:
+ # Only launch the receiver if it isn't running independently.
+ # This is checked again, with the (possibly) new ipc_dir.
+ if os.path.exists(os.path.join(ipc_dir, "zmq_topic_zmq_replies")):
+ LOG.warning(_("Detected zmq-receiver socket. "
+ "Assuming nova-rpc-zmq-receiver is running."))
+ return
+
+ if not os.path.isdir(ipc_dir):
+ os.mkdir(ipc_dir)
+
+ self.reactor = impl_zmq.ZmqProxy(FLAGS)
+ consume_in = "tcp://%s:%s" % \
+ (FLAGS.rpc_zmq_bind_address,
+ FLAGS.rpc_zmq_port)
+ consumption_proxy = impl_zmq.InternalContext(None)
+
+ self.reactor.register(consumption_proxy,
+ consume_in, zmq.PULL, out_bind=True)
+ self.reactor.consume_in_thread()
+ except zmq.ZMQError:
+ assert False, _("Could not create ZeroMQ receiver daemon. "
+ "Socket may already be in use.")
+ except exception.ProcessExecutionError:
+ assert False, _("Could not create IPC directory %s") % \
+ (ipc_dir, )
+ finally:
+ super(_RpcZmqBaseTestCase, self).setUp(
+ topic=topic, topic_nested=topic_nested)
+
+ def tearDown(self):
+ if not impl_zmq:
+ return None
+ if self.reactor:
+ self.reactor.close()
+
+ try:
+ utils.execute('rm', '-rf', FLAGS.rpc_zmq_ipc_dir)
+ except exception.ProcessExecutionError:
+ pass
+
+ super(_RpcZmqBaseTestCase, self).tearDown()
+
+
+class RpcZmqBaseTopicTestCase(_RpcZmqBaseTestCase):
+ """
+ This tests topics such as 'test' and 'nested',
+ without any .host appended; it stresses the matchmaker.
+ """
+ pass
+
+
+class RpcZmqDirectTopicTestCase(_RpcZmqBaseTestCase):
+ """
+ Tests communication directly to a host;
+ these tests use 'localhost'.
+ """
+ def setUp(self):
+ super(RpcZmqDirectTopicTestCase, self).setUp(
+ topic='test.localhost',
+ topic_nested='nested.localhost')
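
When no external nova-rpc-zmq-receiver is detected, the setUp above stands up its own ZmqProxy and registers a PULL socket for incoming messages. The underlying ZeroMQ pattern is a bound PULL endpoint that peers PUSH to; a bare sketch (the port is nova's assumed rpc_zmq_port default, not taken from this diff):

    # Bare PULL-side sketch of what the internal reactor binds
    # (eventlet.green.zmq, as imported in the test above).
    from eventlet.green import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PULL)
    sock.bind('tcp://*:9501')        # assumed rpc_zmq_port default
    parts = sock.recv_multipart()    # topic frame(s) + serialized message
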
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 2fb432f52..3a2205aea 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -468,6 +468,29 @@ class SchedulerTestCase(test.TestCase):
block_migration=block_migration,
disk_over_commit=disk_over_commit)
+ def _check_shared_storage(self, dest, instance, check_result):
+ tmp_filename = 'test-filename'
+ rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
+ ).AndReturn(tmp_filename)
+ rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename},
+ 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION}, None
+ ).AndReturn(check_result)
+ rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ rpc.cast(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename},
+ 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION})
+
def test_live_migration_all_checks_pass(self):
"""Test live migration when all checks pass."""
@@ -508,24 +531,14 @@ class SchedulerTestCase(test.TestCase):
instance['host']).AndReturn('src_queue1')
rpc.call(self.context, 'src_queue1',
{'method': 'get_instance_disk_info',
- 'args': {'instance_name': instance['name']}}).AndReturn(
+ 'args': {'instance_name': instance['name']},
+ 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION},
+ None).AndReturn(
json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
- # Common checks (shared storage ok, same hypervisor,e tc)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(False)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ # Common checks (shared storage ok, same hypervisor, etc)
+ self._check_shared_storage(dest, instance, False)
+
db.service_get_all_compute_by_host(self.context, dest).AndReturn(
[{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]}])
@@ -723,7 +736,9 @@ class SchedulerTestCase(test.TestCase):
instance['host']).AndReturn('src_queue')
rpc.call(self.context, 'src_queue',
{'method': 'get_instance_disk_info',
- 'args': {'instance_name': instance['name']}}).AndReturn(
+ 'args': {'instance_name': instance['name']},
+ 'version': compute_rpcapi.ComputeAPI.RPC_API_VERSION},
+ None).AndReturn(
json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
self.mox.ReplayAll()
@@ -753,20 +768,7 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_dest_check(self.context, instance,
dest, block_migration, disk_over_commit)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(False)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ self._check_shared_storage(dest, instance, False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
@@ -795,20 +797,7 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_dest_check(self.context, instance,
dest, block_migration, disk_over_commit)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(False)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ self._check_shared_storage(dest, instance, False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
@@ -836,20 +825,8 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_dest_check(self.context, instance,
dest, block_migration, disk_over_commit)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(True)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ self._check_shared_storage(dest, instance, True)
+
db.service_get_all_compute_by_host(self.context, dest).AndReturn(
[{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]}])
@@ -885,20 +862,8 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_dest_check(self.context, instance,
dest, block_migration, disk_over_commit)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(True)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ self._check_shared_storage(dest, instance, True)
+
db.service_get_all_compute_by_host(self.context, dest).AndReturn(
[{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]}])
@@ -933,20 +898,8 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_dest_check(self.context, instance,
dest, block_migration, disk_over_commit)
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- dest).AndReturn('dest_queue')
- rpc.queue_get_for(self.context, FLAGS.compute_topic,
- instance['host']).AndReturn('src_queue')
- tmp_filename = 'test-filename'
- rpc.call(self.context, 'dest_queue',
- {'method': 'create_shared_storage_test_file'}
- ).AndReturn(tmp_filename)
- rpc.call(self.context, 'src_queue',
- {'method': 'check_shared_storage_test_file',
- 'args': {'filename': tmp_filename}}).AndReturn(True)
- rpc.cast(self.context, 'dest_queue',
- {'method': 'cleanup_shared_storage_test_file',
- 'args': {'filename': tmp_filename}})
+ self._check_shared_storage(dest, instance, True)
+
db.service_get_all_compute_by_host(self.context, dest).AndReturn(
[{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]}])
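
The repeated mock sequences collapsed into _check_shared_storage describe a three-step probe the scheduler driver performs over RPC: create a test file on the destination host, check for it from the source host, then clean it up on the destination. In outline (a sketch reconstructed from the mocked calls above, not the driver source):

    # Sketch of the shared-storage probe described by the mocks above.
    def _mounted_on_same_shared_storage(context, instance, dest):
        version = compute_rpcapi.ComputeAPI.RPC_API_VERSION
        dest_queue = rpc.queue_get_for(context, FLAGS.compute_topic, dest)
        filename = rpc.call(context, dest_queue,
                            {'method': 'create_shared_storage_test_file',
                             'args': {}, 'version': version}, None)
        src_queue = rpc.queue_get_for(context, FLAGS.compute_topic,
                                      instance['host'])
        # True only when source and destination see the same filesystem.
        result = rpc.call(context, src_queue,
                          {'method': 'check_shared_storage_test_file',
                           'args': {'filename': filename},
                           'version': version}, None)
        dest_queue = rpc.queue_get_for(context, FLAGS.compute_topic, dest)
        rpc.cast(context, dest_queue,
                 {'method': 'cleanup_shared_storage_test_file',
                  'args': {'filename': filename},
                  'version': version})
        return result
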
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index eaf0d4815..afee021cd 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -91,7 +91,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
def setUp(self):
super(_AuthManagerBaseTestCase, self).setUp()
self.flags(auth_driver=self.auth_driver,
- connection_type='fake')
+ compute_driver='nova.virt.fake.FakeDriver')
self.manager = manager.AuthManager(new=True)
self.manager.mc.cache = {}
diff --git a/nova/tests/test_compute_utils.py b/nova/tests/test_compute_utils.py
index cc069c9d5..5fbf2e80c 100644
--- a/nova/tests/test_compute_utils.py
+++ b/nova/tests/test_compute_utils.py
@@ -48,7 +48,7 @@ class UsageInfoTestCase(test.TestCase):
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 8ce2ab6ee..292288c65 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -181,7 +181,7 @@ class DbApiTestCase(test.TestCase):
# Retrieve the user-provided metadata to ensure it was successfully
# updated
- instance_meta = db.instance_metadata_get(ctxt, instance.id)
+ instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
self.assertEqual('bar', instance_meta['host'])
# Retrieve the system metadata to ensure it was successfully updated
@@ -204,7 +204,7 @@ class DbApiTestCase(test.TestCase):
# Retrieve the user-provided metadata to ensure it was successfully
# updated
- instance_meta = db.instance_metadata_get(ctxt, instance.id)
+ instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
self.assertEqual('bar', instance_meta['host'])
# Retrieve the system metadata to ensure it was successfully updated
@@ -840,3 +840,40 @@ class TestIpAllocation(test.TestCase):
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip.instance_id, self.instance.id)
self.assertEqual(fixed_ip.network_id, self.network.id)
+
+
+class InstanceDestroyConstraints(test.TestCase):
+
+ def test_destroy_with_equal_any_constraint_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'task_state': 'deleting'})
+ constraint = db.constraint(task_state=db.equal_any('deleting'))
+ db.instance_destroy(ctx, instance['uuid'], constraint)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ ctx, instance['uuid'])
+
+ def test_destroy_with_equal_any_constraint_not_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'vm_state': 'resize'})
+ constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
+ self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
+ ctx, instance['uuid'], constraint)
+ instance = db.instance_get_by_uuid(ctx, instance['uuid'])
+ self.assertFalse(instance['deleted'])
+
+ def test_destroy_with_not_equal_constraint_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'task_state': 'deleting'})
+ constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
+ db.instance_destroy(ctx, instance['uuid'], constraint)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ ctx, instance['uuid'])
+
+ def test_destroy_with_not_equal_constraint_not_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'vm_state': 'active'})
+ constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
+ self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
+ ctx, instance['uuid'], constraint)
+ instance = db.instance_get_by_uuid(ctx, instance['uuid'])
+ self.assertFalse(instance['deleted'])
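
equal_any and not_equal build the two condition types the constraint API supports: destroy proceeds only if the named column matches one of the given values (equal_any) or none of them (not_equal); otherwise ConstraintNotMet is raised and the row is left untouched. A combined usage sketch, with the same names the tests use (combining both condition types in one constraint is assumed to be supported):

    # Sketch: only destroy an instance that is mid-delete and not
    # mid-resize; otherwise exception.ConstraintNotMet is raised.
    constraint = db.constraint(
        task_state=db.equal_any('deleting'),
        vm_state=db.not_equal('resize', 'error'))
    db.instance_destroy(ctx, instance['uuid'], constraint)
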
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 9e6dedfe0..958a00874 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -56,6 +56,9 @@ try:
import libvirt
connection.libvirt = libvirt
except ImportError:
+ # TODO(sdague): there should be a cleaner way to handle this
+ # in the case where libvirt python isn't installed
+ connection.libvirt = ""
libvirt = None
@@ -128,7 +131,7 @@ class LibvirtVolumeTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
- class FakeLibvirtConnection(object):
+ class FakeLibvirtDriver(object):
def __init__(self, hyperv="QEMU"):
self.hyperv = hyperv
@@ -138,7 +141,7 @@ class LibvirtVolumeTestCase(test.TestCase):
def get_all_block_devices(self):
return []
- self.fake_conn = FakeLibvirtConnection()
+ self.fake_conn = FakeLibvirtDriver()
self.connr = {
'ip': '127.0.0.1',
'initiator': 'fake_initiator',
@@ -348,7 +351,7 @@ class CacheConcurrencyTestCase(test.TestCase):
def test_same_fname_concurrency(self):
"""Ensures that the same fname cache runs at a sequentially"""
- conn = connection.LibvirtConnection
+ conn = connection.LibvirtDriver
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -369,7 +372,7 @@ class CacheConcurrencyTestCase(test.TestCase):
def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent"""
- conn = connection.LibvirtConnection
+ conn = connection.LibvirtDriver
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -439,17 +442,17 @@ class LibvirtConnTestCase(test.TestCase):
'instance_type_id': '5'} # m1.small
def create_fake_libvirt_mock(self, **kwargs):
- """Defining mocks for LibvirtConnection(libvirt is not used)."""
+ """Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
- class FakeLibvirtConnection(object):
+ class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
volume_driver = 'iscsi=nova.tests.test_libvirt.FakeVolumeDriver'
self.flags(libvirt_volume_drivers=[volume_driver])
- fake = FakeLibvirtConnection()
+ fake = FakeLibvirtDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
@@ -457,8 +460,8 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(image_service='nova.image.fake.FakeImageService')
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn = fake
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
@@ -482,7 +485,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(my_ip=ip)
self.flags(host=host)
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
expected = {
'ip': ip,
'initiator': initiator,
@@ -495,7 +498,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertDictMatch(expected, result)
def test_get_guest_config(self):
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -524,7 +527,7 @@ class LibvirtConnTestCase(test.TestCase):
config.LibvirtConfigGuestGraphics)
def test_get_guest_config_with_two_nics(self):
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -556,7 +559,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, False,
@@ -631,12 +634,12 @@ class LibvirtConnTestCase(test.TestCase):
"cdrom", "ide")
def test_list_instances(self):
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByID = self.fake_lookup
- connection.LibvirtConnection._conn.listDomainsID = lambda: [0, 1]
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByID = self.fake_lookup
+ connection.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
instances = conn.list_instances()
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
@@ -683,12 +686,12 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup(id):
return FakeVirtDomain(xml[id])
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.listDomainsID = lambda: range(4)
- connection.LibvirtConnection._conn.lookupByID = fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.listDomainsID = lambda: range(4)
+ connection.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
@@ -742,13 +745,13 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_name(name):
return FakeVirtDomain(xml[1])
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.listDomainsID = lambda: range(4)
- connection.LibvirtConnection._conn.lookupByID = fake_lookup
- connection.LibvirtConnection._conn.lookupByName = fake_lookup_name
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.listDomainsID = lambda: range(4)
+ connection.LibvirtDriver._conn.lookupByID = fake_lookup
+ connection.LibvirtDriver._conn.lookupByName = fake_lookup_name
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
@@ -774,14 +777,14 @@ class LibvirtConnTestCase(test.TestCase):
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -808,14 +811,14 @@ class LibvirtConnTestCase(test.TestCase):
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -843,14 +846,14 @@ class LibvirtConnTestCase(test.TestCase):
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -882,14 +885,14 @@ class LibvirtConnTestCase(test.TestCase):
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -916,14 +919,14 @@ class LibvirtConnTestCase(test.TestCase):
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -933,9 +936,9 @@ class LibvirtConnTestCase(test.TestCase):
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
- connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
@@ -945,7 +948,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, network_info, None, False)
tree = etree.fromstring(xml)
@@ -962,7 +965,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
self.assertEquals(conn.uri, 'lxc:///')
@@ -1005,7 +1008,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
@@ -1034,14 +1037,14 @@ class LibvirtConnTestCase(test.TestCase):
def connection_supports_direct_io_stub(*args, **kwargs):
return directio_supported
- self.stubs.Set(connection.LibvirtConnection,
+ self.stubs.Set(connection.LibvirtDriver,
'_supports_direct_io', connection_supports_direct_io_stub)
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+ xml = connection.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1052,8 +1055,8 @@ class LibvirtConnTestCase(test.TestCase):
directio_supported = False
# The O_DIRECT availability is cached on first use in
- # LibvirtConnection, hence we re-create it here
- xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+ # LibvirtDriver, hence we re-create it here
+ xml = connection.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1066,7 +1069,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+ xml = connection.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1079,7 +1082,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+ xml = connection.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1172,7 +1175,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
self.assertEquals(conn.uri, expected_uri)
@@ -1201,7 +1204,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtConnection(True)
+ conn = connection.LibvirtDriver(True)
self.assertEquals(conn.uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
@@ -1234,7 +1237,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1294,7 +1297,7 @@ class LibvirtConnTestCase(test.TestCase):
#start test
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
@@ -1315,7 +1318,7 @@ class LibvirtConnTestCase(test.TestCase):
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
@@ -1346,7 +1349,7 @@ class LibvirtConnTestCase(test.TestCase):
# qemu-img should be mocked since the test environment might not have
# large disk space.
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson % tmpdir)
@@ -1402,7 +1405,7 @@ class LibvirtConnTestCase(test.TestCase):
os.path.getsize('/test/disk.local').AndReturn((21474836480))
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
info = conn.get_instance_disk_info(instance_ref.name)
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
@@ -1434,7 +1437,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1492,10 +1495,10 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
- connection.LibvirtConnection._conn.lookupByName = fake_lookup
+ connection.LibvirtDriver._conn.lookupByName = fake_lookup
connection.libvirt_utils = fake_libvirt_utils
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
@@ -1536,16 +1539,16 @@ class LibvirtConnTestCase(test.TestCase):
return fp.read()
self.create_fake_libvirt_mock()
- connection.LibvirtConnection._conn.lookupByName = fake_lookup
- connection.LibvirtConnection._flush_libvirt_console = _fake_flush
+ connection.LibvirtDriver._conn.lookupByName = fake_lookup
+ connection.LibvirtDriver._flush_libvirt_console = _fake_flush
connection.libvirt_utils = fake_libvirt_utils
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
def test_get_host_ip_addr(self):
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
@@ -1555,7 +1558,7 @@ class LibvirtConnTestCase(test.TestCase):
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)):
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities")
@@ -1575,7 +1578,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.UnsetStubs()
def test_volume_in_mapping(self):
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
@@ -1612,9 +1615,9 @@ class LibvirtConnTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_immediate_delete(self):
- conn = connection.LibvirtConnection(False)
- self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = lambda x: None
+ conn = connection.LibvirtDriver(False)
+ self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
+ connection.LibvirtDriver._conn.lookupByName = lambda x: None
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
@@ -1633,7 +1636,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_by_name(instance_name):
return mock
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
@@ -1641,7 +1644,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_available_least_handles_missing(self):
"""Ensure destroy calls managedSaveRemove for saved instance"""
- conn = connection.LibvirtConnection(False)
+ conn = connection.LibvirtDriver(False)
def list_instances():
return ['fake']
@@ -1701,8 +1704,8 @@ class HostStateTestCase(test.TestCase):
return 13091
def test_update_status(self):
- self.mox.StubOutWithMock(connection, 'get_connection')
- connection.get_connection(True).AndReturn(self.FakeConnection())
+ self.mox.StubOutWithMock(connection, 'LibvirtDriver')
+ connection.LibvirtDriver(True).AndReturn(self.FakeConnection())
self.mox.ReplayAll()
hs = connection.HostState(True)
@@ -1759,11 +1762,11 @@ class IptablesFirewallTestCase(test.TestCase):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- class FakeLibvirtConnection(object):
+ class FakeLibvirtDriver(object):
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass
- self.fake_libvirt_connection = FakeLibvirtConnection()
+ self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
@@ -2307,7 +2310,7 @@ disk size: 4.4M''', ''))
def test_mkfs(self):
self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs', '-t', 'ext4', '/my/block/dev')
+ utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
utils.execute('mkswap', '/my/swap/block/dev')
self.mox.ReplayAll()
@@ -2452,11 +2455,11 @@ disk size: 4.4M''', ''))
self.assertEqual(out, 'c')
-class LibvirtConnectionTestCase(test.TestCase):
- """Test for nova.virt.libvirt.connection.LibvirtConnection."""
+class LibvirtDriverTestCase(test.TestCase):
+ """Test for nova.virt.libvirt.connection.LibvirtDriver."""
def setUp(self):
- super(LibvirtConnectionTestCase, self).setUp()
- self.libvirtconnection = connection.LibvirtConnection(read_only=True)
+ super(LibvirtDriverTestCase, self).setUp()
+ self.libvirtconnection = connection.LibvirtDriver(read_only=True)
def _create_instance(self, params=None):
"""Create a test instance"""
@@ -2575,7 +2578,7 @@ class LibvirtConnectionTestCase(test.TestCase):
elif instance['name'] == "running":
return {'state': power_state.RUNNING}
else:
- return {'state': power_state.SHUTOFF}
+ return {'state': power_state.SHUTDOWN}
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
@@ -2693,5 +2696,5 @@ class LibvirtNonblockingTestCase(test.TestCase):
def test_connection_to_primitive(self):
"""Test bug 962840"""
import nova.virt.libvirt.connection
- connection = nova.virt.libvirt.connection.get_connection('')
+ connection = nova.virt.libvirt.connection.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
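
One behavioral detail in this file: the expected mkfs invocation gains a -F flag. For ext-family filesystems, mke2fs prompts for confirmation when the target is not a partition-style block device, and -F forces creation without that prompt, which matters for unattended runs. The mocked expectation is simply:

    # -F keeps mke2fs from prompting ("... is not a block special
    # device. Proceed anyway?") during unattended runs.
    utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
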
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index daf96df47..b144dd9d4 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -50,7 +50,7 @@ class NotificationsTestCase(test.TestCase):
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager',
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index f4219d74d..8ee9d0f21 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -21,11 +21,10 @@ import os.path
import StringIO
import urllib2
-from nova.common import policy as common_policy
-import nova.common.policy
from nova import context
from nova import exception
from nova import flags
+from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova import utils
@@ -169,8 +168,8 @@ class DefaultPolicyTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake')
def _set_brain(self, default_rule):
- brain = nova.common.policy.HttpBrain(self.rules, default_rule)
- nova.common.policy.set_brain(brain)
+ brain = common_policy.HttpBrain(self.rules, default_rule)
+ common_policy.set_brain(brain)
def tearDown(self):
super(DefaultPolicyTestCase, self).tearDown()
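
After the module move, the Brain API is unchanged: a Brain is built from a rules mapping plus a default rule name, then installed globally with set_brain. A usage sketch mirroring the test's _set_brain (the rules dict shape here is an assumption for illustration):

    from nova.openstack.common import policy as common_policy

    # Build and install a policy brain (sketch; rules maps each policy
    # name to its match list, and 'default' names the fallback rule).
    brain = common_policy.HttpBrain({'example:allowed': []}, 'default')
    common_policy.set_brain(brain)
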
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index f17243cdc..3aee30911 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -46,7 +46,7 @@ class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 95990cee5..87684ec59 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -23,6 +23,7 @@ from nova import exception
from nova import flags
from nova import image
from nova import log as logging
+from nova.openstack.common import importutils
from nova import test
from nova.tests import utils as test_utils
@@ -55,7 +56,7 @@ def catch_notimplementederror(f):
class _VirtDriverTestCase(test.TestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
- self.connection = self.driver_module.get_connection('')
+ self.connection = importutils.import_object(self.driver_module, '')
self.ctxt = test_utils.get_test_admin_context()
self.image_service = image.get_default_image_service()
@@ -395,21 +396,23 @@ class _VirtDriverTestCase(test.TestCase):
class AbstractDriverTestCase(_VirtDriverTestCase):
def setUp(self):
- import nova.virt.driver
+ from nova.virt.driver import ComputeDriver
- self.driver_module = nova.virt.driver
+ self.driver_module = "nova.virt.driver.ComputeDriver"
- def get_driver_connection(_):
- return nova.virt.driver.ComputeDriver()
+ # TODO(sdague): the abstract driver doesn't have a constructor,
+ # add one now that the loader loads classes directly
+ def __new_init__(self, read_only=False):
+ super(ComputeDriver, self).__init__()
+
+ ComputeDriver.__init__ = __new_init__
- self.driver_module.get_connection = get_driver_connection
super(AbstractDriverTestCase, self).setUp()
class FakeConnectionTestCase(_VirtDriverTestCase):
def setUp(self):
- import nova.virt.fake
- self.driver_module = nova.virt.fake
+ self.driver_module = 'nova.virt.fake.FakeDriver'
super(FakeConnectionTestCase, self).setUp()
@@ -439,7 +442,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase):
FLAGS.instances_path = ''
# Point _VirtDriverTestCase at the right module
- self.driver_module = nova.virt.libvirt.connection
+ self.driver_module = 'nova.virt.libvirt.connection.LibvirtDriver'
super(LibvirtConnTestCase, self).setUp()
self.flags(firewall_driver=nova.virt.libvirt.firewall.drivers[0],
rescue_image_id="2",
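
These test cases now pass a dotted class path and let importutils.import_object instantiate the driver, replacing the per-module get_connection factories. A rough sketch of what that helper does, for orientation:

    # Rough sketch of importutils.import_object: import the class named
    # by the dotted path, then instantiate it with the given arguments.
    def import_object(import_str, *args, **kwargs):
        module_name, _, class_name = import_str.rpartition('.')
        module = __import__(module_name, fromlist=[class_name])
        return getattr(module, class_name)(*args, **kwargs)

    driver = import_object('nova.virt.fake.FakeDriver', '')
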
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index ca852c86a..b47b34335 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -51,7 +51,7 @@ class VMWareAPIVMTestCase(test.TestCase):
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
- self.conn = vmwareapi_conn.get_connection(False)
+ self.conn = vmwareapi_conn.VMWareESXDriver(False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 20ac495c9..c8135cc29 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -48,7 +48,7 @@ class VolumeTestCase(test.TestCase):
def setUp(self):
super(VolumeTestCase, self).setUp()
self.compute = importutils.import_object(FLAGS.compute_manager)
- self.flags(connection_type='fake')
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
'nova.notifier.test_notifier')
self.volume = importutils.import_object(FLAGS.volume_manager)
diff --git a/nova/tests/test_volume_utils.py b/nova/tests/test_volume_utils.py
index da84dd5d2..222f12266 100644
--- a/nova/tests/test_volume_utils.py
+++ b/nova/tests/test_volume_utils.py
@@ -36,7 +36,7 @@ class UsageInfoTestCase(test.TestCase):
def setUp(self):
super(UsageInfoTestCase, self).setUp()
- self.flags(connection_type='fake',
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True,
host='fake')
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 7283fe585..62bed35a0 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -153,8 +153,7 @@ class XenAPIVolumeTestCase(test.TestCase):
}
for (input, expected) in cases.iteritems():
- func = volume_utils.VolumeHelper.mountpoint_to_number
- actual = func(input)
+ actual = volume_utils.mountpoint_to_number(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
@@ -165,7 +164,7 @@ class XenAPIVolumeTestCase(test.TestCase):
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
- volume_utils.VolumeHelper.parse_volume_info,
+ volume_utils.parse_volume_info,
self._make_info(),
'dev/sd'
)
@@ -174,7 +173,7 @@ class XenAPIVolumeTestCase(test.TestCase):
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
@@ -192,7 +191,7 @@ class XenAPIVolumeTestCase(test.TestCase):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
@@ -228,7 +227,7 @@ class XenAPIVMTestCase(test.TestCase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
@@ -699,19 +698,19 @@ class XenAPIVMTestCase(test.TestCase):
def test_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
- vm_ref = vm_utils.VMHelper.lookup(session, instance.name)
+ vm_ref = vm_utils.lookup(session, instance.name)
xenapi_fake.create_vbd(vm_ref, "swap", userdevice=1)
xenapi_fake.create_vbd(vm_ref, "rootfs", userdevice=0)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta)
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
- rescue_ref = vm_utils.VMHelper.lookup(session, rescue_name)
+ rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_uuids = []
@@ -721,7 +720,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_unrescue(self):
instance = self._create_instance()
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
@@ -729,7 +728,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
@@ -745,7 +744,7 @@ class XenAPIVMTestCase(test.TestCase):
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
@@ -869,17 +868,18 @@ class XenAPIMigrateInstance(test.TestCase):
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(6, 0, 0))
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
- conn._vmops._resize_instance(instance, vdi_uuid)
+ conn._vmops._resize_instance(instance,
+ {'uuid': vdi_uuid, 'ref': vdi_ref})
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
@@ -892,7 +892,7 @@ class XenAPIMigrateInstance(test.TestCase):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
@@ -919,7 +919,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -951,7 +951,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -973,7 +973,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
@@ -989,7 +989,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.get_connection(False)
+ conn = xenapi_conn.XenAPIDriver(False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
@@ -1030,7 +1030,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, image_meta, expected_disk_type):
- actual = vm_utils.VMHelper.determine_disk_image_type(image_meta)
+ actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
@@ -1079,7 +1079,7 @@ class XenAPIHostTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
xenapi_fake.create_local_srs()
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
def test_host_state(self):
stats = self.conn.get_host_stats()
@@ -1131,7 +1131,7 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1149,14 +1149,11 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
- @classmethod
- def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True):
pass
- self.stubs.Set(vm_utils.VMHelper,
- "create_vbd",
- fake_create_vbd)
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
@@ -1166,14 +1163,18 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
- instance = db.instance_create(self.context, self.instance_values)
+ ctx = context.RequestContext(self.user_id, self.project_id)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+
disk_image_type = vm_utils.ImageType.DISK_VHD
- vm_ref = "blah"
- first_vdi_ref = "blah"
- vdis = ["blah"]
+ instance = db.instance_create(self.context, self.instance_values)
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
- self.conn._vmops._attach_disks(
- instance, disk_image_type, vm_ref, first_vdi_ref, vdis)
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.conn._vmops._attach_disks(instance, disk_image_type, vm_ref, vdis)
self.assertEqual(marker["partition_called"], called)
@@ -1224,7 +1225,7 @@ class XenAPIGenerateLocal(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1242,24 +1243,25 @@ class XenAPIGenerateLocal(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
- @classmethod
- def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True):
pass
- self.stubs.Set(vm_utils.VMHelper,
- "create_vbd",
- fake_create_vbd)
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance):
+ ctx = context.RequestContext(self.user_id, self.project_id)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+
disk_image_type = vm_utils.ImageType.DISK_VHD
- vm_ref = "blah"
- first_vdi_ref = "blah"
- vdis = ["blah"]
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
- self.conn._vmops._attach_disks(instance, disk_image_type,
- vm_ref, first_vdi_ref, vdis)
+ self.conn._vmops._attach_disks(instance, disk_image_type, vm_ref, vdis)
self.assertTrue(self.called)
def test_generate_swap(self):
@@ -1268,11 +1270,9 @@ class XenAPIGenerateLocal(test.TestCase):
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 5})
- @classmethod
- def fake_generate_swap(cls, *args, **kwargs):
+ def fake_generate_swap(*args, **kwargs):
self.called = True
- self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
- fake_generate_swap)
+ self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
@@ -1282,11 +1282,9 @@ class XenAPIGenerateLocal(test.TestCase):
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 4})
- @classmethod
- def fake_generate_ephemeral(cls, *args):
+ def fake_generate_ephemeral(*args):
self.called = True
- self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
- fake_generate_ephemeral)
+ self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
@@ -1294,7 +1292,7 @@ class XenAPIGenerateLocal(test.TestCase):
class XenAPIBWUsageTestCase(test.TestCase):
def setUp(self):
super(XenAPIBWUsageTestCase, self).setUp()
- self.stubs.Set(vm_utils.VMHelper, "compile_metrics",
+ self.stubs.Set(vm_utils, 'compile_metrics',
XenAPIBWUsageTestCase._fake_compile_metrics)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
@@ -1303,7 +1301,7 @@ class XenAPIBWUsageTestCase(test.TestCase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
@classmethod
def _fake_compile_metrics(cls, start_time, stop_time=None):
@@ -1380,7 +1378,7 @@ class XenAPIDom0IptablesFirewallTestCase(test.TestCase):
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(FLAGS.network_manager)
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
@@ -1638,7 +1636,7 @@ class XenAPISRSelectionTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
self.assertRaises(exception.StorageRepositoryNotFound,
- vm_utils.VMHelper.safe_find_sr, session)
+ vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
"""Ensure the default local-storage is found."""
@@ -1653,7 +1651,7 @@ class XenAPISRSelectionTestCase(test.TestCase):
'Local storage',
'i18n-key': 'local-storage'},
host_ref=host_ref)
- expected = vm_utils.VMHelper.safe_find_sr(session)
+ expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
@@ -1666,7 +1664,7 @@ class XenAPISRSelectionTestCase(test.TestCase):
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
- expected = vm_utils.VMHelper.safe_find_sr(session)
+ expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
@@ -1675,7 +1673,7 @@ class XenAPISRSelectionTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
pool_ref = xenapi_fake.create_pool('')
- expected = vm_utils.VMHelper.safe_find_sr(session)
+ expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
@@ -1695,7 +1693,7 @@ class XenAPIAggregateTestCase(test.TestCase):
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
- self.conn = xenapi_conn.get_connection(False)
+ self.conn = xenapi_conn.XenAPIDriver(False)
self.fake_metadata = {'master_compute': 'host',
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
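
Reviewer note on the hunks above: the `_attach_disks` refactor replaces the old `(vm_ref, first_vdi_ref, vdis-list)` arguments with a single mapping keyed by disk role, which is why the tests now build real fake VDIs instead of "blah" placeholders. A minimal sketch of the shape that mapping takes (uuids/refs hypothetical; in the tests they come from xenapi_fake and a XenAPISession against the fake backend):

    # Each role ('root' here; the stubs also use 'kernel' and 'ramdisk')
    # maps to the VDI's uuid and opaque ref.
    vdis = {'root': {'uuid': 'fake-vdi-uuid', 'ref': 'OpaqueRef:fake-vdi'}}
    conn._vmops._attach_disks(instance, disk_image_type, vm_ref, vdis)
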
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 7486c4d74..0a87110e6 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -37,11 +37,12 @@ def stubout_firewall_driver(stubs, conn):
def stubout_instance_snapshot(stubs):
- @classmethod
- def fake_fetch_image(cls, context, session, instance, image, type):
- return [dict(vdi_type='root', vdi_uuid=_make_fake_vdi())]
+ def fake_fetch_image(context, session, instance, image, type):
+ return {'root': dict(uuid=_make_fake_vdi(), file=None),
+ 'kernel': dict(uuid=_make_fake_vdi(), file=None),
+ 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
- stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+ stubs.Set(vm_utils, 'fetch_image', fake_fetch_image)
def fake_wait_for_vhd_coalesce(*args):
# TODO(sirp): Should we actually fake out the data here?
@@ -95,20 +96,18 @@ def stubout_is_vdi_pv(stubs):
def stubout_determine_is_pv_objectstore(stubs):
"""Assumes VMs stu have PV kernels"""
- @classmethod
- def f(cls, *args):
+ def f(*args):
return False
- stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+ stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
def stubout_is_snapshot(stubs):
""" Always returns true
xenapi fake driver does not create vmrefs for snapshots """
- @classmethod
- def f(cls, *args):
+ def f(*args):
return True
- stubs.Set(vm_utils.VMHelper, 'is_snapshot', f)
+ stubs.Set(vm_utils, 'is_snapshot', f)
def stubout_lookup_image(stubs):
@@ -121,8 +120,7 @@ def stubout_lookup_image(stubs):
def stubout_fetch_image_glance_disk(stubs, raise_failure=False):
"""Simulates a failure in fetch image_glance_disk."""
- @classmethod
- def _fake_fetch_image_glance_disk(cls, context, session, instance, image,
+ def _fake_fetch_image_glance_disk(context, session, instance, image,
image_type):
if raise_failure:
raise fake.Failure("Test Exception raised by "
@@ -134,22 +132,20 @@ def stubout_fetch_image_glance_disk(stubs, raise_failure=False):
else:
filename = "unknown"
- return [dict(vdi_type=vm_utils.ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
+ vdi_type = vm_utils.ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
- stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk',
+ stubs.Set(vm_utils, '_fetch_image_glance_disk',
_fake_fetch_image_glance_disk)
def stubout_create_vm(stubs):
"""Simulates a failure in create_vm."""
- @classmethod
- def f(cls, *args):
+ def f(*args):
raise fake.Failure("Test Exception raised by " +
"fake create_vm")
- stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+ stubs.Set(vm_utils, 'create_vm', f)
def _make_fake_vdi():
@@ -285,8 +281,7 @@ def stub_out_vm_methods(stubs):
def fake_release_bootlock(self, vm):
pass
- @classmethod
- def fake_generate_ephemeral(cls, *args):
+ def fake_generate_ephemeral(*args):
pass
def fake_wait_for_device(dev):
@@ -294,7 +289,7 @@ def stub_out_vm_methods(stubs):
stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
- stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
@@ -338,35 +333,33 @@ def stub_out_migration_methods(stubs):
return 'vm_ref', dict(image='foo', snap='bar')
def fake_move_disks(self, instance, disk_info):
- vdi_ref = fake.create_vdi('new', 'fake')
- return fake.get_record('VDI', vdi_ref)['uuid']
+ vdi_ref = fake.create_vdi(instance['name'], 'fake')
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ vdi_rec['other_config']['nova_disk_type'] = 'root'
+ return {'uuid': vdi_rec['uuid'], 'ref': vdi_ref}
- @classmethod
- def fake_get_vdi(cls, session, vm_ref):
+ def fake_get_vdi(session, vm_ref):
vdi_ref = fake.create_vdi('derp', 'herp')
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
return vdi_ref, {'uuid': vdi_rec['uuid'], }
- @classmethod
- def fake_sr(cls, session, *args):
+ def fake_sr(session, *args):
pass
- @classmethod
- def fake_get_sr_path(cls, *args):
+ def fake_get_sr_path(*args):
return "fake"
def fake_destroy(*args, **kwargs):
pass
- @classmethod
- def fake_generate_ephemeral(cls, *args):
+ def fake_generate_ephemeral(*args):
pass
stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
stubs.Set(vmops.VMOps, '_move_disks', fake_move_disks)
- stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
- stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
+ stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils, 'scan_sr', fake_sr)
stubs.Set(vmops.VMOps, '_create_snapshot', fake_create_snapshot)
- stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
- stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
- stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
+ stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
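
A pattern worth calling out across this file: with the helpers demoted from VMHelper classmethods to module-level functions, every fake drops @classmethod and its cls parameter, and stubs.Set now targets the vm_utils module object instead of the class. A before/after sketch, assuming the mox-style stubs used here:

    from nova.virt.xenapi import vm_utils

    # Before: stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
    # After: a plain function installed directly on the module.
    def fake_sr(session, *args):
        pass  # no-op; tests only need the call to succeed

    stubs.Set(vm_utils, 'scan_sr', fake_sr)
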
diff --git a/nova/virt/baremetal/proxy.py b/nova/virt/baremetal/proxy.py
index 1ee927c03..1c5729422 100644
--- a/nova/virt/baremetal/proxy.py
+++ b/nova/virt/baremetal/proxy.py
@@ -66,15 +66,6 @@ baremetal_opts = [
FLAGS.register_opts(baremetal_opts)
-def get_connection(read_only):
- # These are loaded late so that there's no need to install these
- # libraries when not using baremetal.
- # Cheetah is separate because the unit tests want to load Cheetah,
- # but not baremetal.
- _late_load_cheetah()
- return ProxyConnection(read_only)
-
-
def _late_load_cheetah():
global Template
if Template is None:
@@ -86,6 +77,7 @@ def _late_load_cheetah():
class ProxyConnection(driver.ComputeDriver):
def __init__(self, read_only):
+ _late_load_cheetah()
# Note that baremetal doesn't have a read-only connection
# mode, so the read_only parameter is ignored
super(ProxyConnection, self).__init__()
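
Moving _late_load_cheetah() into the constructor keeps the module importable on machines without Cheetah while still deferring the import until a driver is actually built. A sketch of the lazy-global pattern, consistent with the helper's opening lines above (the exact Cheetah import call is an assumption):

    Template = None

    def _late_load_cheetah():
        global Template
        if Template is None:
            # Deferred import: Cheetah is only needed once a driver
            # instance is constructed, not at module import time.
            from Cheetah.Template import Template as _Template
            Template = _Template
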
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 84f84744b..8bf1ed5f5 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -23,23 +23,20 @@ import sys
from nova import flags
from nova import log as logging
+from nova.openstack.common import importutils
from nova import utils
from nova.virt import driver
-from nova.virt import fake
-from nova.virt.libvirt import connection as libvirt_conn
-from nova.virt import vmwareapi_conn
-from nova.virt.xenapi import connection as xenapi_conn
-
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
-"""
-In case of baremetal (FLAGS.connection_type),
-specific driver is set by FLAGS.baremetal_driver
-"""
-if FLAGS.connection_type == 'baremetal':
- from nova.virt.baremetal import proxy
+known_drivers = {
+ 'baremetal': 'nova.virt.baremetal.proxy.ProxyConnection',
+ 'fake': 'nova.virt.fake.FakeDriver',
+ 'libvirt': 'nova.virt.libvirt.connection.LibvirtDriver',
+ 'vmwareapi': 'nova.virt.vmwareapi_conn.VMWareESXDriver',
+ 'xenapi': 'nova.virt.xenapi.connection.XenAPIDriver'
+ }
def get_connection(read_only=False):
@@ -68,21 +65,17 @@ def get_connection(read_only=False):
* baremetal
"""
- # TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
- t = FLAGS.connection_type
- if t == 'fake':
- conn = fake.get_connection(read_only)
- elif t == 'libvirt':
- conn = libvirt_conn.get_connection(read_only)
- elif t == 'xenapi':
- conn = xenapi_conn.get_connection(read_only)
- elif t == 'vmwareapi':
- conn = vmwareapi_conn.get_connection(read_only)
- elif t == 'baremetal':
- conn = proxy.get_connection(read_only)
- else:
- raise Exception('Unknown connection type "%s"' % t)
+    # TODO(sdague): is there a better way to mark things deprecated?
+ LOG.error(_('Specifying virt driver via connection_type is deprecated'))
+
+ driver_name = known_drivers.get(FLAGS.connection_type)
+
+ if driver_name is None:
+ raise Exception('Unknown virt connection type "%s"' %
+ FLAGS.connection_type)
+
+ conn = importutils.import_object(driver_name, read_only=read_only)
if conn is None:
LOG.error(_('Failed to open connection to underlying virt platform'))
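
The table-plus-importutils dispatch removes both the if/elif chain and the eager imports of every driver module. importutils.import_object(path, *args, **kwargs) imports a dotted 'module.Class' path and instantiates it; a rough self-contained sketch of those mechanics, for reference only:

    import sys

    def import_object(import_str, *args, **kwargs):
        # Approximation of nova.openstack.common.importutils.import_object:
        # split 'pkg.module.Class', import the module, call the class.
        mod_str, _sep, class_str = import_str.rpartition('.')
        __import__(mod_str)
        cls = getattr(sys.modules[mod_str], class_str)
        return cls(*args, **kwargs)

    # Mirrors get_connection() above:
    # conn = import_object('nova.virt.fake.FakeDriver', read_only=False)
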
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 57c358521..0ec705a53 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -36,11 +36,6 @@ from nova.virt import driver
LOG = logging.getLogger(__name__)
-def get_connection(_read_only):
- # The read_only parameter is ignored.
- return FakeConnection.instance()
-
-
class FakeInstance(object):
def __init__(self, name, state):
@@ -48,10 +43,10 @@ class FakeInstance(object):
self.state = state
-class FakeConnection(driver.ComputeDriver):
+class FakeDriver(driver.ComputeDriver):
"""Fake hypervisor driver"""
- def __init__(self):
+ def __init__(self, readonly=False):
self.instances = {}
self.host_status = {
'host_name-description': 'Fake Host',
@@ -70,12 +65,6 @@ class FakeConnection(driver.ComputeDriver):
'host_name_label': 'fake-mini'}
self._mounts = {}
- @classmethod
- def instance(cls):
- if not hasattr(cls, '_instance'):
- cls._instance = cls()
- return cls._instance
-
def init_host(self, host):
return
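
Dropping the instance() singleton is what makes FakeDriver loadable through the driver table: import_object calls the class directly, so the constructor must accept the read_only keyword even though the fake ignores it. Using the sketch above:

    # Each call now yields a fresh driver; tests no longer share state
    # through a hidden class-level singleton.
    conn = import_object('nova.virt.fake.FakeDriver', read_only=True)
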
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index bca50d21f..be6a0f7c9 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -331,8 +331,8 @@ class IptablesFirewallDriver(FirewallDriver):
else:
fw_rules = ipv6_rules
- protocol = rule.protocol
- if version == 6 and rule.protocol == 'icmp':
+ protocol = rule.protocol.lower()
+ if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-j ACCEPT']
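
The .lower() fix matters because security group rules may carry the protocol in any case ('ICMP', 'Tcp', ...), and the IPv6 rewrite to icmpv6 previously fired only on an exact lowercase match. A small sketch of the normalization (helper name hypothetical; the driver inlines this):

    def normalize_protocol(protocol, version):
        protocol = protocol.lower()
        if version == 6 and protocol == 'icmp':
            return 'icmpv6'   # ip6tables has no 'icmp' protocol token
        return protocol

    assert normalize_protocol('ICMP', 6) == 'icmpv6'
    assert normalize_protocol('TCP', 4) == 'tcp'
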
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 237d91816..0cfcf2993 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -74,11 +74,9 @@ from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
-
libvirt = None
Template = None
-
LOG = logging.getLogger(__name__)
libvirt_opts = [
@@ -116,17 +114,19 @@ libvirt_opts = [
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
- help='Define protocol used by live_migration feature'),
+ help='Migration target URI '
+ '(any included "%s" is replaced with '
+ 'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
- help='Define live migration behavior.'),
+ help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
- help='Define block migration behavior.'),
+ help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
- help='Define live migration behavior'),
+ help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are: '
@@ -190,17 +190,33 @@ def patch_tpool_proxy():
patch_tpool_proxy()
-
-def get_connection(read_only):
- # These are loaded late so that there's no need to install these
- # libraries when not using libvirt.
- # Cheetah is separate because the unit tests want to load Cheetah,
- # but not libvirt.
- global libvirt
- if libvirt is None:
- libvirt = __import__('libvirt')
- _late_load_cheetah()
- return LibvirtConnection(read_only)
+VIR_DOMAIN_NOSTATE = 0
+VIR_DOMAIN_RUNNING = 1
+VIR_DOMAIN_BLOCKED = 2
+VIR_DOMAIN_PAUSED = 3
+VIR_DOMAIN_SHUTDOWN = 4
+VIR_DOMAIN_SHUTOFF = 5
+VIR_DOMAIN_CRASHED = 6
+VIR_DOMAIN_PMSUSPENDED = 7
+
+LIBVIRT_POWER_STATE = {
+ VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
+ VIR_DOMAIN_RUNNING: power_state.RUNNING,
+ # NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
+ # It means that the VM is running and the vCPU is idle. So,
+ # we map it to RUNNING
+ VIR_DOMAIN_BLOCKED: power_state.RUNNING,
+ VIR_DOMAIN_PAUSED: power_state.PAUSED,
+ # NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
+ # means the domain is being shut down. So technically the domain
+ # is still running. SHUTOFF is the real powered off state.
+ # But we will map both to SHUTDOWN anyway.
+ # http://libvirt.org/html/libvirt-libvirt.html
+ VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
+ VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
+ VIR_DOMAIN_CRASHED: power_state.CRASHED,
+ VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
+}
def _late_load_cheetah():
@@ -215,10 +231,16 @@ def _get_eph_disk(ephemeral):
return 'disk.eph' + str(ephemeral['num'])
-class LibvirtConnection(driver.ComputeDriver):
+class LibvirtDriver(driver.ComputeDriver):
def __init__(self, read_only):
- super(LibvirtConnection, self).__init__()
+ super(LibvirtDriver, self).__init__()
+
+ global libvirt
+ if libvirt is None:
+ libvirt = __import__('libvirt')
+
+ _late_load_cheetah()
self._host_state = None
self._initiator = None
@@ -354,6 +376,8 @@ class LibvirtConnection(driver.ComputeDriver):
# puTime: the time used by the domain in nanoseconds
(state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
+ state = LIBVIRT_POWER_STATE[state]
+
name = domain.name()
return driver.InstanceInfo(name, state)
@@ -396,7 +420,8 @@ class LibvirtConnection(driver.ComputeDriver):
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
- if state == power_state.SHUTOFF:
+ state = LIBVIRT_POWER_STATE[state]
+ if state == power_state.SHUTDOWN:
is_okay = True
if not is_okay:
@@ -675,6 +700,8 @@ class LibvirtConnection(driver.ComputeDriver):
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
+ state = LIBVIRT_POWER_STATE[state]
+
if state == power_state.RUNNING:
virt_dom.managedSave(0)
# Make the snapshot
@@ -726,6 +753,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
dom = self._lookup_by_name(instance.name)
(state, _max_mem, _mem, _cpus, _t) = dom.info()
+ state = LIBVIRT_POWER_STATE[state]
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
@@ -735,8 +763,9 @@ class LibvirtConnection(driver.ComputeDriver):
# call takes to return.
for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
(state, _max_mem, _mem, _cpus, _t) = dom.info()
+ state = LIBVIRT_POWER_STATE[state]
+
if state in [power_state.SHUTDOWN,
- power_state.SHUTOFF,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
@@ -1700,7 +1729,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
- return {'state': state,
+ return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
@@ -2628,7 +2657,7 @@ class HostState(object):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
if self.connection is None:
- self.connection = get_connection(self.read_only)
+ self.connection = LibvirtDriver(self.read_only)
data = {}
data["vcpus"] = self.connection.get_vcpu_total()
data["vcpus_used"] = self.connection.get_vcpu_used()
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 9f63c13c1..40b8d98d5 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -150,6 +150,9 @@ def mkfs(fs, path, label=None):
execute('mkswap', path)
else:
args = ['mkfs', '-t', fs]
+        # Add -F so mkfs runs non-interactively on non-block devices.
+ if fs in ['ext3', 'ext4']:
+ args.extend(['-F'])
if label:
args.extend(['-n', label])
args.append(path)
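
Context for the -F flag: when the target is a regular file rather than a block device (the usual case for file-backed guest images), mke2fs prompts for confirmation, which would wedge the non-interactive execute() call. A sketch of the argv this now produces (image path hypothetical):

    args = ['mkfs', '-t', 'ext3']
    args.extend(['-F'])        # ext3/ext4 only: suppress the
                               # "not a block special device" prompt
    args.append('/tmp/disk.img')
    # execute(*args) then completes without blocking on stdin.
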
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ef5656ff3..0c66791a6 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -93,27 +93,22 @@ class Failure(Exception):
return str(self.details)
-def get_connection(_read_only):
- """Sets up the ESX host connection."""
- host_ip = FLAGS.vmwareapi_host_ip
- host_username = FLAGS.vmwareapi_host_username
- host_password = FLAGS.vmwareapi_host_password
- api_retry_count = FLAGS.vmwareapi_api_retry_count
- if not host_ip or host_username is None or host_password is None:
- raise Exception(_("Must specify vmwareapi_host_ip,"
- "vmwareapi_host_username "
- "and vmwareapi_host_password to use"
- "connection_type=vmwareapi"))
- return VMWareESXConnection(host_ip, host_username, host_password,
- api_retry_count)
-
-
-class VMWareESXConnection(driver.ComputeDriver):
+class VMWareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
- def __init__(self, host_ip, host_username, host_password,
- api_retry_count, scheme="https"):
- super(VMWareESXConnection, self).__init__()
+ def __init__(self, read_only=False, scheme="https"):
+ super(VMWareESXDriver, self).__init__()
+
+ host_ip = FLAGS.vmwareapi_host_ip
+ host_username = FLAGS.vmwareapi_host_username
+ host_password = FLAGS.vmwareapi_host_password
+ api_retry_count = FLAGS.vmwareapi_api_retry_count
+ if not host_ip or host_username is None or host_password is None:
+ raise Exception(_("Must specify vmwareapi_host_ip,"
+ "vmwareapi_host_username "
+ "and vmwareapi_host_password to use"
+ "connection_type=vmwareapi"))
+
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = vmops.VMWareVMOps(session)
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
index 7eae5f767..d9abe54c5 100644
--- a/nova/virt/xenapi/__init__.py
+++ b/nova/virt/xenapi/__init__.py
@@ -18,33 +18,3 @@
:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
==================================================================
"""
-
-
-class HelperBase(object):
- """
- The base for helper classes.
- """
- @classmethod
- def get_rec(cls, session, record_type, ref):
- try:
- return session.call_xenapi('%s.get_record' % record_type, ref)
- except session.XenAPI.Failure, e:
- if e.details[0] != 'HANDLE_INVALID':
- raise
-
- return None
-
- @classmethod
- def get_all_refs_and_recs(cls, session, record_type):
- """Retrieve all refs and recs for a Xen record type.
-
- Handles race-conditions where the record may be deleted between
- the `get_all` call and the `get_record` call.
- """
-
- for ref in session.call_xenapi('%s.get_all' % record_type):
- rec = cls.get_rec(session, record_type, ref)
- # Check to make sure the record still exists. It may have
- # been deleted between the get_all call and get_record call
- if rec:
- yield ref, rec
diff --git a/nova/virt/xenapi/connection.py b/nova/virt/xenapi/connection.py
index bd26e86a6..20f9745d2 100644
--- a/nova/virt/xenapi/connection.py
+++ b/nova/virt/xenapi/connection.py
@@ -127,26 +127,22 @@ FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_opts)
-def get_connection(_read_only):
- """Note that XenAPI doesn't have a read-only connection mode, so
- the read_only parameter is ignored."""
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- if not url or password is None:
- raise Exception(_('Must specify xenapi_connection_url, '
- 'xenapi_connection_username (optionally), and '
- 'xenapi_connection_password to use '
- 'connection_type=xenapi'))
- return XenAPIConnection(url, username, password)
-
-
-class XenAPIConnection(driver.ComputeDriver):
+class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
- def __init__(self, url, user, pw):
- super(XenAPIConnection, self).__init__()
- self._session = XenAPISession(url, user, pw)
+ def __init__(self, read_only=False):
+ super(XenAPIDriver, self).__init__()
+
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ if not url or password is None:
+ raise Exception(_('Must specify xenapi_connection_url, '
+ 'xenapi_connection_username (optionally), and '
+ 'xenapi_connection_password to use '
+ 'connection_type=xenapi'))
+
+ self._session = XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session)
@@ -632,3 +628,26 @@ class XenAPISession(object):
except xmlrpclib.ProtocolError, exc:
LOG.debug(_("Got exception: %s"), exc)
raise
+
+ def get_rec(self, record_type, ref):
+ try:
+ return self.call_xenapi('%s.get_record' % record_type, ref)
+ except self.XenAPI.Failure, e:
+ if e.details[0] != 'HANDLE_INVALID':
+ raise
+
+ return None
+
+ def get_all_refs_and_recs(self, record_type):
+ """Retrieve all refs and recs for a Xen record type.
+
+ Handles race-conditions where the record may be deleted between
+ the `get_all` call and the `get_record` call.
+ """
+
+ for ref in self.call_xenapi('%s.get_all' % record_type):
+ rec = self.get_rec(record_type, ref)
+ # Check to make sure the record still exists. It may have
+ # been deleted between the get_all call and get_record call
+ if rec:
+ yield ref, rec
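
With HelperBase removed, the ref/rec iteration lives on the session itself, so callers stop threading a session argument through a helper class. A usage sketch against the new methods (credentials as in the tests):

    session = XenAPISession('test_url', 'root', 'test_pass')
    # Tolerates records deleted between get_all and get_record:
    for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
        print vm_ref, vm_rec['name_label']
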
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 532c9f70d..3ccd6247d 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -475,19 +475,14 @@ class SessionBase(object):
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
- read_only=read_only)
- return vdi_ref
+ other_config = db_ref['other_config'].copy()
+ return create_vdi(name_label, sr_ref, sharable=sharable,
+ read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
- name_label = db_ref['name_label']
- read_only = db_ref['read_only']
sr_ref = db_ref['SR']
- sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
- read_only=read_only)
- return vdi_ref
+ return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index a6fb4a92e..044173a03 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -57,7 +57,7 @@ class Host(object):
if host_ref != self._session.get_xenapi_host()]
migrations_counter = vm_counter = 0
ctxt = context.get_admin_context()
- for vm_ref, vm_rec in vm_utils.VMHelper.list_vms(self._session):
+ for vm_ref, vm_rec in vm_utils.list_vms(self._session):
for host_ref in host_list:
try:
# Ensure only guest instances are migrated
@@ -144,7 +144,7 @@ class HostState(object):
if data:
try:
# Get the SR usage
- sr_ref = vm_utils.VMHelper.safe_find_sr(self._session)
+ sr_ref = vm_utils.safe_find_sr(self._session)
except exception.NotFound as e:
# No SR configured
LOG.error(_("Unable to get SR for this host: %s") % e)
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index 50b36933c..dceaed949 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -21,38 +21,29 @@ their lookup functions.
"""
-from nova.virt import xenapi
+def find_network_with_name_label(session, name_label):
+ networks = session.call_xenapi('network.get_by_name_label', name_label)
+ if len(networks) == 1:
+ return networks[0]
+ elif len(networks) > 1:
+ raise Exception(_('Found non-unique network for name_label %s') %
+ name_label)
+ else:
+ return None
-class NetworkHelper(xenapi.HelperBase):
+def find_network_with_bridge(session, bridge):
"""
- The class that wraps the helper methods together.
+ Return the network on which the bridge is attached, if found.
+ The bridge is defined in the nova db and can be found either in the
+ 'bridge' or 'name_label' fields of the XenAPI network record.
"""
- @classmethod
- def find_network_with_name_label(cls, session, name_label):
- networks = session.call_xenapi('network.get_by_name_label', name_label)
- if len(networks) == 1:
- return networks[0]
- elif len(networks) > 1:
- raise Exception(_('Found non-unique network'
- ' for name_label %s') % name_label)
- else:
- return None
-
- @classmethod
- def find_network_with_bridge(cls, session, bridge):
- """
- Return the network on which the bridge is attached, if found.
- The bridge is defined in the nova db and can be found either in the
- 'bridge' or 'name_label' fields of the XenAPI network record.
- """
- expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
- (bridge, bridge))
- networks = session.call_xenapi('network.get_all_records_where', expr)
- if len(networks) == 1:
- return networks.keys()[0]
- elif len(networks) > 1:
- raise Exception(_('Found non-unique network'
- ' for bridge %s') % bridge)
- else:
- raise Exception(_('Found no network for bridge %s') % bridge)
+ expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
+ (bridge, bridge))
+ networks = session.call_xenapi('network.get_all_records_where', expr)
+ if len(networks) == 1:
+ return networks.keys()[0]
+ elif len(networks) > 1:
+ raise Exception(_('Found non-unique network for bridge %s') % bridge)
+ else:
+ raise Exception(_('Found no network for bridge %s') % bridge)
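
Usage note: the two lookups keep the old NetworkHelper contract, where a name_label miss returns None but a bridge miss raises. A sketch of the typical calling pattern (bridge name hypothetical):

    network_ref = find_network_with_name_label(session, 'fake-net')
    if network_ref is None:
        # Falls through to the bridge lookup, which raises on no match.
        network_ref = find_network_with_bridge(session, 'xenbr0')
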
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 05bbc33c1..39778521b 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -46,15 +46,15 @@ class XenAPIBridgeDriver(XenVIFDriver):
def plug(self, instance, vif, vm_ref=None, device=None):
if not vm_ref:
- vm_ref = vm_utils.VMHelper.lookup(self._session, instance.name)
+ vm_ref = vm_utils.lookup(self._session, instance.name)
if not device:
device = 0
if vif['network'].get_meta('should_create_vlan'):
network_ref = self._ensure_vlan_bridge(vif['network'])
else:
- network_ref = network_utils.NetworkHelper.find_network_with_bridge(
- self._session, vif['network']['bridge'])
+ network_ref = network_utils.find_network_with_bridge(
+ self._session, vif['network']['bridge'])
vif_rec = {}
vif_rec['device'] = str(device)
vif_rec['network'] = network_ref
@@ -80,7 +80,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
network.get_meta('bridge_interface'))
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
- network_ref = network_utils.NetworkHelper.find_network_with_name_label(
+ network_ref = network_utils.find_network_with_name_label(
self._session, bridge)
if network_ref is None:
# If bridge does not exists
@@ -136,15 +136,15 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
def plug(self, instance, vif, vm_ref=None, device=None):
if not vm_ref:
- vm_ref = vm_utils.VMHelper.lookup(self._session, instance.name)
+ vm_ref = vm_utils.lookup(self._session, instance.name)
if not device:
device = 0
# with OVS model, always plug into an OVS integration bridge
# that is already created
- network_ref = network_utils.NetworkHelper.find_network_with_bridge(
- self._session, FLAGS.xenapi_ovs_integration_bridge)
+ network_ref = network_utils.find_network_with_bridge(
+ self._session, FLAGS.xenapi_ovs_integration_bridge)
vif_rec = {}
vif_rec['device'] = str(device)
vif_rec['network'] = network_ref
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 508fd34e9..570fccb1c 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -136,1079 +136,1058 @@ class ImageType:
return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str)
-class VMHelper(xenapi.HelperBase):
- """
- The class that wraps the helper methods together.
- """
+def create_vm(session, instance, kernel, ramdisk, use_pv_kernel=False):
+ """Create a VM record. Returns new VM reference.
+    The use_pv_kernel flag indicates whether the guest is HVM or PV.
- @classmethod
- def create_vm(cls, session, instance, kernel, ramdisk,
- use_pv_kernel=False):
- """Create a VM record. Returns new VM reference.
- the use_pv_kernel flag indicates whether the guest is HVM or PV
-
- There are 3 scenarios:
-
- 1. Using paravirtualization, kernel passed in
-
- 2. Using paravirtualization, kernel within the image
-
- 3. Using hardware virtualization
- """
- inst_type_id = instance.instance_type_id
- instance_type = instance_types.get_instance_type(inst_type_id)
- mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
- vcpus = str(instance_type['vcpus'])
-
- rec = {
- 'actions_after_crash': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_shutdown': 'destroy',
- 'affinity': '',
- 'blocked_operations': {},
- 'ha_always_run': False,
- 'ha_restart_priority': '',
- 'HVM_boot_params': {},
- 'HVM_boot_policy': '',
- 'is_a_template': False,
- 'memory_dynamic_min': mem,
- 'memory_dynamic_max': mem,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
- 'memory_target': mem,
- 'name_description': '',
- 'name_label': instance.name,
- 'other_config': {'allowvssprovider': str(False),
- 'nova_uuid': str(instance.uuid), },
- 'PCI_bus': '',
- 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
- 'viridian': 'true', 'timeoffset': '0'},
- 'PV_args': '',
- 'PV_bootloader': '',
- 'PV_bootloader_args': '',
- 'PV_kernel': '',
- 'PV_legacy_args': '',
- 'PV_ramdisk': '',
- 'recommendations': '',
- 'tags': [],
- 'user_version': '0',
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'xenstore_data': {}}
-
- # Complete VM configuration record according to the image type
- # non-raw/raw with PV kernel/raw in HVM mode
- if use_pv_kernel:
- rec['platform']['nx'] = 'false'
- if instance.kernel_id:
- # 1. Kernel explicitly passed in, use that
- rec['PV_args'] = 'root=/dev/xvda1'
- rec['PV_kernel'] = kernel
- rec['PV_ramdisk'] = ramdisk
- else:
- # 2. Use kernel within the image
- rec['PV_bootloader'] = 'pygrub'
- else:
- # 3. Using hardware virtualization
- rec['platform']['nx'] = 'true'
- rec['HVM_boot_params'] = {'order': 'dc'}
- rec['HVM_boot_policy'] = 'BIOS order'
-
- vm_ref = session.call_xenapi('VM.create', rec)
- LOG.debug(_('Created VM'), instance=instance)
- return vm_ref
+ There are 3 scenarios:
- @classmethod
- def ensure_free_mem(cls, session, instance):
- inst_type_id = instance.instance_type_id
- instance_type = instance_types.get_instance_type(inst_type_id)
- mem = long(instance_type['memory_mb']) * 1024 * 1024
- #get free memory from host
- host = session.get_xenapi_host()
- host_free_mem = long(session.call_xenapi("host.compute_free_memory",
- host))
- return host_free_mem >= mem
+ 1. Using paravirtualization, kernel passed in
- @classmethod
- def find_vbd_by_number(cls, session, vm_ref, number):
- """Get the VBD reference from the device number"""
- vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
- if vbd_refs:
- for vbd_ref in vbd_refs:
- try:
- vbd_rec = session.call_xenapi("VBD.get_record", vbd_ref)
- if vbd_rec['userdevice'] == str(number):
- return vbd_ref
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(
- _('VBD not found in instance %s') % vm_ref)
+ 2. Using paravirtualization, kernel within the image
- @classmethod
- def unplug_vbd(cls, session, vbd_ref):
- """Unplug VBD from VM"""
- # Call VBD.unplug on the given VBD, with a retry if we get
- # DEVICE_DETACH_REJECTED. For reasons which we don't understand,
- # we're seeing the device still in use, even when all processes
- # using the device should be dead.
- max_attempts = FLAGS.xenapi_num_vbd_unplug_retries + 1
- for num_attempt in xrange(1, max_attempts + 1):
+ 3. Using hardware virtualization
+ """
+ inst_type_id = instance.instance_type_id
+ instance_type = instance_types.get_instance_type(inst_type_id)
+ mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
+ vcpus = str(instance_type['vcpus'])
+
+ rec = {
+ 'actions_after_crash': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_shutdown': 'destroy',
+ 'affinity': '',
+ 'blocked_operations': {},
+ 'ha_always_run': False,
+ 'ha_restart_priority': '',
+ 'HVM_boot_params': {},
+ 'HVM_boot_policy': '',
+ 'is_a_template': False,
+ 'memory_dynamic_min': mem,
+ 'memory_dynamic_max': mem,
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_target': mem,
+ 'name_description': '',
+ 'name_label': instance.name,
+ 'other_config': {'allowvssprovider': str(False),
+ 'nova_uuid': str(instance.uuid)},
+ 'PCI_bus': '',
+ 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
+ 'viridian': 'true', 'timeoffset': '0'},
+ 'PV_args': '',
+ 'PV_bootloader': '',
+ 'PV_bootloader_args': '',
+ 'PV_kernel': '',
+ 'PV_legacy_args': '',
+ 'PV_ramdisk': '',
+ 'recommendations': '',
+ 'tags': [],
+ 'user_version': '0',
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'xenstore_data': {}}
+
+ # Complete VM configuration record according to the image type
+ # non-raw/raw with PV kernel/raw in HVM mode
+ if use_pv_kernel:
+ rec['platform']['nx'] = 'false'
+ if instance.kernel_id:
+ # 1. Kernel explicitly passed in, use that
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
+ else:
+ # 2. Use kernel within the image
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ # 3. Using hardware virtualization
+ rec['platform']['nx'] = 'true'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['HVM_boot_policy'] = 'BIOS order'
+
+ vm_ref = session.call_xenapi('VM.create', rec)
+ LOG.debug(_('Created VM'), instance=instance)
+ return vm_ref
+
+
+def ensure_free_mem(session, instance):
+ inst_type_id = instance.instance_type_id
+ instance_type = instance_types.get_instance_type(inst_type_id)
+ mem = long(instance_type['memory_mb']) * 1024 * 1024
+    # Get free memory from the host.
+ host = session.get_xenapi_host()
+ host_free_mem = long(session.call_xenapi("host.compute_free_memory",
+ host))
+ return host_free_mem >= mem
+
+
+def find_vbd_by_number(session, vm_ref, number):
+ """Get the VBD reference from the device number"""
+ vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- session.call_xenapi('VBD.unplug', vbd_ref)
- return
+ vbd_rec = session.call_xenapi("VBD.get_record", vbd_ref)
+ if vbd_rec['userdevice'] == str(number):
+ return vbd_ref
except session.XenAPI.Failure, exc:
- err = len(exc.details) > 0 and exc.details[0]
- if err == 'DEVICE_ALREADY_DETACHED':
- LOG.info(_('VBD %s already detached'), vbd_ref)
- return
- elif err == 'DEVICE_DETACH_REJECTED':
- LOG.info(_('VBD %(vbd_ref)s detach rejected, attempt'
- ' %(num_attempt)d/%(max_attempts)d'), locals())
- else:
- LOG.exception(exc)
- raise volume_utils.StorageError(
- _('Unable to unplug VBD %s') % vbd_ref)
-
- greenthread.sleep(1)
+ LOG.exception(exc)
+ raise volume_utils.StorageError(
+ _('VBD not found in instance %s') % vm_ref)
- raise volume_utils.StorageError(
- _('Reached maximum number of retries trying to unplug VBD %s')
- % vbd_ref)
- @classmethod
- def destroy_vbd(cls, session, vbd_ref):
- """Destroy VBD from host database"""
+def unplug_vbd(session, vbd_ref):
+ """Unplug VBD from VM"""
+ # Call VBD.unplug on the given VBD, with a retry if we get
+ # DEVICE_DETACH_REJECTED. For reasons which we don't understand,
+ # we're seeing the device still in use, even when all processes
+ # using the device should be dead.
+ max_attempts = FLAGS.xenapi_num_vbd_unplug_retries + 1
+ for num_attempt in xrange(1, max_attempts + 1):
try:
- session.call_xenapi('VBD.destroy', vbd_ref)
+ session.call_xenapi('VBD.unplug', vbd_ref)
+ return
except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(
- _('Unable to destroy VBD %s') % vbd_ref)
+ err = len(exc.details) > 0 and exc.details[0]
+ if err == 'DEVICE_ALREADY_DETACHED':
+ LOG.info(_('VBD %s already detached'), vbd_ref)
+ return
+ elif err == 'DEVICE_DETACH_REJECTED':
+ LOG.info(_('VBD %(vbd_ref)s detach rejected, attempt'
+ ' %(num_attempt)d/%(max_attempts)d'), locals())
+ else:
+ LOG.exception(exc)
+ raise volume_utils.StorageError(
+ _('Unable to unplug VBD %s') % vbd_ref)
- @classmethod
- def create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
- vbd_type='disk', read_only=False, bootable=False):
- """Create a VBD record and returns its reference."""
- vbd_rec = {}
- vbd_rec['VM'] = vm_ref
- vbd_rec['VDI'] = vdi_ref
- vbd_rec['userdevice'] = str(userdevice)
- vbd_rec['bootable'] = bootable
- vbd_rec['mode'] = read_only and 'RO' or 'RW'
- vbd_rec['type'] = vbd_type
- vbd_rec['unpluggable'] = True
- vbd_rec['empty'] = False
- vbd_rec['other_config'] = {}
- vbd_rec['qos_algorithm_type'] = ''
- vbd_rec['qos_algorithm_params'] = {}
- vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
- ' VDI %(vdi_ref)s ... '), locals())
- vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
- ' VDI %(vdi_ref)s.'), locals())
- return vbd_ref
+ greenthread.sleep(1)
- @classmethod
- def destroy_vdi(cls, session, vdi_ref):
- try:
- session.call_xenapi('VDI.destroy', vdi_ref)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(
- _('Unable to destroy VDI %s') % vdi_ref)
+ raise volume_utils.StorageError(
+ _('Reached maximum number of retries trying to unplug VBD %s')
+ % vbd_ref)
- @classmethod
- def create_vdi(cls, session, sr_ref, instance, name_description,
- virtual_size, read_only=False):
- """Create a VDI record and returns its reference."""
- name_label = instance['name']
- vdi_ref = session.call_xenapi("VDI.create",
- {'name_label': name_label,
- 'name_description': name_description,
- 'SR': sr_ref,
- 'virtual_size': str(virtual_size),
- 'type': 'User',
- 'sharable': False,
- 'read_only': read_only,
- 'xenstore_data': {},
- 'other_config': {'nova_instance_uuid': instance['uuid']},
- 'sm_config': {},
- 'tags': []})
- LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
- ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
- locals())
- return vdi_ref
- @classmethod
- def copy_vdi(cls, session, sr_ref, vdi_to_copy_ref):
- """Copy a VDI and return the new VDIs reference."""
- vdi_ref = session.call_xenapi('VDI.copy', vdi_to_copy_ref, sr_ref)
- LOG.debug(_('Copied VDI %(vdi_ref)s from VDI '
- '%(vdi_to_copy_ref)s on %(sr_ref)s.') % locals())
- return vdi_ref
+def destroy_vbd(session, vbd_ref):
+ """Destroy VBD from host database"""
+ try:
+ session.call_xenapi('VBD.destroy', vbd_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise volume_utils.StorageError(
+ _('Unable to destroy VBD %s') % vbd_ref)
+
+
+def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
+ read_only=False, bootable=False):
+ """Create a VBD record and returns its reference."""
+ vbd_rec = {}
+ vbd_rec['VM'] = vm_ref
+ vbd_rec['VDI'] = vdi_ref
+ vbd_rec['userdevice'] = str(userdevice)
+ vbd_rec['bootable'] = bootable
+ vbd_rec['mode'] = read_only and 'RO' or 'RW'
+ vbd_rec['type'] = vbd_type
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... '), locals())
+ vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.'), locals())
+ return vbd_ref
+
+
+def destroy_vdi(session, vdi_ref):
+ try:
+ session.call_xenapi('VDI.destroy', vdi_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise volume_utils.StorageError(
+ _('Unable to destroy VDI %s') % vdi_ref)
+
+
+def create_vdi(session, sr_ref, instance, disk_type, virtual_size,
+ read_only=False):
+ """Create a VDI record and returns its reference."""
+ name_label = instance['name']
+ vdi_ref = session.call_xenapi("VDI.create",
+ {'name_label': name_label,
+ 'name_description': disk_type,
+ 'SR': sr_ref,
+ 'virtual_size': str(virtual_size),
+ 'type': 'User',
+ 'sharable': False,
+ 'read_only': read_only,
+ 'xenstore_data': {},
+ 'other_config': {'nova_instance_uuid': instance['uuid'],
+ 'nova_disk_type': disk_type},
+ 'sm_config': {},
+ 'tags': []})
+ LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+ ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
+ locals())
+ return vdi_ref
- @classmethod
- def clone_vdi(cls, session, vdi_to_clone_ref):
- """Clones a VDI and return the new VDIs reference."""
- vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
- LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
- '%(vdi_to_clone_ref)s') % locals())
- return vdi_ref
- @classmethod
- def set_vdi_name(cls, session, vdi_uuid, label, description,
- vdi_ref=None):
- vdi_ref = vdi_ref or session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- session.call_xenapi('VDI.set_name_label', vdi_ref, label)
- session.call_xenapi('VDI.set_name_description', vdi_ref, description)
+def copy_vdi(session, sr_ref, vdi_to_copy_ref):
+ """Copy a VDI and return the new VDIs reference."""
+ vdi_ref = session.call_xenapi('VDI.copy', vdi_to_copy_ref, sr_ref)
+ LOG.debug(_('Copied VDI %(vdi_ref)s from VDI '
+ '%(vdi_to_copy_ref)s on %(sr_ref)s.') % locals())
+ return vdi_ref
- @classmethod
- def get_vdi_for_vm_safely(cls, session, vm_ref):
- """Retrieves the primary VDI for a VM"""
- vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
- for vbd in vbd_refs:
- vbd_rec = session.call_xenapi("VBD.get_record", vbd)
- # Convention dictates the primary VDI will be userdevice 0
- if vbd_rec['userdevice'] == '0':
- vdi_rec = session.call_xenapi("VDI.get_record", vbd_rec['VDI'])
- return vbd_rec['VDI'], vdi_rec
- raise exception.NovaException(_("No primary VDI found for %(vm_ref)s")
- % locals())
- @classmethod
- def create_snapshot(cls, session, instance, vm_ref, label):
- """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD"""
- LOG.debug(_("Snapshotting with label '%(label)s'"), locals(),
- instance=instance)
+def clone_vdi(session, vdi_to_clone_ref):
+ """Clones a VDI and return the new VDIs reference."""
+ vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
+ LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
+ '%(vdi_to_clone_ref)s') % locals())
+ return vdi_ref
+
+
+def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
+ vdi_ref = vdi_ref or session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ session.call_xenapi('VDI.set_name_label', vdi_ref, label)
+ session.call_xenapi('VDI.set_name_description', vdi_ref, description)
+
+
+def get_vdi_for_vm_safely(session, vm_ref):
+ """Retrieves the primary VDI for a VM"""
+ vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
+ for vbd in vbd_refs:
+ vbd_rec = session.call_xenapi("VBD.get_record", vbd)
+ # Convention dictates the primary VDI will be userdevice 0
+ if vbd_rec['userdevice'] == '0':
+ vdi_rec = session.call_xenapi("VDI.get_record", vbd_rec['VDI'])
+ return vbd_rec['VDI'], vdi_rec
+ raise exception.NovaException(_("No primary VDI found for %(vm_ref)s")
+ % locals())
- vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
- sr_ref = vm_vdi_rec["SR"]
- original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
+def create_snapshot(session, instance, vm_ref, label):
+ """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD"""
+ LOG.debug(_("Snapshotting with label '%(label)s'"), locals(),
+ instance=instance)
- template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
- template_vdi_rec = cls.get_vdi_for_vm_safely(session,
- template_vm_ref)[1]
- template_vdi_uuid = template_vdi_rec["uuid"]
+ vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ sr_ref = vm_vdi_rec["SR"]
- LOG.debug(_('Created snapshot %(template_vm_ref)s'), locals(),
- instance=instance)
+ original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
- parent_uuid, base_uuid = _wait_for_vhd_coalesce(
+ template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
+ template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vdi_uuid = template_vdi_rec["uuid"]
+
+ LOG.debug(_('Created snapshot %(template_vm_ref)s'), locals(),
+ instance=instance)
+
+ parent_uuid, base_uuid = _wait_for_vhd_coalesce(
session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
- template_vdi_uuids = {'base': base_uuid,
- 'image': parent_uuid,
- 'snap': template_vdi_uuid}
- return template_vm_ref, template_vdi_uuids
+ template_vdi_uuids = {'base': base_uuid,
+ 'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
- @classmethod
- def get_sr_path(cls, session):
- """Return the path to our storage repository
- This is used when we're dealing with VHDs directly, either by taking
- snapshots or by restoring an image in the DISK_VHD format.
- """
- sr_ref = cls.safe_find_sr(session)
- sr_rec = session.call_xenapi("SR.get_record", sr_ref)
- sr_uuid = sr_rec["uuid"]
- return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+def get_sr_path(session):
+ """Return the path to our storage repository
- @classmethod
- def find_cached_image(cls, session, image_id, sr_ref):
- """Returns the vdi-ref of the cached image."""
- for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
- if ('image-id' in vdi_rec['other_config'] and
- vdi_rec['other_config']['image-id'] == image_id):
- return vdi_ref
- return None
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.call_xenapi("SR.get_record", sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
- @classmethod
- def upload_image(cls, context, session, instance, vdi_uuids, image_id):
- """ Requests that the Glance plugin bundle the specified VDIs and
- push them into Glance using the specified human-friendly name.
- """
- # NOTE(sirp): Currently we only support uploading images as VHD, there
- # is no RAW equivalent (yet)
- LOG.debug(_("Asking xapi to upload %(vdi_uuids)s as"
- " ID %(image_id)s"), locals(), instance=instance)
- glance_host, glance_port = glance.pick_glance_api_server()
+def find_cached_image(session, image_id, sr_ref):
+ """Returns the vdi-ref of the cached image."""
+ for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
+ if ('image-id' in vdi_rec['other_config'] and
+ vdi_rec['other_config']['image-id'] == image_id):
+ return vdi_ref
+ return None
- properties = {}
- properties['auto_disk_config'] = instance.auto_disk_config
- properties['os_type'] = instance.os_type or FLAGS.default_os_type
- params = {'vdi_uuids': vdi_uuids,
- 'image_id': image_id,
- 'glance_host': glance_host,
- 'glance_port': glance_port,
- 'sr_path': cls.get_sr_path(session),
- 'auth_token': getattr(context, 'auth_token', None),
- 'properties': properties}
+def upload_image(context, session, instance, vdi_uuids, image_id):
+ """Requests that the Glance plugin bundle the specified VDIs and
+ push them into Glance using the specified human-friendly name.
+ """
+    # NOTE(sirp): Currently we only support uploading images as VHD; there
+    # is no RAW equivalent (yet).
+ LOG.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+ " ID %(image_id)s"), locals(), instance=instance)
- kwargs = {'params': pickle.dumps(params)}
- session.call_plugin('glance', 'upload_vhd', kwargs)
+ glance_host, glance_port = glance.pick_glance_api_server()
- @classmethod
- def resize_disk(cls, session, instance, vdi_ref, instance_type):
- # Copy VDI over to something we can resize
- # NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
- sr_ref = cls.safe_find_sr(session)
- copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
+ properties = {}
+ properties['auto_disk_config'] = instance.auto_disk_config
+ properties['os_type'] = instance.os_type or FLAGS.default_os_type
- try:
- # Resize partition and filesystem down
- cls.auto_configure_disk(session=session,
- vdi_ref=copy_ref,
- new_gb=instance_type['root_gb'])
+ params = {'vdi_uuids': vdi_uuids,
+ 'image_id': image_id,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
+ 'sr_path': get_sr_path(session),
+ 'auth_token': getattr(context, 'auth_token', None),
+ 'properties': properties}
- # Create new VDI
- vdi_size = instance_type['root_gb'] * 1024 * 1024 * 1024
- new_ref = cls.create_vdi(session, sr_ref, instance, 'root',
- vdi_size)
+ kwargs = {'params': pickle.dumps(params)}
+ session.call_plugin('glance', 'upload_vhd', kwargs)
- new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
- # Manually copy contents over
- virtual_size = instance_type['root_gb'] * 1024 * 1024 * 1024
- _copy_partition(session, copy_ref, new_ref, 1, virtual_size)
+def resize_disk(session, instance, vdi_ref, instance_type):
+ # Copy VDI over to something we can resize
+ # NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
+ sr_ref = safe_find_sr(session)
+ copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
- return new_ref, new_uuid
- finally:
- cls.destroy_vdi(session, copy_ref)
+ try:
+ # Resize partition and filesystem down
+ auto_configure_disk(session, copy_ref, instance_type['root_gb'])
- @classmethod
- def auto_configure_disk(cls, session, vdi_ref, new_gb):
- """Partition and resize FS to match the size specified by
- instance_types.root_gb.
+ # Create new VDI
+ vdi_size = instance_type['root_gb'] * 1024 * 1024 * 1024
+ new_ref = create_vdi(session, sr_ref, instance, 'root', vdi_size)
- This is a fail-safe to prevent accidentally destroying data on a disk
- erroneously marked as auto_disk_config=True.
+ new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
- The criteria for allowing resize are:
+ # Manually copy contents over
+ virtual_size = instance_type['root_gb'] * 1024 * 1024 * 1024
+ _copy_partition(session, copy_ref, new_ref, 1, virtual_size)
- 1. 'auto_disk_config' must be true for the instance (and image).
- (If we've made it here, then auto_disk_config=True.)
+ return new_ref, new_uuid
+ finally:
+ destroy_vdi(session, copy_ref)
- 2. The disk must have only one partition.
- 3. The file-system on the one partition must be ext3 or ext4.
- """
- with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- partitions = _get_partitions(dev)
+def auto_configure_disk(session, vdi_ref, new_gb):
+ """Partition and resize FS to match the size specified by
+ instance_types.root_gb.
- if len(partitions) != 1:
- return
+ This is a fail-safe to prevent accidentally destroying data on a disk
+ erroneously marked as auto_disk_config=True.
- _num, start, old_sectors, ptype = partitions[0]
- if ptype in ('ext3', 'ext4'):
- new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
- _resize_part_and_fs(dev, start, old_sectors, new_sectors)
+ The criteria for allowing resize are:
- @classmethod
- def _generate_disk(cls, session, instance, vm_ref, userdevice, name,
- size_mb, fs_type):
- """
- Steps to programmatically generate a disk:
+ 1. 'auto_disk_config' must be true for the instance (and image).
+ (If we've made it here, then auto_disk_config=True.)
- 1. Create VDI of desired size
+ 2. The disk must have only one partition.
- 2. Attach VDI to compute worker
+ 3. The file-system on the one partition must be ext3 or ext4.
+ """
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ partitions = _get_partitions(dev)
- 3. Create partition
+ if len(partitions) != 1:
+ return
- 4. Create VBD between instance VM and VDI
- """
- # 1. Create VDI
- sr_ref = cls.safe_find_sr(session)
- ONE_MEG = 1024 * 1024
- virtual_size = size_mb * ONE_MEG
- vdi_ref = cls.create_vdi(session, sr_ref, instance, name,
- virtual_size)
+ _num, start, old_sectors, ptype = partitions[0]
+ if ptype in ('ext3', 'ext4'):
+ new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
+ _resize_part_and_fs(dev, start, old_sectors, new_sectors)
- try:
- # 2. Attach VDI to compute worker (VBD hotplug)
- with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- # 3. Create partition
- dev_path = utils.make_dev_path(dev)
- utils.execute('parted', '--script', dev_path,
- 'mklabel', 'msdos', run_as_root=True)
-
- partition_start = 0
- partition_end = size_mb
- utils.execute('parted', '--script', dev_path,
- 'mkpart', 'primary',
- str(partition_start),
- str(partition_end),
+
+def _generate_disk(session, instance, vm_ref, userdevice, name, size_mb,
+ fs_type):
+ """
+ Steps to programmatically generate a disk:
+
+ 1. Create VDI of desired size
+
+ 2. Attach VDI to compute worker
+
+ 3. Create partition
+
+ 4. Create VBD between instance VM and VDI
+ """
+ # 1. Create VDI
+ sr_ref = safe_find_sr(session)
+ ONE_MEG = 1024 * 1024
+ virtual_size = size_mb * ONE_MEG
+ vdi_ref = create_vdi(session, sr_ref, instance, name, virtual_size)
+
+ try:
+ # 2. Attach VDI to compute worker (VBD hotplug)
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ # 3. Create partition
+ dev_path = utils.make_dev_path(dev)
+ utils.execute('parted', '--script', dev_path,
+ 'mklabel', 'msdos', run_as_root=True)
+
+ partition_start = 0
+ partition_end = size_mb
+ utils.execute('parted', '--script', dev_path,
+ 'mkpart', 'primary',
+ str(partition_start),
+ str(partition_end),
+ run_as_root=True)
+
+ partition_path = utils.make_dev_path(dev, partition=1)
+
+ if fs_type == 'linux-swap':
+ utils.execute('mkswap', partition_path, run_as_root=True)
+ elif fs_type is not None:
+ utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
- partition_path = utils.make_dev_path(dev, partition=1)
+ # 4. Create VBD between instance VM and swap VDI
+ create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ destroy_vdi(session, vdi_ref)
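The four numbered steps boil down to a short command sequence; roughly what step 3 issues for a hypothetical 1024 MB swap disk that surfaced as /dev/xvdb (device name and size illustrative, not from a real run):

    # Argument lists mirroring the utils.execute(..., run_as_root=True)
    # calls above, for an assumed 1024 MB 'linux-swap' disk on /dev/xvdb.
    size_mb = 1024
    dev_path = '/dev/xvdb'
    commands = [
        ['parted', '--script', dev_path, 'mklabel', 'msdos'],
        ['parted', '--script', dev_path, 'mkpart', 'primary',
         '0', str(size_mb)],
        ['mkswap', dev_path + '1'],          # fs_type == 'linux-swap'
    ]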
- if fs_type == 'linux-swap':
- utils.execute('mkswap', partition_path,
- run_as_root=True)
- elif fs_type is not None:
- utils.execute('mkfs', '-t', fs_type, partition_path,
- run_as_root=True)
- # 4. Create VBD between instance VM and swap VDI
- cls.create_vbd(session, vm_ref, vdi_ref, userdevice,
- bootable=False)
- except Exception:
- with excutils.save_and_reraise_exception():
- cls.destroy_vdi(session, vdi_ref)
+def generate_swap(session, instance, vm_ref, userdevice, swap_mb):
+ # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
+ # partition because that is what parted supports.
+ is_windows = instance.os_type == "windows"
+ fs_type = "vfat" if is_windows else "linux-swap"
- @classmethod
- def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb):
- # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
- # partition because that is what parted supports.
- is_windows = instance.os_type == "windows"
- fs_type = "vfat" if is_windows else "linux-swap"
+ _generate_disk(session, instance, vm_ref, userdevice, 'swap', swap_mb,
+ fs_type)
- cls._generate_disk(session, instance, vm_ref, userdevice,
- 'swap', swap_mb, fs_type)
- @classmethod
- def generate_ephemeral(cls, session, instance, vm_ref, userdevice,
- size_gb):
- cls._generate_disk(session, instance, vm_ref, userdevice,
- 'ephemeral', size_gb * 1024,
- FLAGS.default_ephemeral_format)
+def generate_ephemeral(session, instance, vm_ref, userdevice, size_gb):
+ _generate_disk(session, instance, vm_ref, userdevice, 'ephemeral',
+ size_gb * 1024, FLAGS.default_ephemeral_format)
- @classmethod
- def create_kernel_image(cls, context, session, instance, image, user_id,
- project_id, image_type):
- """Creates kernel/ramdisk file from the image stored in the cache.
- If the image is not present in the cache, it streams it from glance.
-
- Returns: A list of dictionaries that describe VDIs
- """
- filename = ""
- if FLAGS.cache_images:
- args = {}
- args['cached-image'] = image
- args['new-image-uuid'] = str(uuid.uuid4())
- filename = session.call_plugin('glance', 'create_kernel_ramdisk',
- args)
- if filename == "":
- return cls.fetch_image(context, session, instance, image,
- image_type)
- else:
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
+def create_kernel_image(context, session, instance, image, user_id,
+ project_id, image_type):
+ """Creates kernel/ramdisk file from the image stored in the cache.
+ If the image is not present in the cache, it streams it from glance.
- @classmethod
- def _create_cached_image(cls, context, session, instance, image,
- image_type):
- sr_ref = cls.safe_find_sr(session)
- sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
- vdi_return_list = []
-
- if FLAGS.use_cow_images and sr_type != "ext":
- LOG.warning(_("Fast cloning is only supported on default local SR "
- "of type ext. SR on this system was found to be of "
- "type %(sr_type)s. Ignoring the cow flag.")
- % locals())
-
- vdi_ref = cls.find_cached_image(session, image, sr_ref)
- if vdi_ref is None:
- vdis = cls.fetch_image(context, session, instance, image,
+ Returns: A dict of dicts, keyed by disk type, describing the created file or VDI
+ """
+ filename = ""
+ if FLAGS.cache_images:
+ args = {}
+ args['cached-image'] = image
+ args['new-image-uuid'] = str(uuid.uuid4())
+ filename = session.call_plugin('glance', 'create_kernel_ramdisk',
+ args)
+
+ if filename == "":
+ return fetch_image(context, session, instance, image, image_type)
+ else:
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
+
+
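The return convention changed with this refactor: callers now receive a mapping keyed by disk type instead of a list of dicts carrying 'vdi_type'/'vdi_uuid' keys. For kernels and ramdisks the entry describes a dom0 file rather than a VDI; an illustrative shape (path invented):

    # Whether served from the cache or streamed via fetch_image, kernel
    # and ramdisk images end up as dom0 files, so 'uuid' stays None.
    vdis = {'kernel': {'uuid': None,
                       'file': '/boot/guest/abcd1234.kernel'}}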
+def _create_cached_image(context, session, instance, image, image_type):
+ sr_ref = safe_find_sr(session)
+ sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
+ vdis = {}
+
+ if FLAGS.use_cow_images and sr_type != "ext":
+ LOG.warning(_("Fast cloning is only supported on default local SR "
+ "of type ext. SR on this system was found to be of "
+ "type %(sr_type)s. Ignoring the cow flag.")
+ % locals())
+
+ root_vdi_ref = find_cached_image(session, image, sr_ref)
+ if root_vdi_ref is None:
+ fetched_vdis = fetch_image(context, session, instance, image,
image_type)
+ root_vdi = fetched_vdis['root']
+ root_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ root_vdi['uuid'])
+ set_vdi_name(session, root_vdi['uuid'], 'Glance Image %s' % image,
+ 'root', vdi_ref=root_vdi_ref)
+ session.call_xenapi('VDI.add_to_other_config',
+ root_vdi_ref, 'image-id', str(image))
+
+ for vdi_type, vdi in fetched_vdis.iteritems():
vdi_ref = session.call_xenapi('VDI.get_by_uuid',
- vdis[0]['vdi_uuid'])
- cls.set_vdi_name(session, vdis[0]['vdi_uuid'],
- 'Glance Image %s' % image, 'root',
- vdi_ref=vdi_ref)
+ vdi['uuid'])
session.call_xenapi('VDI.add_to_other_config',
- vdi_ref, 'image-id', str(image))
+ vdi_ref, 'nova_disk_type',
+ vdi_type)
- for vdi in vdis:
- if vdi["vdi_type"] == "swap":
- session.call_xenapi('VDI.add_to_other_config',
- vdi_ref, "swap-disk",
- str(vdi['vdi_uuid']))
+ if vdi_type == 'swap':
+ session.call_xenapi('VDI.add_to_other_config',
+ root_vdi_ref, 'swap-disk',
+ str(vdi['uuid']))
- if FLAGS.use_cow_images and sr_type == 'ext':
- new_vdi_ref = cls.clone_vdi(session, vdi_ref)
- else:
- new_vdi_ref = cls.copy_vdi(session, sr_ref, vdi_ref)
-
- # Set the name label for the image we just created and remove image id
- # field from other-config.
- session.call_xenapi('VDI.remove_from_other_config',
- new_vdi_ref, 'image-id')
-
- vdi_return_list.append(dict(
- vdi_type=("root" if image_type == ImageType.DISK_VHD
- else ImageType.to_string(image_type)),
- vdi_uuid=session.call_xenapi('VDI.get_uuid', new_vdi_ref),
- file=None))
-
- # Create a swap disk if the glance image had one associated with it.
- vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
- if 'swap-disk' in vdi_rec['other_config']:
- swap_disk_uuid = vdi_rec['other_config']['swap-disk']
- swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
- swap_disk_uuid)
- new_swap_vdi_ref = cls.copy_vdi(session, sr_ref, swap_vdi_ref)
- new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
- new_swap_vdi_ref)
- vdi_return_list.append(dict(vdi_type="swap",
- vdi_uuid=new_swap_vdi_uuid,
- file=None))
-
- return vdi_return_list
+ if FLAGS.use_cow_images and sr_type == 'ext':
+ new_vdi_ref = clone_vdi(session, root_vdi_ref)
+ else:
+ new_vdi_ref = copy_vdi(session, sr_ref, root_vdi_ref)
- @classmethod
- def create_image(cls, context, session, instance, image, image_type):
- """Creates VDI from the image stored in the local cache. If the image
- is not present in the cache, it streams it from glance.
-
- Returns: A list of dictionaries that describe VDIs
- """
- if FLAGS.cache_images is True and image_type != ImageType.DISK_ISO:
- vdi_return_list = cls._create_cached_image(context, session,
- instance, image,
- image_type)
- else:
- # If caching is disabled, we do not have to keep a copy of the
- # image. Fetch the image from glance.
- vdi_return_list = cls.fetch_image(context, session, instance,
- instance.image_ref, image_type)
+ # Set the name label for the image we just created and remove image id
+ # field from other-config.
+ session.call_xenapi('VDI.remove_from_other_config',
+ new_vdi_ref, 'image-id')
- # Set the name label and description to easily identify what
- # instance and disk it's for
- for vdi in vdi_return_list:
- cls.set_vdi_name(session, vdi['vdi_uuid'], instance.name,
- vdi['vdi_type'])
+ vdi_type = ("root" if image_type == ImageType.DISK_VHD
+ else ImageType.to_string(image_type))
+ vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
+ vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
- return vdi_return_list
+ # Create a swap disk if the glance image had one associated with it.
+ vdi_rec = session.call_xenapi('VDI.get_record', root_vdi_ref)
+ if 'swap-disk' in vdi_rec['other_config']:
+ swap_disk_uuid = vdi_rec['other_config']['swap-disk']
+ swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ swap_disk_uuid)
+ new_swap_vdi_ref = copy_vdi(session, sr_ref, swap_vdi_ref)
+ new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
+ new_swap_vdi_ref)
+ vdis['swap'] = dict(uuid=new_swap_vdi_uuid, file=None)
- @classmethod
- def fetch_image(cls, context, session, instance, image, image_type):
- """Fetch image from glance based on image type.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- if image_type == ImageType.DISK_VHD:
- return cls._fetch_image_glance_vhd(context, session, instance,
- image)
- else:
- return cls._fetch_image_glance_disk(context, session, instance,
- image, image_type)
+ return vdis
- @classmethod
- def _retry_glance_download_vhd(cls, context, session, image):
- # NOTE(sirp): The Glance plugin runs under Python 2.4
- # which does not have the `uuid` module. To work around this,
- # we generate the uuids here (under Python 2.6+) and
- # pass them as arguments
- uuid_stack = [str(uuid.uuid4()) for i in xrange(3)]
-
- max_attempts = FLAGS.glance_num_retries + 1
- sleep_time = 0.5
- for attempt_num in xrange(1, max_attempts + 1):
- glance_host, glance_port = glance.pick_glance_api_server()
- params = {'image_id': image,
- 'glance_host': glance_host,
- 'glance_port': glance_port,
- 'uuid_stack': uuid_stack,
- 'sr_path': cls.get_sr_path(session),
- 'auth_token': getattr(context, 'auth_token', None)}
- kwargs = {'params': pickle.dumps(params)}
-
- LOG.info(_('download_vhd %(image)s '
- 'attempt %(attempt_num)d/%(max_attempts)d '
- 'from %(glance_host)s:%(glance_port)s') % locals())
- try:
- result = session.call_plugin('glance', 'download_vhd', kwargs)
- return json.loads(result)
- except session.XenAPI.Failure as exc:
- _type, _method, error = exc.details[:3]
- if error == 'RetryableError':
- LOG.error(_('download_vhd failed: %r') %
- (exc.details[3:],))
- else:
- raise
+def create_image(context, session, instance, image, image_type):
+ """Creates VDI from the image stored in the local cache. If the image
+ is not present in the cache, it streams it from glance.
- time.sleep(sleep_time)
- sleep_time = min(2 * sleep_time, 15)
+ Returns: A dict of dicts describing VDIs, keyed by disk type
+ """
+ if FLAGS.cache_images is True and image_type != ImageType.DISK_ISO:
+ vdis = _create_cached_image(context, session, instance, image,
+ image_type)
+ else:
+ # If caching is disabled, we do not have to keep a copy of the
+ # image. Fetch the image from glance.
+ vdis = fetch_image(context, session, instance,
+ instance.image_ref, image_type)
- raise exception.CouldNotFetchImage(image=image)
+ # Set the name label and description to easily identify what
+ # instance and disk it's for
+ for vdi_type, vdi in vdis.iteritems():
+ set_vdi_name(session, vdi['uuid'], instance.name, vdi_type)
- @classmethod
- def _fetch_image_glance_vhd(cls, context, session, instance, image):
- """Tell glance to download an image and put the VHDs into the SR
+ return vdis
- Returns: A list of dictionaries that describe VDIs
- """
- LOG.debug(_("Asking xapi to fetch vhd image %(image)s"), locals(),
- instance=instance)
- sr_ref = cls.safe_find_sr(session)
- vdis = cls._retry_glance_download_vhd(context, session, image)
+def fetch_image(context, session, instance, image, image_type):
+ """Fetch image from glance based on image type.
- # 'download_vhd' will return a list of dictionaries describing VDIs.
- # The dictionary will contain 'vdi_type' and 'vdi_uuid' keys.
- # 'vdi_type' can be 'root' or 'swap' right now.
- for vdi in vdis:
- LOG.debug(_("xapi 'download_vhd' returned VDI of "
- "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"),
- vdi, instance=instance)
+ Returns: A dict of dicts describing VDIs, keyed by disk type; for
+ KERNEL and RAMDISK images the single entry carries a dom0 filename
+ and a None uuid
+ """
+ if image_type == ImageType.DISK_VHD:
+ return _fetch_image_glance_vhd(context, session, instance, image)
+ else:
+ return _fetch_image_glance_disk(context, session, instance, image,
+ image_type)
- cls.scan_sr(session, sr_ref)
- # Pull out the UUID of the first VDI (which is the os VDI)
- os_vdi_uuid = vdis[0]['vdi_uuid']
+def _retry_glance_download_vhd(context, session, image):
+ # NOTE(sirp): The Glance plugin runs under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(3)]
- # Set the name-label to ease debugging
- cls.set_vdi_name(session, os_vdi_uuid, instance.name,
- vdis[0]['vdi_type'])
+ max_attempts = FLAGS.glance_num_retries + 1
+ sleep_time = 0.5
+ for attempt_num in xrange(1, max_attempts + 1):
+ glance_host, glance_port = glance.pick_glance_api_server()
+ params = {'image_id': image,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': get_sr_path(session),
+ 'auth_token': getattr(context, 'auth_token', None)}
+ kwargs = {'params': pickle.dumps(params)}
- cls._check_vdi_size(context, session, instance, os_vdi_uuid)
- return vdis
+ LOG.info(_('download_vhd %(image)s '
+ 'attempt %(attempt_num)d/%(max_attempts)d '
+ 'from %(glance_host)s:%(glance_port)s') % locals())
- @classmethod
- def _get_vdi_chain_size(cls, session, vdi_uuid):
- """Compute the total size of a VDI chain, starting with the specified
- VDI UUID.
-
- This will walk the VDI chain to the root, add the size of each VDI into
- the total.
- """
- size_bytes = 0
- for vdi_rec in walk_vdi_chain(session, vdi_uuid):
- cur_vdi_uuid = vdi_rec['uuid']
- vdi_size_bytes = int(vdi_rec['physical_utilisation'])
- LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
- '%(vdi_size_bytes)d'), locals())
- size_bytes += vdi_size_bytes
- return size_bytes
+ try:
+ result = session.call_plugin('glance', 'download_vhd', kwargs)
+ return json.loads(result)
+ except session.XenAPI.Failure as exc:
+ _type, _method, error = exc.details[:3]
+ if error == 'RetryableError':
+ LOG.error(_('download_vhd failed: %r') %
+ (exc.details[3:],))
+ else:
+ raise
- @classmethod
- def _check_vdi_size(cls, context, session, instance, vdi_uuid):
- size_bytes = cls._get_vdi_chain_size(session, vdi_uuid)
+ time.sleep(sleep_time)
+ sleep_time = min(2 * sleep_time, 15)
- # FIXME(jk0): this was copied directly from compute.manager.py, let's
- # refactor this to a common area
- instance_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(instance_type_id)
- allowed_size_gb = instance_type['root_gb']
- allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+ raise exception.CouldNotFetchImage(image=image)
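The retry loop doubles its sleep between attempts and caps it at 15 seconds. A quick sketch of the resulting delay schedule, assuming enough attempts remain:

    # Delays between successive download_vhd attempts, per the loop above.
    sleep_time = 0.5
    delays = []
    for _attempt in xrange(6):
        delays.append(sleep_time)
        sleep_time = min(2 * sleep_time, 15)
    # delays == [0.5, 1.0, 2.0, 4.0, 8.0, 15]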
- LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
- "%(allowed_size_bytes)d"), locals(), instance=instance)
- if size_bytes > allowed_size_bytes:
- LOG.info(_("Image size %(size_bytes)d exceeded instance_type "
- "allowed size %(allowed_size_bytes)d"),
- locals(), instance=instance)
- raise exception.ImageTooLarge()
+def _fetch_image_glance_vhd(context, session, instance, image):
+ """Tell glance to download an image and put the VHDs into the SR
- @classmethod
- def _fetch_image_glance_disk(cls, context, session, instance, image,
- image_type):
- """Fetch the image from Glance
-
- NOTE:
- Unlike _fetch_image_glance_vhd, this method does not use the Glance
- plugin; instead, it streams the disks through domU to the VDI
- directly.
-
- Returns: A single filename if image_type is KERNEL_RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- # FIXME(sirp): Since the Glance plugin seems to be required for the
- # VHD disk, it may be worth using the plugin for both VHD and RAW and
- # DISK restores
- image_type_str = ImageType.to_string(image_type)
- LOG.debug(_("Fetching image %(image)s, type %(image_type_str)s"),
- locals(), instance=instance)
-
- if image_type == ImageType.DISK_ISO:
- sr_ref = cls.safe_find_iso_sr(session)
+ Returns: A dict of dicts describing VDIs, keyed by disk type
+ """
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s"), locals(),
+ instance=instance)
+ sr_ref = safe_find_sr(session)
+
+ fetched_vdis = _retry_glance_download_vhd(context, session, image)
+
+ # 'download_vhd' will return a list of dictionaries describing VDIs.
+ # The dictionary will contain 'vdi_type' and 'vdi_uuid' keys.
+ # 'vdi_type' can be 'root' or 'swap' right now.
+ for vdi in fetched_vdis:
+ LOG.debug(_("xapi 'download_vhd' returned VDI of "
+ "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"),
+ vdi, instance=instance)
+
+ scan_sr(session, sr_ref)
+
+ vdis = {}
+ for vdi in fetched_vdis:
+ vdis[vdi['vdi_type']] = dict(uuid=vdi['vdi_uuid'], file=None)
+
+ # Pull out the UUID of the root VDI
+ root_vdi_uuid = vdis['root']['uuid']
+
+ # Set the name-label to ease debugging
+ set_vdi_name(session, root_vdi_uuid, instance.name, 'root')
+
+ _check_vdi_size(context, session, instance, root_vdi_uuid)
+ return vdis
+
+
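The glance plugin still speaks the old list-of-dicts protocol; the loop above re-keys its output into the new mapping. The transformation, applied to hypothetical plugin output (uuids invented):

    fetched_vdis = [{'vdi_type': 'root', 'vdi_uuid': 'aaaa-1111'},
                    {'vdi_type': 'swap', 'vdi_uuid': 'bbbb-2222'}]

    vdis = {}
    for vdi in fetched_vdis:
        vdis[vdi['vdi_type']] = dict(uuid=vdi['vdi_uuid'], file=None)

    # vdis == {'root': {'uuid': 'aaaa-1111', 'file': None},
    #          'swap': {'uuid': 'bbbb-2222', 'file': None}}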
+def _get_vdi_chain_size(session, vdi_uuid):
+ """Compute the total size of a VDI chain, starting with the specified
+ VDI UUID.
+
+ This will walk the VDI chain to the root, add the size of each VDI into
+ the total.
+ """
+ size_bytes = 0
+ for vdi_rec in walk_vdi_chain(session, vdi_uuid):
+ cur_vdi_uuid = vdi_rec['uuid']
+ vdi_size_bytes = int(vdi_rec['physical_utilisation'])
+ LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
+ '%(vdi_size_bytes)d'), locals())
+ size_bytes += vdi_size_bytes
+ return size_bytes
+
+
+def _check_vdi_size(context, session, instance, vdi_uuid):
+ size_bytes = _get_vdi_chain_size(session, vdi_uuid)
+
+ # FIXME(jk0): this was copied directly from compute.manager.py, let's
+ # refactor this to a common area
+ instance_type_id = instance['instance_type_id']
+ instance_type = instance_types.get_instance_type(instance_type_id)
+ allowed_size_gb = instance_type['root_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d"), locals(), instance=instance)
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image size %(size_bytes)d exceeded instance_type "
+ "allowed size %(allowed_size_bytes)d"),
+ locals(), instance=instance)
+ raise exception.ImageTooLarge()
+
+
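The check sums physical_utilisation across the whole VHD chain, so a heavily-snapshotted image can trip it even when each individual VDI is small. A worked example with invented figures:

    # Invented chain: two VDIs on an instance type with root_gb = 10.
    chain_utilisation = [5 * 1024 ** 3, 6 * 1024 ** 3]   # bytes per VDI
    size_bytes = sum(chain_utilisation)                  # 11 GB in total
    allowed_size_bytes = 10 * 1024 * 1024 * 1024
    assert size_bytes > allowed_size_bytes               # -> ImageTooLarge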
+def _fetch_image_glance_disk(context, session, instance, image, image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
+
+ Returns: A dict of dicts describing VDIs, keyed by disk type; for
+ KERNEL and RAMDISK images the entry carries a dom0 filename and a
+ None uuid
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+ # VHD disk, it may be worth using the plugin for both VHD and RAW and
+ # DISK restores
+ image_type_str = ImageType.to_string(image_type)
+ LOG.debug(_("Fetching image %(image)s, type %(image_type_str)s"),
+ locals(), instance=instance)
+
+ if image_type == ImageType.DISK_ISO:
+ sr_ref = safe_find_iso_sr(session)
+ else:
+ sr_ref = safe_find_sr(session)
+
+ glance_client, image_id = glance.get_glance_client(context, image)
+ glance_client.set_auth_token(getattr(context, 'auth_token', None))
+ meta, image_file = glance_client.get_image(image_id)
+ virtual_size = int(meta['size'])
+ vdi_size = virtual_size
+ LOG.debug(_("Size for image %(image)s: %(virtual_size)d"), locals(),
+ instance=instance)
+ if image_type == ImageType.DISK:
+ # Make room for MBR.
+ vdi_size += MBR_SIZE_BYTES
+ elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
+ vdi_size > FLAGS.max_kernel_ramdisk_size):
+ max_size = FLAGS.max_kernel_ramdisk_size
+ raise exception.NovaException(
+ _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
+ "max %(max_size)d bytes") % locals())
+
+ vdi_ref = create_vdi(session, sr_ref, instance, image_type_str, vdi_size)
+ # From this point we have a VDI on the Xen host;
+ # if anything goes wrong, we need to remember its uuid.
+ try:
+ filename = None
+ vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
+
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ _stream_disk(dev, image_type, virtual_size, image_file)
+
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ # We need to invoke a plugin for copying the
+ # content of the VDI into the proper path.
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"),
+ vdi_ref, instance=instance)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+
+ # Let the plugin copy the correct number of bytes.
+ args['image-size'] = str(vdi_size)
+ if FLAGS.cache_images:
+ args['cached-image'] = image
+ filename = session.call_plugin('glance', fn, args)
+
+ # Remove the VDI as it is not needed anymore.
+ destroy_vdi(session, vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
+ instance=instance)
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
else:
- sr_ref = cls.safe_find_sr(session)
-
- glance_client, image_id = glance.get_glance_client(context, image)
- glance_client.set_auth_token(getattr(context, 'auth_token', None))
- meta, image_file = glance_client.get_image(image_id)
- virtual_size = int(meta['size'])
- vdi_size = virtual_size
- LOG.debug(_("Size for image %(image)s: %(virtual_size)d"), locals(),
- instance=instance)
- if image_type == ImageType.DISK:
- # Make room for MBR.
- vdi_size += MBR_SIZE_BYTES
- elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
- vdi_size > FLAGS.max_kernel_ramdisk_size):
- max_size = FLAGS.max_kernel_ramdisk_size
- raise exception.NovaException(
- _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
- "max %(max_size)d bytes") % locals())
-
- vdi_ref = cls.create_vdi(session, sr_ref, instance, image_type_str,
- vdi_size)
- # From this point we have a VDI on Xen host;
- # If anything goes wrong, we need to remember its uuid.
- try:
- filename = None
- vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
-
- with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- _stream_disk(dev, image_type, virtual_size, image_file)
-
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- # We need to invoke a plugin for copying the
- # content of the VDI into the proper path.
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"),
- vdi_ref, instance=instance)
- fn = "copy_kernel_vdi"
- args = {}
- args['vdi-ref'] = vdi_ref
-
- # Let the plugin copy the correct number of bytes.
- args['image-size'] = str(vdi_size)
- if FLAGS.cache_images:
- args['cached-image'] = image
- filename = session.call_plugin('glance', fn, args)
-
- # Remove the VDI as it is not needed anymore.
- cls.destroy_vdi(session, vdi_ref)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
- instance=instance)
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
- else:
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=None)]
- except (session.XenAPI.Failure, IOError, OSError) as e:
- # We look for XenAPI and OS failures.
- LOG.exception(_("Failed to fetch glance image"),
- instance=instance)
- e.args = e.args + ([dict(vdi_type=ImageType.
- to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=filename)],)
- raise e
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=vdi_uuid, file=None)}
+ except (session.XenAPI.Failure, IOError, OSError) as e:
+ # We look for XenAPI and OS failures.
+ LOG.exception(_("Failed to fetch glance image"),
+ instance=instance)
+ e.args = e.args + ([dict(type=ImageType.to_string(image_type),
+ uuid=vdi_uuid,
+ file=filename)],)
+ raise e
+
+
+def determine_disk_image_type(image_meta):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image_type field to
+ determine the image_type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ disk_format = image_meta['disk_format']
- @classmethod
- def determine_disk_image_type(cls, image_meta):
- """Disk Image Types are used to determine where the kernel will reside
- within an image. To figure out which type we're dealing with, we use
- the following rules:
-
- 1. If we're using Glance, we can use the image_type field to
- determine the image_type
-
- 2. If we're not using Glance, then we need to deduce this based on
- whether a kernel_id is specified.
- """
- disk_format = image_meta['disk_format']
-
- disk_format_map = {
- 'ami': 'DISK',
- 'aki': 'KERNEL',
- 'ari': 'RAMDISK',
- 'raw': 'DISK_RAW',
- 'vhd': 'DISK_VHD',
- 'iso': 'DISK_ISO',
- }
+ disk_format_map = {
+ 'ami': 'DISK',
+ 'aki': 'KERNEL',
+ 'ari': 'RAMDISK',
+ 'raw': 'DISK_RAW',
+ 'vhd': 'DISK_VHD',
+ 'iso': 'DISK_ISO',
+ }
- try:
- image_type_str = disk_format_map[disk_format]
- except KeyError:
- raise exception.InvalidDiskFormat(disk_format=disk_format)
+ try:
+ image_type_str = disk_format_map[disk_format]
+ except KeyError:
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
- image_type = getattr(ImageType, image_type_str)
+ image_type = getattr(ImageType, image_type_str)
- image_ref = image_meta['id']
- msg = _("Detected %(image_type_str)s format for image %(image_ref)s")
- LOG.debug(msg % locals())
+ image_ref = image_meta['id']
+ msg = _("Detected %(image_type_str)s format for image %(image_ref)s")
+ LOG.debug(msg % locals())
- return image_type
+ return image_type
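In practice the whole routine is a table lookup plus a getattr; for hypothetical Glance metadata with disk_format 'vhd' (the stand-in class and value below are illustrative, not the real constants):

    class ImageType(object):                 # stand-in for the real class
        DISK_VHD = 'DISK_VHD'                # value illustrative

    disk_format_map = {'vhd': 'DISK_VHD'}    # abbreviated from the map above

    image_meta = {'id': 'some-image-id', 'disk_format': 'vhd'}
    image_type = getattr(ImageType,
                         disk_format_map[image_meta['disk_format']])
    # Unknown formats ('qcow2', say) raise InvalidDiskFormat instead.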
- @classmethod
- def determine_is_pv(cls, session, vdi_ref, disk_image_type, os_type):
- """
- Determine whether the VM will use a paravirtualized kernel or if it
- will use hardware virtualization.
- 1. Glance (VHD): then we use `os_type`, raise if not set
+def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
+ """
+ Determine whether the VM will use a paravirtualized kernel or if it
+ will use hardware virtualization.
- 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
- available
+ 1. Glance (VHD): then we use `os_type`, raise if not set
- 3. Glance (DISK): pv is assumed
+ 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
- 4. Glance (DISK_ISO): no pv is assumed
- """
+ 3. Glance (DISK): pv is assumed
- LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- if disk_image_type == ImageType.DISK_VHD:
- # 1. VHD
- if os_type == 'windows':
- is_pv = False
- else:
- is_pv = True
- elif disk_image_type == ImageType.DISK_RAW:
- # 2. RAW
- with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
- is_pv = _is_vdi_pv(dev)
- elif disk_image_type == ImageType.DISK:
- # 3. Disk
- is_pv = True
- elif disk_image_type == ImageType.DISK_ISO:
- # 4. ISO
+ 4. Glance (DISK_ISO): no pv is assumed
+ """
+
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+ if disk_image_type == ImageType.DISK_VHD:
+ # 1. VHD
+ if os_type == 'windows':
is_pv = False
else:
- msg = _("Unknown image format %(disk_image_type)s") % locals()
- raise exception.NovaException(msg)
+ is_pv = True
+ elif disk_image_type == ImageType.DISK_RAW:
+ # 2. RAW
+ with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
+ is_pv = _is_vdi_pv(dev)
+ elif disk_image_type == ImageType.DISK:
+ # 3. Disk
+ is_pv = True
+ elif disk_image_type == ImageType.DISK_ISO:
+ # 4. ISO
+ is_pv = False
+ else:
+ msg = _("Unknown image format %(disk_image_type)s") % locals()
+ raise exception.NovaException(msg)
- return is_pv
+ return is_pv
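Condensed, the branches amount to a small decision table; a sketch only, since DISK_RAW is the one case that genuinely inspects the disk (via _is_vdi_pv on the attached device):

    # Sketch; type names are strings here, not the real ImageType values.
    def is_pv_sketch(disk_image_type, os_type):
        if disk_image_type == 'DISK_VHD':
            return os_type != 'windows'
        if disk_image_type == 'DISK':
            return True
        if disk_image_type == 'DISK_ISO':
            return False
        raise ValueError('DISK_RAW must probe the attached VDI')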
- @classmethod
- def set_vm_name_label(cls, session, vm_ref, name_label):
- session.call_xenapi("VM.set_name_label", vm_ref, name_label)
- @classmethod
- def list_vms(cls, session):
- for vm_ref, vm_rec in cls.get_all_refs_and_recs(session, 'VM'):
- if (vm_rec["resident_on"] != session.get_xenapi_host() or
- vm_rec["is_a_template"] or vm_rec["is_control_domain"]):
- continue
- else:
- yield vm_ref, vm_rec
+def set_vm_name_label(session, vm_ref, name_label):
+ session.call_xenapi("VM.set_name_label", vm_ref, name_label)
- @classmethod
- def lookup(cls, session, name_label):
- """Look the instance up and return it if available"""
- vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
- n = len(vm_refs)
- if n == 0:
- return None
- elif n > 1:
- raise exception.InstanceExists(name=name_label)
+
+def list_vms(session):
+ for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
+ if (vm_rec["resident_on"] != session.get_xenapi_host() or
+ vm_rec["is_a_template"] or vm_rec["is_control_domain"]):
+ continue
else:
- return vm_refs[0]
+ yield vm_ref, vm_rec
- @classmethod
- def lookup_vm_vdis(cls, session, vm_ref):
- """Look for the VDIs that are attached to the VM"""
- # Firstly we get the VBDs, then the VDIs.
- # TODO(Armando): do we leave the read-only devices?
- vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
- vdi_refs = []
- if vbd_refs:
- for vbd_ref in vbd_refs:
- try:
- vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
- # Test valid VDI
- record = session.call_xenapi("VDI.get_record", vdi_ref)
- LOG.debug(_('VDI %s is still available'), record['uuid'])
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- else:
- vdi_refs.append(vdi_ref)
- return vdi_refs
- @classmethod
- def preconfigure_instance(cls, session, instance, vdi_ref, network_info):
- """Makes alterations to the image before launching as part of spawn.
- """
-
- # As mounting the image VDI is expensive, we only want do do it once,
- # if at all, so determine whether it's required first, and then do
- # everything
- mount_required = False
- key, net, metadata = _prepare_injectables(instance, network_info)
- mount_required = key or net or metadata
- if not mount_required:
- return
+def lookup(session, name_label):
+ """Look the instance up and return it if available"""
+ vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
+ n = len(vm_refs)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise exception.InstanceExists(name=name_label)
+ else:
+ return vm_refs[0]
- with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- _mounted_processing(dev, key, net, metadata)
- @classmethod
- def lookup_kernel_ramdisk(cls, session, vm):
- vm_rec = session.call_xenapi("VM.get_record", vm)
- if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
- return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
- else:
- return (None, None)
+def lookup_vm_vdis(session, vm_ref):
+ """Look for the VDIs that are attached to the VM"""
+ # Firstly we get the VBDs, then the VDIs.
+ # TODO(Armando): do we leave the read-only devices?
+ vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
+ vdi_refs = []
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
+ try:
+ vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
+ # Test valid VDI
+ record = session.call_xenapi("VDI.get_record", vdi_ref)
+ LOG.debug(_('VDI %s is still available'), record['uuid'])
+ except session.XenAPI.Failure as exc:
+ LOG.exception(exc)
+ else:
+ vdi_refs.append(vdi_ref)
+ return vdi_refs
- @classmethod
- def is_snapshot(cls, session, vm):
- vm_rec = session.call_xenapi("VM.get_record", vm)
- if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
- return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
- else:
- return False
- @classmethod
- def compile_info(cls, record):
- """Fill record with VM status information"""
- return {'state': XENAPI_POWER_STATE[record['power_state']],
- 'max_mem': long(record['memory_static_max']) >> 10,
- 'mem': long(record['memory_dynamic_max']) >> 10,
- 'num_cpu': record['VCPUs_max'],
- 'cpu_time': 0}
+def preconfigure_instance(session, instance, vdi_ref, network_info):
+ """Makes alterations to the image before launching as part of spawn.
+ """
- @classmethod
- def compile_diagnostics(cls, record):
- """Compile VM diagnostics data"""
- try:
- keys = []
- diags = {}
- vm_uuid = record["uuid"]
- xml = get_rrd(get_rrd_server(), vm_uuid)
- if xml:
- rrd = minidom.parseString(xml)
- for i, node in enumerate(rrd.firstChild.childNodes):
- # Provide the last update of the information
- if node.localName == 'lastupdate':
- diags['last_update'] = node.firstChild.data
-
- # Create a list of the diagnostic keys (in their order)
- if node.localName == 'ds':
- ref = node.childNodes
- # Name and Value
- if len(ref) > 6:
- keys.append(ref[0].firstChild.data)
-
- # Read the last row of the first RRA to get the latest info
- if node.localName == 'rra':
- rows = node.childNodes[4].childNodes
- last_row = rows[rows.length - 1].childNodes
- for j, value in enumerate(last_row):
- diags[keys[j]] = value.firstChild.data
- break
-
- return diags
- except expat.ExpatError as e:
- LOG.exception(_('Unable to parse rrd of %(vm_uuid)s') % locals())
- return {"Unable to retrieve diagnostics": e}
+ # As mounting the image VDI is expensive, we only want to do it once,
+ # if at all, so determine whether it's required first, and then do
+ # everything
+ mount_required = False
+ key, net, metadata = _prepare_injectables(instance, network_info)
+ mount_required = key or net or metadata
+ if not mount_required:
+ return
+
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ _mounted_processing(dev, key, net, metadata)
+
+
+def lookup_kernel_ramdisk(session, vm):
+ vm_rec = session.call_xenapi("VM.get_record", vm)
+ if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
+ return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
+ else:
+ return (None, None)
- @classmethod
- def compile_metrics(cls, start_time, stop_time=None):
- """Compile bandwidth usage, cpu, and disk metrics for all VMs on
- this host"""
- start_time = int(start_time)
- xml = get_rrd_updates(get_rrd_server(), start_time)
+def is_snapshot(session, vm):
+ vm_rec = session.call_xenapi("VM.get_record", vm)
+ if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
+ return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
+ else:
+ return False
+
+
+def compile_info(record):
+ """Fill record with VM status information"""
+ return {'state': XENAPI_POWER_STATE[record['power_state']],
+ 'max_mem': long(record['memory_static_max']) >> 10,
+ 'mem': long(record['memory_dynamic_max']) >> 10,
+ 'num_cpu': record['VCPUs_max'],
+ 'cpu_time': 0}
+
+
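The shift operations convert xapi's byte counts into the KB figures the compute layer expects:

    # xapi returns memory sizes in bytes, as strings; >> 10 divides by 1024.
    memory_static_max = '4294967296'               # 4 GB, illustrative
    max_mem_kb = long(memory_static_max) >> 10
    # max_mem_kb == 4194304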
+def compile_diagnostics(record):
+ """Compile VM diagnostics data"""
+ try:
+ keys = []
+ diags = {}
+ vm_uuid = record["uuid"]
+ xml = get_rrd(get_rrd_server(), vm_uuid)
if xml:
- doc = minidom.parseString(xml)
- return parse_rrd_update(doc, start_time, stop_time)
+ rrd = minidom.parseString(xml)
+ for i, node in enumerate(rrd.firstChild.childNodes):
+ # Provide the last update of the information
+ if node.localName == 'lastupdate':
+ diags['last_update'] = node.firstChild.data
- raise exception.CouldNotFetchMetrics()
+ # Create a list of the diagnostic keys (in their order)
+ if node.localName == 'ds':
+ ref = node.childNodes
+ # Name and Value
+ if len(ref) > 6:
+ keys.append(ref[0].firstChild.data)
- @classmethod
- def scan_sr(cls, session, sr_ref=None):
- """Scans the SR specified by sr_ref"""
- if sr_ref:
- LOG.debug(_("Re-scanning SR %s"), sr_ref)
- session.call_xenapi('SR.scan', sr_ref)
+ # Read the last row of the first RRA to get the latest info
+ if node.localName == 'rra':
+ rows = node.childNodes[4].childNodes
+ last_row = rows[rows.length - 1].childNodes
+ for j, value in enumerate(last_row):
+ diags[keys[j]] = value.firstChild.data
+ break
- @classmethod
- def scan_default_sr(cls, session):
- """Looks for the system default SR and triggers a re-scan"""
- cls.scan_sr(session, cls.find_sr(session))
+ return diags
+ except expat.ExpatError as e:
+ LOG.exception(_('Unable to parse rrd of %(vm_uuid)s') % locals())
+ return {"Unable to retrieve diagnostics": e}
- @classmethod
- def safe_find_sr(cls, session):
- """Same as find_sr except raises a NotFound exception if SR cannot be
- determined
- """
- sr_ref = cls.find_sr(session)
- if sr_ref is None:
- raise exception.StorageRepositoryNotFound()
- return sr_ref
- @classmethod
- def find_sr(cls, session):
- """Return the storage repository to hold VM images"""
- host = session.get_xenapi_host()
- try:
- tokens = FLAGS.sr_matching_filter.split(':')
- filter_criteria = tokens[0]
- filter_pattern = tokens[1]
- except IndexError:
- # oops, flag is invalid
- LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
- "formatting convention"), FLAGS.sr_matching_filter)
- return None
-
- if filter_criteria == 'other-config':
- key, value = filter_pattern.split('=', 1)
- for sr_ref, sr_rec in cls.get_all_refs_and_recs(session, 'SR'):
- if not (key in sr_rec['other_config'] and
- sr_rec['other_config'][key] == value):
- continue
- for pbd_ref in sr_rec['PBDs']:
- pbd_rec = cls.get_rec(session, 'PBD', pbd_ref)
- if pbd_rec and pbd_rec['host'] == host:
- return sr_ref
- elif filter_criteria == 'default-sr' and filter_pattern == 'true':
- pool_ref = session.call_xenapi('pool.get_all')[0]
- return session.call_xenapi('pool.get_default_SR', pool_ref)
- # No SR found!
- LOG.warning(_("XenAPI is unable to find a Storage Repository to "
- "install guest instances on. Please check your "
- "configuration and/or configure the flag "
- "'sr_matching_filter'"))
- return None
+def compile_metrics(start_time, stop_time=None):
+ """Compile bandwidth usage, cpu, and disk metrics for all VMs on
+ this host"""
+ start_time = int(start_time)
- @classmethod
- def safe_find_iso_sr(cls, session):
- """Same as find_iso_sr except raises a NotFound exception if SR
- cannot be determined
- """
- sr_ref = cls.find_iso_sr(session)
- if sr_ref is None:
- raise exception.NotFound(_('Cannot find SR of content-type ISO'))
- return sr_ref
+ xml = get_rrd_updates(get_rrd_server(), start_time)
+ if xml:
+ doc = minidom.parseString(xml)
+ return parse_rrd_update(doc, start_time, stop_time)
- @classmethod
- def find_iso_sr(cls, session):
- """Return the storage repository to hold ISO images"""
- host = session.get_xenapi_host()
- for sr_ref, sr_rec in cls.get_all_refs_and_recs(session, 'SR'):
- LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
- if not sr_rec['content_type'] == 'iso':
- LOG.debug(_("ISO: not iso content"))
- continue
- if not 'i18n-key' in sr_rec['other_config']:
- LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
- continue
- if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
- LOG.debug(_("ISO: iso content_type, i18n-key value not "
- "'local-storage-iso'"))
- continue
+ raise exception.CouldNotFetchMetrics()
+
+
+def scan_sr(session, sr_ref=None):
+ """Scans the SR specified by sr_ref"""
+ if sr_ref:
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
+ session.call_xenapi('SR.scan', sr_ref)
+
+
+def scan_default_sr(session):
+ """Looks for the system default SR and triggers a re-scan"""
+ scan_sr(session, find_sr(session))
+
+
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.StorageRepositoryNotFound()
+ return sr_ref
- LOG.debug(_("ISO: SR MATCHing our criteria"))
+
+def find_sr(session):
+ """Return the storage repository to hold VM images"""
+ host = session.get_xenapi_host()
+ try:
+ tokens = FLAGS.sr_matching_filter.split(':')
+ filter_criteria = tokens[0]
+ filter_pattern = tokens[1]
+ except IndexError:
+ # oops, flag is invalid
+ LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
+ "formatting convention"), FLAGS.sr_matching_filter)
+ return None
+
+ if filter_criteria == 'other-config':
+ key, value = filter_pattern.split('=', 1)
+ for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
+ if not (key in sr_rec['other_config'] and
+ sr_rec['other_config'][key] == value):
+ continue
for pbd_ref in sr_rec['PBDs']:
- LOG.debug(_("ISO: ISO, looking to see if it is host local"))
- pbd_rec = cls.get_rec(session, 'PBD', pbd_ref)
- if not pbd_rec:
- LOG.debug(_("ISO: PBD %(pbd_ref)s disappeared") % locals())
- continue
- pbd_rec_host = pbd_rec['host']
- LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, "
- "have %(host)s") % locals())
- if pbd_rec_host == host:
- LOG.debug(_("ISO: SR with local PBD"))
+ pbd_rec = session.get_rec('PBD', pbd_ref)
+ if pbd_rec and pbd_rec['host'] == host:
return sr_ref
- return None
+ elif filter_criteria == 'default-sr' and filter_pattern == 'true':
+ pool_ref = session.call_xenapi('pool.get_all')[0]
+ return session.call_xenapi('pool.get_default_SR', pool_ref)
+ # No SR found!
+ LOG.warning(_("XenAPI is unable to find a Storage Repository to "
+ "install guest instances on. Please check your "
+ "configuration and/or configure the flag "
+ "'sr_matching_filter'"))
+ return None
+
+
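The sr_matching_filter flag is a colon-separated criteria:pattern pair, parsed exactly as above; the two recognised shapes, with illustrative values:

    # --sr_matching_filter=other-config:i18n-key=local-storage
    # --sr_matching_filter=default-sr:true
    flag = 'other-config:i18n-key=local-storage'
    tokens = flag.split(':')
    filter_criteria, filter_pattern = tokens[0], tokens[1]
    key, value = filter_pattern.split('=', 1)
    # filter_criteria == 'other-config', key == 'i18n-key',
    # value == 'local-storage'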
+def safe_find_iso_sr(session):
+ """Same as find_iso_sr except raises a NotFound exception if SR
+ cannot be determined
+ """
+ sr_ref = find_iso_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR of content-type ISO'))
+ return sr_ref
+
+
+def find_iso_sr(session):
+ """Return the storage repository to hold ISO images"""
+ host = session.get_xenapi_host()
+ for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
+ LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
+ if not sr_rec['content_type'] == 'iso':
+ LOG.debug(_("ISO: not iso content"))
+ continue
+ if not 'i18n-key' in sr_rec['other_config']:
+ LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
+ continue
+ if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
+ LOG.debug(_("ISO: iso content_type, i18n-key value not "
+ "'local-storage-iso'"))
+ continue
+
+ LOG.debug(_("ISO: SR MATCHing our criteria"))
+ for pbd_ref in sr_rec['PBDs']:
+ LOG.debug(_("ISO: ISO, looking to see if it is host local"))
+ pbd_rec = session.get_rec('PBD', pbd_ref)
+ if not pbd_rec:
+ LOG.debug(_("ISO: PBD %(pbd_ref)s disappeared") % locals())
+ continue
+ pbd_rec_host = pbd_rec['host']
+ LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, "
+ "have %(host)s") % locals())
+ if pbd_rec_host == host:
+ LOG.debug(_("ISO: SR with local PBD"))
+ return sr_ref
+ return None
def get_rrd_server():
@@ -1422,7 +1401,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
for i in xrange(max_attempts):
- VMHelper.scan_sr(session, sr_ref)
+ scan_sr(session, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
@@ -1492,17 +1471,16 @@ def cleanup_attached_vdis(session):
# unclean restart
LOG.info(_('Disconnecting stale VDI %s from compute domU'),
vdi_rec['uuid'])
- VMHelper.unplug_vbd(session, vbd_ref)
- VMHelper.destroy_vbd(session, vbd_ref)
+ unplug_vbd(session, vbd_ref)
+ destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = get_this_vm_ref(session)
- vbd_ref = VMHelper.create_vbd(session, this_vm_ref, vdi_ref,
- 'autodetect', read_only=read_only,
- bootable=False)
+ vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
+ read_only=read_only, bootable=False)
try:
LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
session.call_xenapi("VBD.plug", vbd_ref)
@@ -1518,10 +1496,10 @@ def vdi_attached_here(session, vdi_ref, read_only=False):
yield dev
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
- VMHelper.unplug_vbd(session, vbd_ref)
+ unplug_vbd(session, vbd_ref)
finally:
try:
- VMHelper.destroy_vbd(session, vbd_ref)
+ destroy_vbd(session, vbd_ref)
except volume_utils.StorageError:
# destroy_vbd() will log error
pass
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 735a9d6bd..8ce47662d 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -47,9 +47,6 @@ from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
-VolumeHelper = volume_utils.VolumeHelper
-NetworkHelper = network_utils.NetworkHelper
-VMHelper = vm_utils.VMHelper
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
@@ -78,6 +75,12 @@ flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
RESIZE_TOTAL_STEPS = 5
+DEVICE_ROOT = '0'
+DEVICE_RESCUE = '1'
+DEVICE_SWAP = '2'
+DEVICE_EPHEMERAL = '3'
+DEVICE_CD = '4'
+
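These constants replace the old running userdevice counter in _attach_disks (further down), pinning each disk role to a fixed position so the guest's device layout no longer depends on which optional disks exist. As a sketch, under the conventional Xen xvd naming the layout would be:

    # Assumed guest-visible layout under the usual xvd naming convention.
    DEVICE_LAYOUT = {
        '0': 'root (xvda)',
        '1': 'rescue (xvdb)',
        '2': 'swap (xvdc)',
        '3': 'ephemeral (xvdd)',
        '4': 'CD (xvde)',
    }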
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
@@ -165,7 +168,7 @@ class VMOps(object):
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
- for vm_ref, vm_rec in VMHelper.list_vms(self._session):
+ for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
@@ -173,11 +176,11 @@ class VMOps(object):
def list_instances_detail(self):
"""List VM instances, returning InstanceInfo objects."""
details = []
- for vm_ref, vm_rec in VMHelper.list_vms(self._session):
+ for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name = vm_rec["name_label"]
# TODO(justinsb): This a roundabout way to map the state
- openstack_format = VMHelper.compile_info(vm_rec)
+ openstack_format = vm_utils.compile_info(vm_rec)
state = openstack_format['state']
instance_info = driver.InstanceInfo(name, state)
@@ -187,31 +190,31 @@ class VMOps(object):
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
- vm_ref = VMHelper.lookup(self._session, name_label)
+ vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info)
def finish_revert_migration(self, instance):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
- vm_ref = VMHelper.lookup(self._session, name_label)
+ vm_ref = vm_utils.lookup(self._session, name_label)
# Remove the '-orig' suffix (which was added in case the resized VM
# ends up on the source host, common during testing)
name_label = instance.name
- VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
+ vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance):
- vdi_uuid = self._move_disks(instance, disk_info)
+ root_vdi = self._move_disks(instance, disk_info)
if resize_instance:
- self._resize_instance(instance, vdi_uuid)
+ self._resize_instance(instance, root_vdi)
vm_ref = self._create_vm(context, instance,
- [dict(vdi_type='root', vdi_uuid=vdi_uuid)],
+ {'root': root_vdi},
network_info, image_meta)
# 5. Start VM
@@ -229,14 +232,19 @@ class VMOps(object):
False, False)
def _create_disks(self, context, instance, image_meta):
- disk_image_type = VMHelper.determine_disk_image_type(image_meta)
- vdis = VMHelper.create_image(context, self._session,
+ disk_image_type = vm_utils.determine_disk_image_type(image_meta)
+ vdis = vm_utils.create_image(context, self._session,
instance, instance.image_ref,
disk_image_type)
- for vdi in vdis:
- if vdi["vdi_type"] == "root":
- self._resize_instance(instance, vdi["vdi_uuid"])
+ # Just get the VDI ref once
+ for vdi in vdis.itervalues():
+ vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi['uuid'])
+
+ root_vdi = vdis.get('root')
+ if root_vdi:
+ self._resize_instance(instance, root_vdi)
return vdis
@@ -258,17 +266,7 @@ class VMOps(object):
vdis = self._create_disks(context, instance, image_meta)
def undo_create_disks():
- vdi_refs = []
- for vdi in vdis:
- try:
- vdi_ref = self._session.call_xenapi(
- 'VDI.get_by_uuid', vdi['vdi_uuid'])
- except self.XenAPI.Failure:
- continue
-
- vdi_refs.append(vdi_ref)
-
- self._safe_destroy_vdis(vdi_refs)
+ self._safe_destroy_vdis([vdi['ref'] for vdi in vdis.values()])
undo_mgr.undo_with(undo_create_disks)
return vdis
@@ -279,16 +277,16 @@ class VMOps(object):
ramdisk_file = None
if instance.kernel_id:
- kernel = VMHelper.create_kernel_image(context, self._session,
+ vdis = vm_utils.create_kernel_image(context, self._session,
instance, instance.kernel_id, instance.user_id,
- instance.project_id, vm_utils.ImageType.KERNEL)[0]
- kernel_file = kernel.get('file')
+ instance.project_id, vm_utils.ImageType.KERNEL)
+ kernel_file = vdis['kernel'].get('file')
if instance.ramdisk_id:
- ramdisk = VMHelper.create_kernel_image(context, self._session,
+ vdis = vm_utils.create_kernel_image(context, self._session,
instance, instance.ramdisk_id, instance.user_id,
- instance.project_id, vm_utils.ImageType.RAMDISK)[0]
- ramdisk_file = ramdisk.get('file')
+ instance.project_id, vm_utils.ImageType.RAMDISK)
+ ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
@@ -362,25 +360,15 @@ class VMOps(object):
kernel_file=None, ramdisk_file=None):
"""Create VM instance."""
instance_name = instance.name
- vm_ref = VMHelper.lookup(self._session, instance_name)
+ vm_ref = vm_utils.lookup(self._session, instance_name)
if vm_ref is not None:
raise exception.InstanceExists(name=instance_name)
# Ensure enough free memory is available
- if not VMHelper.ensure_free_mem(self._session, instance):
+ if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance.uuid)
- disk_image_type = VMHelper.determine_disk_image_type(image_meta)
-
- # NOTE(jk0): Since vdi_type may contain either 'root' or 'swap', we
- # need to ensure that the 'swap' VDI is not chosen as the mount
- # point for file injection.
- first_vdi_ref = None
- for vdi in vdis:
- if vdi.get('vdi_type') != 'swap':
- # Create the VM ref and attach the first disk
- first_vdi_ref = self._session.call_xenapi(
- 'VDI.get_by_uuid', vdi['vdi_uuid'])
+ disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_mode = instance.vm_mode and instance.vm_mode.lower()
if vm_mode == 'pv':
@@ -389,8 +377,8 @@ class VMOps(object):
use_pv_kernel = False
vm_mode = 'hvm' # Normalize
else:
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- first_vdi_ref, disk_image_type, instance.os_type)
+ use_pv_kernel = vm_utils.determine_is_pv(self._session,
+ vdis['root']['ref'], disk_image_type, instance.os_type)
vm_mode = use_pv_kernel and 'pv' or 'hvm'
if instance.vm_mode != vm_mode:
@@ -398,18 +386,17 @@ class VMOps(object):
db.instance_update(nova_context.get_admin_context(),
instance['id'], {'vm_mode': vm_mode})
- vm_ref = VMHelper.create_vm(
+ vm_ref = vm_utils.create_vm(
self._session, instance, kernel_file, ramdisk_file,
use_pv_kernel)
# Add disks to VM
- self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
- vdis)
+ self._attach_disks(instance, disk_image_type, vm_ref, vdis)
# Alter the image before VM start for network injection.
if FLAGS.flat_injected:
- VMHelper.preconfigure_instance(self._session, instance,
- first_vdi_ref, network_info)
+ vm_utils.preconfigure_instance(self._session, instance,
+ vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
@@ -419,76 +406,62 @@ class VMOps(object):
return vm_ref
- def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
- vdis):
+ def _attach_disks(self, instance, disk_image_type, vm_ref, vdis):
ctx = nova_context.get_admin_context()
- # device 0 reserved for RW disk
- userdevice = 0
-
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
- cd_vdi_ref = first_vdi_ref
- first_vdi_ref = VMHelper.fetch_blank_disk(self._session,
- instance.instance_type_id)
+ cd_vdi = vdis.pop('root')
+ root_vdi = vm_utils.fetch_blank_disk(self._session,
+ instance.instance_type_id)
+ vdis['root'] = root_vdi
- VMHelper.create_vbd(self._session, vm_ref, first_vdi_ref,
- userdevice, bootable=False)
+ vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
+ DEVICE_ROOT, bootable=False)
- # device 1 reserved for rescue disk and we've used '0'
- userdevice = 2
- VMHelper.create_vbd(self._session, vm_ref, cd_vdi_ref,
- userdevice, vbd_type='CD', bootable=True)
-
- # set user device to next free value
- userdevice += 1
+ vm_utils.create_vbd(self._session, vm_ref, cd_vdi['ref'],
+ DEVICE_CD, vbd_type='CD', bootable=True)
else:
+ root_vdi = vdis['root']
+
if instance.auto_disk_config:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
instance_type = db.instance_type_get(ctx,
instance.instance_type_id)
- VMHelper.auto_configure_disk(self._session,
- first_vdi_ref,
+ vm_utils.auto_configure_disk(self._session,
+ root_vdi['ref'],
instance_type['root_gb'])
- VMHelper.create_vbd(self._session, vm_ref, first_vdi_ref,
- userdevice, bootable=True)
+ vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
+ DEVICE_ROOT, bootable=True)
- # set user device to next free value
- # userdevice 1 is reserved for rescue and we've used '0'
- userdevice = 2
+ # Attach (optional) swap disk
+ swap_vdi = vdis.get('swap')
instance_type = db.instance_type_get(ctx, instance.instance_type_id)
swap_mb = instance_type['swap']
generate_swap = swap_mb and FLAGS.xenapi_generate_swap
if generate_swap:
- VMHelper.generate_swap(self._session, instance,
- vm_ref, userdevice, swap_mb)
- userdevice += 1
-
+ vm_utils.generate_swap(self._session, instance, vm_ref,
+ DEVICE_SWAP, swap_mb)
+
+ if swap_vdi:
+ # We won't be using the packaged swap VDI, so destroy it
+ vm_utils.destroy_vdi(self._session, swap_vdi['ref'])
+ elif swap_vdi:
+ # Attach packaged swap VDI to VM
+ vm_utils.create_vbd(self._session, vm_ref, swap_vdi['ref'],
+ DEVICE_SWAP, bootable=False)
+
+ # Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
- VMHelper.generate_ephemeral(self._session, instance,
- vm_ref, userdevice, ephemeral_gb)
- userdevice += 1
-
- # Attach any other disks
- for vdi in vdis[1:]:
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdi['vdi_uuid'])
-
- if generate_swap and vdi['vdi_type'] == 'swap':
- # We won't be using it, so don't let it leak
- VMHelper.destroy_vdi(self._session, vdi_ref)
- continue
-
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref,
- userdevice, bootable=False)
- userdevice += 1
+ vm_utils.generate_ephemeral(self._session, instance, vm_ref,
+ DEVICE_EPHEMERAL, ephemeral_gb)
def _boot_new_instance(self, instance, vm_ref):
"""Boot a new instance and configure it."""
@@ -574,7 +547,7 @@ class VMOps(object):
'weight', str(vcpu_weight))
def _get_vm_opaque_ref(self, instance):
- vm_ref = VMHelper.lookup(self._session, instance['name'])
+ vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
@@ -623,7 +596,7 @@ class VMOps(object):
_snapshot_info = self._create_snapshot(instance)
template_vm_ref, template_vdi_uuids = _snapshot_info
# call plugin to ship snapshot off to glance
- VMHelper.upload_image(context,
+ vm_utils.upload_image(context,
self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
@@ -642,7 +615,7 @@ class VMOps(object):
label = "%s-snapshot" % instance.name
try:
- template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
+ template_vm_ref, template_vdi_uuids = vm_utils.create_snapshot(
self._session, instance, vm_ref, label)
return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
@@ -718,11 +691,11 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
base_copy_uuid = template_vdi_uuids['image']
- _vdi_info = VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+ _vdi_info = vm_utils.get_vdi_for_vm_safely(self._session, vm_ref)
vdi_ref, vm_vdi_rec = _vdi_info
cow_uuid = vm_vdi_rec['uuid']
- sr_path = VMHelper.get_sr_path(self._session)
+ sr_path = vm_utils.get_sr_path(self._session)
if (instance['auto_disk_config'] and
instance['root_gb'] > instance_type['root_gb']):
@@ -742,7 +715,7 @@ class VMOps(object):
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
- new_ref, new_uuid = VMHelper.resize_disk(self._session,
+ new_ref, new_uuid = vm_utils.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
@@ -757,7 +730,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# Clean up VDI now that it's been copied
- VMHelper.destroy_vdi(self._session, new_ref)
+ vm_utils.destroy_vdi(self._session, new_ref)
vdis = {'base_copy': new_uuid}
else:
@@ -794,7 +767,7 @@ class VMOps(object):
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
- VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
+ vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
@@ -808,7 +781,7 @@ class VMOps(object):
new_base_copy_uuid = str(uuid.uuid4())
params = {'instance_uuid': instance['uuid'],
- 'sr_path': VMHelper.get_sr_path(self._session),
+ 'sr_path': vm_utils.get_sr_path(self._session),
'old_base_copy_uuid': base_copy_uuid,
'new_base_copy_uuid': new_base_copy_uuid}
@@ -826,15 +799,17 @@ class VMOps(object):
{'params': pickle.dumps(params)})
# Now we rescan the SR so we find the VHDs
- VMHelper.scan_default_sr(self._session)
+ vm_utils.scan_default_sr(self._session)
# Set name-label so we can find if we need to clean up a failed
# migration
- VMHelper.set_vdi_name(self._session, new_uuid, instance.name, 'root')
+ vm_utils.set_vdi_name(self._session, new_uuid, instance.name, 'root')
+
+ new_ref = self._session.call_xenapi('VDI.get_by_uuid', new_uuid)
- return new_uuid
+ return {'uuid': new_uuid, 'ref': new_ref}
- def _resize_instance(self, instance, vdi_uuid):
+ def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance.root_gb * 1024 * 1024 * 1024
@@ -842,9 +817,8 @@ class VMOps(object):
return
# Get current size of VDI
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
- vdi_ref)
+ root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
@@ -852,13 +826,14 @@ class VMOps(object):
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
+ vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
if self._session.product_version[0] > 5:
resize_func_name = 'VDI.resize'
else:
resize_func_name = 'VDI.resize_online'
- self._session.call_xenapi(resize_func_name, vdi_ref,
+ self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
@@ -990,7 +965,7 @@ class VMOps(object):
def _shutdown(self, instance, vm_ref, hard=True):
"""Shutdown an instance."""
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- state = VMHelper.compile_info(vm_rec)['state']
+ state = vm_utils.compile_info(vm_rec)['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
@@ -1014,7 +989,7 @@ class VMOps(object):
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
- if vbd["userdevice"] == "0":
+ if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
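
Comparing vbd["userdevice"] against the string constant matters because
XenAPI returns userdevice as a string; an integer comparison would never
match. A small illustration with a hypothetical record:

    # Hypothetical VBD record as returned by VBD.get_record:
    vbd = {'userdevice': '0', 'VDI': 'OpaqueRef:root-vdi'}
    assert vbd['userdevice'] == '0'   # matches DEVICE_ROOT
    assert vbd['userdevice'] != 0     # an int compare would miss it
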
@@ -1023,7 +998,7 @@ class VMOps(object):
"""Destroys the requested VDIs, logging any StorageError exceptions."""
for vdi_ref in vdi_refs:
try:
- VMHelper.destroy_vdi(self._session, vdi_ref)
+ vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
@@ -1061,7 +1036,7 @@ class VMOps(object):
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
+ (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk)
@@ -1081,12 +1056,12 @@ class VMOps(object):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
- state = VMHelper.compile_info(vm_rec)['state']
+ state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
- vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
+ vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
self._safe_destroy_vdis(vdi_refs)
@@ -1106,9 +1081,9 @@ class VMOps(object):
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
- vm_ref = VMHelper.lookup(self._session, instance['name'])
+ vm_ref = vm_utils.lookup(self._session, instance['name'])
- rescue_vm_ref = VMHelper.lookup(self._session,
+ rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance.name)
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
@@ -1129,11 +1104,11 @@ class VMOps(object):
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
- is_snapshot = VMHelper.is_snapshot(self._session, vm_ref)
+ is_snapshot = vm_utils.is_snapshot(self._session, vm_ref)
self._shutdown(instance, vm_ref)
# Destroy VDIs
- vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)
+ vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
self._safe_destroy_vdis(vdi_refs)
if destroy_kernel_ramdisk:
@@ -1175,7 +1150,7 @@ class VMOps(object):
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
- rescue_vm_ref = VMHelper.lookup(self._session,
+ rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance.name)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
@@ -1187,11 +1162,12 @@ class VMOps(object):
instance._rescue = True
self.spawn(context, instance, image_meta, network_info)
# instance.name now has -rescue appended because of magic
- rescue_vm_ref = VMHelper.lookup(self._session, instance.name)
+ rescue_vm_ref = vm_utils.lookup(self._session, instance.name)
vdi_ref = self._find_root_vdi_ref(vm_ref)
- rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
- vdi_ref, 1, bootable=False)
+ rescue_vbd_ref = vm_utils.create_vbd(self._session, rescue_vm_ref,
+ vdi_ref, DEVICE_RESCUE,
+ bootable=False)
self._session.call_xenapi('VBD.plug', rescue_vbd_ref)
def unrescue(self, instance):
@@ -1202,7 +1178,7 @@ class VMOps(object):
- release the bootlock to allow the instance VM to start.
"""
- rescue_vm_ref = VMHelper.lookup(self._session,
+ rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance.name)
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(instance_id=instance.uuid)
@@ -1283,14 +1259,14 @@ class VMOps(object):
for instance in self.list_instances():
if instance.endswith("-rescue"):
rescue_vms.append(dict(name=instance,
- vm_ref=VMHelper.lookup(self._session,
+ vm_ref=vm_utils.lookup(self._session,
instance)))
for vm in rescue_vms:
rescue_vm_ref = vm["vm_ref"]
original_name = vm["name"].split("-rescue", 1)[0]
- original_vm_ref = VMHelper.lookup(self._session, original_name)
+ original_vm_ref = vm_utils.lookup(self._session, original_name)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
@@ -1302,19 +1278,19 @@ class VMOps(object):
"""Return data about VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- return VMHelper.compile_info(vm_rec)
+ return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- return VMHelper.compile_diagnostics(vm_rec)
+ return vm_utils.compile_diagnostics(vm_rec)
def get_all_bw_usage(self, start_time, stop_time=None):
"""Return bandwidth usage info for each interface on each
running VM"""
try:
- metrics = VMHelper.compile_metrics(start_time, stop_time)
+ metrics = vm_utils.compile_metrics(start_time, stop_time)
except exception.CouldNotFetchMetrics:
LOG.exception(_("Could not get bandwidth info."))
return {}
@@ -1424,7 +1400,7 @@ class VMOps(object):
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
- what VMHelper.lookup(session, instance.name) will find (ex: rescue)
+ what vm_utils.lookup(session, instance.name) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 56e78984a..531033a52 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -38,305 +38,298 @@ class StorageError(Exception):
super(StorageError, self).__init__(message)
-class VolumeHelper(xenapi.HelperBase):
- """
- The class that wraps the helper methods together.
- """
+def create_sr(session, label, params):
+ LOG.debug(_("creating sr within volume_utils"))
+ type = params['sr_type']
+ del params['sr_type']
+    LOG.debug(_('type is %s') % type)
+ if 'name_description' in params:
+ desc = params['name_description']
+        LOG.debug(_('description = %s') % desc)
+ del params['name_description']
+ else:
+ desc = ''
+ if 'id' in params:
+ del params['id']
+ LOG.debug(params)
+
+ try:
+ sr_ref = session.call_xenapi("SR.create",
+ session.get_xenapi_host(),
+ params,
+ '0', label, desc, type, '', False, {})
+ LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
+ return sr_ref
- @classmethod
- def create_sr(cls, session, label, params):
-
- LOG.debug(_("creating sr within volume_utils"))
- type = params['sr_type']
- del params['sr_type']
- LOG.debug(_('type is = %s') % type)
- if 'name_description' in params:
- desc = params['name_description']
- LOG.debug(_('name = %s') % desc)
- del params['name_description']
- else:
- desc = ''
- if 'id' in params:
- del params['id']
- LOG.debug(params)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to create Storage Repository'))
- try:
- sr_ref = session.call_xenapi("SR.create",
- session.get_xenapi_host(),
- params,
- '0', label, desc, type, '', False, {})
- LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
- return sr_ref
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to create Storage Repository'))
+def introduce_sr(session, sr_uuid, label, params):
+ LOG.debug(_("introducing sr within volume_utils"))
+ type = params['sr_type']
+ del params['sr_type']
+    LOG.debug(_('type is %s') % type)
+ if 'name_description' in params:
+ desc = params['name_description']
+        LOG.debug(_('description = %s') % desc)
+ del params['name_description']
+ else:
+ desc = ''
+ if 'id' in params:
+ del params['id']
+ LOG.debug(params)
+
+ try:
+ sr_ref = session.call_xenapi("SR.introduce",
+ sr_uuid,
+ label,
+ desc,
+ type,
+ '',
+ False,
+ params,)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
+
+        # Create PBD
+        LOG.debug(_('Creating pbd for SR'))
+        pbd_ref = create_pbd(session, sr_ref, params)
+        # Plug PBD
+        LOG.debug(_('Plugging SR'))
+        session.call_xenapi("PBD.plug", pbd_ref)
+ session.call_xenapi("SR.scan", sr_ref)
+ return sr_ref
- @classmethod
- def introduce_sr(cls, session, sr_uuid, label, params):
- LOG.debug(_("introducing sr within volume_utils"))
- type = params['sr_type']
- del params['sr_type']
- LOG.debug(_('type is = %s') % type)
- if 'name_description' in params:
- desc = params['name_description']
- LOG.debug(_('name = %s') % desc)
- del params['name_description']
- else:
- desc = ''
- if 'id' in params:
- del params['id']
- LOG.debug(params)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce Storage Repository'))
- try:
- sr_ref = session.call_xenapi("SR.introduce",
- sr_uuid,
- label,
- desc,
- type,
- '',
- False,
- params,)
- LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
- #Create pbd
- LOG.debug(_('Creating pbd for SR'))
- pbd_ref = cls.create_pbd(session, sr_ref, params)
- LOG.debug(_('Plugging SR'))
- #Plug pbd
- session.call_xenapi("PBD.plug", pbd_ref)
- session.call_xenapi("SR.scan", sr_ref)
- return sr_ref
+def forget_sr(session, sr_uuid):
+ """
+ Forgets the storage repository without destroying the VDIs within
+ """
+ try:
+ sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to get SR using uuid'))
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to introduce Storage Repository'))
+ LOG.debug(_('Forgetting SR %s...') % sr_ref)
- @classmethod
- def forget_sr(cls, session, sr_uuid):
- """
- Forgets the storage repository without destroying the VDIs within
- """
- try:
- sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to get SR using uuid'))
+ try:
+ unplug_pbds(session, sr_ref)
+ sr_ref = session.call_xenapi("SR.forget", sr_ref)
- LOG.debug(_('Forgetting SR %s...') % sr_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to forget Storage Repository'))
- try:
- cls.unplug_pbds(session, sr_ref)
- sr_ref = session.call_xenapi("SR.forget", sr_ref)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to forget Storage Repository'))
-
- @classmethod
- def find_sr_by_uuid(cls, session, sr_uuid):
- """
- Return the storage repository given a uuid.
- """
- for sr_ref, sr_rec in cls.get_all_refs_and_recs(session, 'SR'):
- if sr_rec['uuid'] == sr_uuid:
- return sr_ref
- return None
-
- @classmethod
- def create_iscsi_storage(cls, session, info, label, description):
- """
- Create an iSCSI storage repository that will be used to mount
- the volume for the specified instance
- """
- sr_ref = session.call_xenapi("SR.get_by_name_label", label)
- if len(sr_ref) == 0:
- LOG.debug(_('Introducing %s...'), label)
- record = {}
- if 'chapuser' in info and 'chappassword' in info:
- record = {'target': info['targetHost'],
- 'port': info['targetPort'],
- 'targetIQN': info['targetIQN'],
- 'chapuser': info['chapuser'],
- 'chappassword': info['chappassword']}
- else:
- record = {'target': info['targetHost'],
- 'port': info['targetPort'],
- 'targetIQN': info['targetIQN']}
- try:
-                sr_ref = session.call_xenapi("SR.create",
-                                             session.get_xenapi_host(),
-                                             record,
-                                             '0', label, description,
-                                             'iscsi', '', False, {})
-                LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
- return sr_ref
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to create Storage Repository'))
- else:
- return sr_ref[0]
-
- @classmethod
- def find_sr_from_vbd(cls, session, vbd_ref):
- """Find the SR reference from the VBD reference"""
- try:
- vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
- sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
- return sr_ref
+def find_sr_by_uuid(session, sr_uuid):
+ """
+ Return the storage repository given a uuid.
+ """
+ for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
+ if sr_rec['uuid'] == sr_uuid:
+ return sr_ref
+ return None
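
Note that get_all_refs_and_recs also moved, from the helper base class onto
the session object. A plausible shape for that helper, assumed rather than
taken from this diff (written as a free function for illustration):

    # Assumed sketch: return every record of a XenAPI class as (ref, rec)
    # pairs, e.g. session.get_all_refs_and_recs('SR').
    def get_all_refs_and_recs(session, record_type):
        return session.call_xenapi('%s.get_all_records'
                                   % record_type).items()
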
- @classmethod
- def create_pbd(cls, session, sr_ref, params):
- pbd_rec = {}
- pbd_rec['host'] = session.get_xenapi_host()
- pbd_rec['SR'] = sr_ref
- pbd_rec['device_config'] = params
- pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
- return pbd_ref
-
- @classmethod
- def unplug_pbds(cls, session, sr_ref):
- pbds = []
- try:
- pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
- except session.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
- ' for %(sr_ref)s') % locals())
- for pbd in pbds:
- try:
- session.call_xenapi("PBD.unplug", pbd)
- except session.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %(exc)s when unplugging'
- ' PBD %(pbd)s') % locals())
-
- @classmethod
- def introduce_vdi(cls, session, sr_ref, vdi_uuid=None, target_lun=None):
- """Introduce VDI in the host"""
- try:
- session.call_xenapi("SR.scan", sr_ref)
- if vdi_uuid:
- LOG.debug("vdi_uuid: %s" % vdi_uuid)
- vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
- elif target_lun:
- vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
- for curr_ref in vdi_refs:
- curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
- if ('sm_config' in curr_rec and
- 'LUNid' in curr_rec['sm_config'] and
- curr_rec['sm_config']['LUNid'] == str(target_lun)):
- vdi_ref = curr_ref
- break
- else:
- vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
+def create_iscsi_storage(session, info, label, description):
+ """
+ Create an iSCSI storage repository that will be used to mount
+ the volume for the specified instance
+ """
+ sr_ref = session.call_xenapi("SR.get_by_name_label", label)
+ if len(sr_ref) == 0:
+ LOG.debug(_('Introducing %s...'), label)
+ record = {}
+ if 'chapuser' in info and 'chappassword' in info:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN'],
+ 'chapuser': info['chapuser'],
+ 'chappassword': info['chappassword']}
+ else:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN']}
try:
- vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
- LOG.debug(vdi_rec)
- LOG.debug(type(vdi_rec))
+            sr_ref = session.call_xenapi("SR.create",
+                                         session.get_xenapi_host(),
+                                         record,
+                                         '0', label, description,
+                                         'iscsi', '', False, {})
+            LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
+ return sr_ref
except session.XenAPI.Failure, exc:
LOG.exception(exc)
- raise StorageError(_('Unable to get record'
- ' of VDI %s on') % vdi_ref)
-
- if vdi_rec['managed']:
- # We do not need to introduce the vdi
- return vdi_ref
-
+ raise StorageError(_('Unable to create Storage Repository'))
+ else:
+ return sr_ref[0]
+
+
+def find_sr_from_vbd(session, vbd_ref):
+ """Find the SR reference from the VBD reference"""
+ try:
+ vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
+ sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
+ return sr_ref
+
+
+def create_pbd(session, sr_ref, params):
+ pbd_rec = {}
+ pbd_rec['host'] = session.get_xenapi_host()
+ pbd_rec['SR'] = sr_ref
+ pbd_rec['device_config'] = params
+ pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
+ return pbd_ref
+
+
+def unplug_pbds(session, sr_ref):
+ pbds = []
+ try:
+ pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+ ' for %(sr_ref)s') % locals())
+ for pbd in pbds:
try:
- return session.call_xenapi("VDI.introduce",
- vdi_rec['uuid'],
- vdi_rec['name_label'],
- vdi_rec['name_description'],
- vdi_rec['SR'],
- vdi_rec['type'],
- vdi_rec['sharable'],
- vdi_rec['read_only'],
- vdi_rec['other_config'],
- vdi_rec['location'],
- vdi_rec['xenstore_data'],
- vdi_rec['sm_config'])
+ session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to introduce VDI for SR %s')
- % sr_ref)
-
- @classmethod
- def purge_sr(cls, session, sr_ref):
- try:
- sr_rec = session.call_xenapi("SR.get_record", sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+ ' PBD %(pbd)s') % locals())
+
+
+def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
+    """Introduce a VDI to the host"""
+ try:
+ session.call_xenapi("SR.scan", sr_ref)
+ if vdi_uuid:
+ LOG.debug("vdi_uuid: %s" % vdi_uuid)
+ vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
+ elif target_lun:
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
+ for curr_ref in vdi_refs:
+ curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
+ if ('sm_config' in curr_rec and
+ 'LUNid' in curr_rec['sm_config'] and
+ curr_rec['sm_config']['LUNid'] == str(target_lun)):
+ vdi_ref = curr_ref
+ break
+ else:
+ vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
+
+ try:
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ LOG.debug(vdi_rec)
+ LOG.debug(type(vdi_rec))
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+        raise StorageError(_('Unable to get record'
+                             ' of VDI %s') % vdi_ref)
+
+ if vdi_rec['managed']:
+ # We do not need to introduce the vdi
+ return vdi_ref
+
+ try:
+ return session.call_xenapi("VDI.introduce",
+ vdi_rec['uuid'],
+ vdi_rec['name_label'],
+ vdi_rec['name_description'],
+ vdi_rec['SR'],
+ vdi_rec['type'],
+ vdi_rec['sharable'],
+ vdi_rec['read_only'],
+ vdi_rec['other_config'],
+ vdi_rec['location'],
+ vdi_rec['xenstore_data'],
+ vdi_rec['sm_config'])
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce VDI for SR %s')
+ % sr_ref)
+
+
+def purge_sr(session, sr_ref):
+ try:
+ sr_rec = session.call_xenapi("SR.get_record", sr_ref)
+ vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
+    except session.XenAPI.Failure, ex:
+ LOG.exception(ex)
+ raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
+
+ for vdi_ref in vdi_refs:
+ try:
+ vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
         except session.XenAPI.Failure, ex:
LOG.exception(ex)
- raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
-
- for vdi_ref in vdi_refs:
- try:
- vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
- except StorageError, ex:
- LOG.exception(ex)
- raise StorageError(_('Unable to find vbd for vdi %s') %
- vdi_ref)
- if len(vbd_refs) > 0:
- return
-
- cls.forget_sr(session, sr_rec['uuid'])
-
- @classmethod
- def parse_volume_info(cls, connection_info, mountpoint):
- """
- Parse device_path and mountpoint as they can be used by XenAPI.
- In particular, the mountpoint (e.g. /dev/sdc) must be translated
- into a numeric literal.
- FIXME(armando):
- As for device_path, currently cannot be used as it is,
- because it does not contain target information. As for interim
- solution, target details are passed either via Flags or obtained
- by iscsiadm. Long-term solution is to add a few more fields to the
- db in the iscsi_target table with the necessary info and modify
- the iscsi driver to set them.
- """
- device_number = VolumeHelper.mountpoint_to_number(mountpoint)
- data = connection_info['data']
- volume_id = data['volume_id']
- target_portal = data['target_portal']
- target_host = _get_target_host(target_portal)
- target_port = _get_target_port(target_portal)
- target_iqn = data['target_iqn']
- LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- volume_id, target_host, target_port, target_iqn)
- if (device_number < 0 or
- volume_id is None or
- target_host is None or
- target_iqn is None):
- raise StorageError(_('Unable to obtain target information'
- ' %(data)s, %(mountpoint)s') % locals())
- volume_info = {}
- volume_info['id'] = volume_id
- volume_info['target'] = target_host
- volume_info['port'] = target_port
- volume_info['targetIQN'] = target_iqn
- if ('auth_method' in connection_info and
- connection_info['auth_method'] == 'CHAP'):
- volume_info['chapuser'] = connection_info['auth_username']
- volume_info['chappassword'] = connection_info['auth_password']
-
- return volume_info
-
- @classmethod
- def mountpoint_to_number(cls, mountpoint):
- """Translate a mountpoint like /dev/sdc into a numeric"""
- if mountpoint.startswith('/dev/'):
- mountpoint = mountpoint[5:]
- if re.match('^[hs]d[a-p]$', mountpoint):
- return (ord(mountpoint[2:3]) - ord('a'))
- elif re.match('^x?vd[a-p]$', mountpoint):
- return (ord(mountpoint[-1]) - ord('a'))
- elif re.match('^[0-9]+$', mountpoint):
- return string.atoi(mountpoint, 10)
- else:
- LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
- return -1
+ raise StorageError(_('Unable to find vbd for vdi %s') %
+ vdi_ref)
+ if len(vbd_refs) > 0:
+ return
+
+ forget_sr(session, sr_rec['uuid'])
+
+
+def parse_volume_info(connection_info, mountpoint):
+ """
+ Parse device_path and mountpoint as they can be used by XenAPI.
+ In particular, the mountpoint (e.g. /dev/sdc) must be translated
+ into a numeric literal.
+ FIXME(armando):
+ As for device_path, currently cannot be used as it is,
+ because it does not contain target information. As for interim
+ solution, target details are passed either via Flags or obtained
+ by iscsiadm. Long-term solution is to add a few more fields to the
+ db in the iscsi_target table with the necessary info and modify
+ the iscsi driver to set them.
+ """
+ device_number = mountpoint_to_number(mountpoint)
+ data = connection_info['data']
+ volume_id = data['volume_id']
+ target_portal = data['target_portal']
+ target_host = _get_target_host(target_portal)
+ target_port = _get_target_port(target_portal)
+ target_iqn = data['target_iqn']
+    LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s,%s)',
+              volume_id, device_number, target_host, target_port, target_iqn)
+ if (device_number < 0 or
+ volume_id is None or
+ target_host is None or
+ target_iqn is None):
+ raise StorageError(_('Unable to obtain target information'
+ ' %(data)s, %(mountpoint)s') % locals())
+ volume_info = {}
+ volume_info['id'] = volume_id
+ volume_info['target'] = target_host
+ volume_info['port'] = target_port
+ volume_info['targetIQN'] = target_iqn
+ if ('auth_method' in connection_info and
+ connection_info['auth_method'] == 'CHAP'):
+ volume_info['chapuser'] = connection_info['auth_username']
+ volume_info['chappassword'] = connection_info['auth_password']
+
+ return volume_info
+
+
+def mountpoint_to_number(mountpoint):
+    """Translate a mountpoint like /dev/sdc into a device number"""
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hs]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^x?vd[a-p]$', mountpoint):
+ return (ord(mountpoint[-1]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+        return int(mountpoint, 10)
+ else:
+ LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
+ return -1
def _get_volume_id(path_or_id):
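
A few worked examples for mountpoint_to_number, which this refactor leaves
functionally unchanged; the results follow directly from the regexes above:

    # mountpoint_to_number('/dev/sdc')  -> 2   (ord('c') - ord('a'))
    # mountpoint_to_number('/dev/xvde') -> 4   (ord('e') - ord('a'))
    # mountpoint_to_number('3')         -> 3   (already numeric)
    # mountpoint_to_number('/dev/sr0')  -> -1  (no pattern matches; warned)
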
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 1bf62bfa7..bab0b4d2c 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -49,8 +49,8 @@ class VolumeOps(object):
label = 'vol-' + hex(volume['id'])[:-1]
# size presented to xenapi is in bytes, while euca api is in GB
vdi_size = volume['size'] * 1024 * 1024 * 1024
- vdi_ref = vm_utils.VMHelper.create_vdi(self._session,
- sr_ref, label, vdi_size, False)
+ vdi_ref = vm_utils.create_vdi(self._session, sr_ref, label,
+ vdi_size, False)
vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
return sm_vol_rec
@@ -60,12 +60,11 @@ class VolumeOps(object):
if vdi_ref is None:
raise exception.NovaException(_('Could not find VDI ref'))
- vm_utils.VMHelper.destroy_vdi(self._session, vdi_ref)
+ vm_utils.destroy_vdi(self._session, vdi_ref)
def create_sr(self, label, params):
LOG.debug(_("Creating SR %s") % label)
- sr_ref = volume_utils.VolumeHelper.create_sr(self._session,
- label, params)
+ sr_ref = volume_utils.create_sr(self._session, label, params)
if sr_ref is None:
raise exception.NovaException(_('Could not create SR'))
sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
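
For context, the params handed to volume_utils.create_sr carry an sr_type
key that create_sr pops off before the XenAPI call, while the remaining
entries become the SR's device_config. A hypothetical iSCSI example, shaped
like the output of parse_volume_info:

    # Hypothetical create_sr params for an iSCSI backend (made-up values):
    params = {'sr_type': 'iscsi',      # consumed by create_sr itself
              'target': '192.0.2.10',  # the rest becomes device_config
              'port': '3260',
              'targetIQN': 'iqn.2010-10.org.openstack:volume-00000001'}
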
@@ -76,34 +75,31 @@ class VolumeOps(object):
# Checks if sr has already been introduced to this host
def introduce_sr(self, sr_uuid, label, params):
LOG.debug(_("Introducing SR %s") % label)
- sr_ref = volume_utils.VolumeHelper.find_sr_by_uuid(self._session,
- sr_uuid)
+ sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref:
LOG.debug(_('SR found in xapi database. No need to introduce'))
return sr_ref
- sr_ref = volume_utils.VolumeHelper.introduce_sr(self._session,
- sr_uuid, label, params)
+ sr_ref = volume_utils.introduce_sr(self._session, sr_uuid, label,
+ params)
if sr_ref is None:
raise exception.NovaException(_('Could not introduce SR'))
return sr_ref
def is_sr_on_host(self, sr_uuid):
LOG.debug(_('Checking for SR %s') % sr_uuid)
- sr_ref = volume_utils.VolumeHelper.find_sr_by_uuid(self._session,
- sr_uuid)
+ sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref:
return True
return False
# Checks if sr has been introduced
def forget_sr(self, sr_uuid):
- sr_ref = volume_utils.VolumeHelper.find_sr_by_uuid(self._session,
- sr_uuid)
+ sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref is None:
             LOG.info(_('SR %s not found in the xapi database') % sr_uuid)
return
try:
- volume_utils.VolumeHelper.forget_sr(self._session, sr_uuid)
+ volume_utils.forget_sr(self._session, sr_uuid)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise exception.NovaException(_('Could not forget SR'))
@@ -111,7 +107,7 @@ class VolumeOps(object):
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance"""
# Before we start, check that the VM exists
- vm_ref = vm_utils.VMHelper.lookup(self._session, instance_name)
+ vm_ref = vm_utils.lookup(self._session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
@@ -136,8 +132,8 @@ class VolumeOps(object):
LOG.debug(connection_info)
sr_params = {}
if u'sr_uuid' not in data:
- sr_params = volume_utils.VolumeHelper.parse_volume_info(
- connection_info, mountpoint)
+ sr_params = volume_utils.parse_volume_info(connection_info,
+ mountpoint)
uuid = "FA15E-D15C-" + str(sr_params['id'])
sr_params['sr_type'] = 'iscsi'
else:
@@ -167,19 +163,18 @@ class VolumeOps(object):
# Introduce VDI and attach VBD to VM
try:
- vdi_ref = volume_utils.VolumeHelper.introduce_vdi(self._session,
- sr_ref, vdi_uuid, target_lun)
+ vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref,
+ vdi_uuid, target_lun)
except volume_utils.StorageError, exc:
LOG.exception(exc)
self.forget_sr(uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
- dev_number = volume_utils.VolumeHelper.mountpoint_to_number(mountpoint)
+ dev_number = volume_utils.mountpoint_to_number(mountpoint)
try:
- vbd_ref = vm_utils.VMHelper.create_vbd(self._session, vm_ref,
- vdi_ref, dev_number,
- bootable=False)
+ vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
+ dev_number, bootable=False)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
self.forget_sr(uuid)
@@ -200,37 +195,35 @@ class VolumeOps(object):
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
# Before we start, check that the VM exists
- vm_ref = vm_utils.VMHelper.lookup(self._session, instance_name)
+ vm_ref = vm_utils.lookup(self._session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.VolumeHelper.mountpoint_to_number(
- mountpoint)
+ device_number = volume_utils.mountpoint_to_number(mountpoint)
try:
- vbd_ref = vm_utils.VMHelper.find_vbd_by_number(self._session,
- vm_ref, device_number)
+ vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
+ device_number)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
try:
- sr_ref = volume_utils.VolumeHelper.find_sr_from_vbd(self._session,
- vbd_ref)
- vm_utils.VMHelper.unplug_vbd(self._session, vbd_ref)
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
+ vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
- vm_utils.VMHelper.destroy_vbd(self._session, vbd_ref)
+ vm_utils.destroy_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to destroy vbd %s') % mountpoint)
# Forget SR only if no other volumes on this host are using it
try:
- volume_utils.VolumeHelper.purge_sr(self._session, sr_ref)
+ volume_utils.purge_sr(self._session, sr_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Error purging SR %s') % sr_ref)
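
One design note on the detach path above: purge_sr returns early as soon as
any VDI in the SR still has a VBD attached, so the SR is only forgotten once
the last volume using it on this host detaches.

    # Sketch of the cleanup contract (names from the diff above):
    #   detach_volume() -> unplug_vbd() -> destroy_vbd() -> purge_sr()
    #   purge_sr() forgets the SR only when no VDI in it still has a VBD.
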
diff --git a/openstack-common.conf b/openstack-common.conf
index 61690b895..504500632 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,excutils,local,importutils,iniparser,jsonutils,setup
+modules=cfg,excutils,local,importutils,iniparser,jsonutils,setup,policy
# The base module to hold the copy of openstack.common
base=nova
diff --git a/setup.py b/setup.py
index 929223f25..4ff26d568 100644
--- a/setup.py
+++ b/setup.py
@@ -72,26 +72,27 @@ setuptools.setup(name='nova',
packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
test_suite='nose.collector',
- scripts=['bin/clear_rabbit_queues',
- 'bin/instance-usage-audit',
- 'bin/nova-all',
+ scripts=['bin/nova-all',
'bin/nova-api',
'bin/nova-api-ec2',
'bin/nova-api-metadata',
'bin/nova-api-os-compute',
'bin/nova-api-os-volume',
             'bin/nova-cert',
+            'bin/nova-clear-rabbit-queues',
             'bin/nova-compute',
             'bin/nova-console',
             'bin/nova-consoleauth',
             'bin/nova-dhcpbridge',
+            'bin/nova-instance-usage-audit',
             'bin/nova-manage',
             'bin/nova-network',
             'bin/nova-objectstore',
             'bin/nova-rootwrap',
+            'bin/nova-rpc-zmq-receiver',
'bin/nova-scheduler',
'bin/nova-volume',
+ 'bin/nova-volume-usage-audit',
'bin/nova-xvpvncproxy',
- 'bin/volume-usage-audit',
],
py_modules=[])
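
Since the scripts list above swaps clear_rabbit_queues, instance-usage-audit
and volume-usage-audit for nova- prefixed names, every console script now
shares the nova- prefix, and packaging checks can rely on a single glob. An
illustrative check, not part of the change itself:

    # Illustrative: list the console scripts by their shared prefix.
    import glob
    for script in sorted(glob.glob('bin/nova-*')):
        print script
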