-rw-r--r--  etc/nova/policy.json | 3
-rw-r--r--  nova/api/metadata/password.py | 8
-rw-r--r--  nova/api/openstack/common.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/certificates.py | 13
-rw-r--r--  nova/api/openstack/compute/contrib/coverage_ext.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/flavormanage.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/security_groups.py | 144
-rw-r--r--  nova/api/openstack/compute/extensions.py | 5
-rw-r--r--  nova/api/openstack/compute/plugins/v3/certificates.py | 97
-rw-r--r--  nova/api/openstack/compute/plugins/v3/evacuate.py | 101
-rw-r--r--  nova/api/openstack/compute/plugins/v3/fixed_ips.py | 2
-rw-r--r--  nova/api/openstack/compute/plugins/v3/keypairs.py | 3
-rw-r--r--  nova/api/openstack/compute/plugins/v3/rescue.py | 100
-rw-r--r--  nova/api/openstack/extensions.py | 40
-rw-r--r--  nova/cells/messaging.py | 10
-rw-r--r--  nova/cells/scheduler.py | 8
-rw-r--r--  nova/cmd/baremetal_deploy_helper.py | 4
-rw-r--r--  nova/compute/api.py | 31
-rw-r--r--  nova/compute/flavors.py | 14
-rwxr-xr-x  nova/compute/manager.py | 154
-rw-r--r--  nova/compute/utils.py | 3
-rw-r--r--  nova/conductor/manager.py | 15
-rw-r--r--  nova/conductor/rpcapi.py | 6
-rw-r--r--  nova/db/api.py | 5
-rw-r--r--  nova/db/sqlalchemy/api.py | 7
-rw-r--r--  nova/exception.py | 19
-rw-r--r--  nova/manager.py | 10
-rw-r--r--  nova/network/quantumv2/api.py | 2
-rw-r--r--  nova/network/security_group/quantum_driver.py | 49
-rw-r--r--  nova/objects/__init__.py | 1
-rw-r--r--  nova/objects/instance.py | 2
-rw-r--r--  nova/openstack/common/gettextutils.py | 176
-rw-r--r--  nova/openstack/common/plugin/__init__.py | 14
-rw-r--r--  nova/openstack/common/plugin/callbackplugin.py | 93
-rw-r--r--  nova/openstack/common/plugin/plugin.py | 86
-rw-r--r--  nova/openstack/common/plugin/pluginmanager.py | 78
-rw-r--r--  nova/quota.py | 2
-rw-r--r--  nova/scheduler/utils.py | 11
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_admin_actions.py | 4
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_evacuate.py | 20
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | 15
-rw-r--r--  nova/tests/api/openstack/compute/extensions/test_plugin_api_extensions.py | 87
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_certificates.py | 77
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py | 198
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_rescue.py | 126
-rw-r--r--  nova/tests/api/openstack/compute/test_server_metadata.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/test_v3_extensions.py | 37
-rw-r--r--  nova/tests/cells/test_cells_scheduler.py | 20
-rw-r--r--  nova/tests/compute/test_compute.py | 162
-rw-r--r--  nova/tests/conductor/test_conductor.py | 20
-rw-r--r--  nova/tests/db/test_db_api.py | 9
-rw-r--r--  nova/tests/fake_policy.py | 3
-rw-r--r--  nova/tests/network/test_quantumv2.py | 7
-rw-r--r--  nova/tests/objects/test_instance.py | 2
-rw-r--r--  nova/tests/test_cinder.py | 5
-rw-r--r--  nova/tests/test_wsgi.py | 15
-rw-r--r--  nova/tests/utils.py | 2
-rw-r--r--  nova/tests/virt/baremetal/test_pxe.py | 26
-rwxr-xr-x  nova/tests/virt/baremetal/test_tilera.py | 7
-rw-r--r--  nova/tests/virt/xenapi/test_vmops.py | 11
-rw-r--r--  nova/tests/virt/xenapi/test_xenapi.py | 104
-rw-r--r--  nova/virt/baremetal/base.py | 2
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/api.py | 3
-rwxr-xr-x  nova/virt/baremetal/driver.py | 3
-rw-r--r--  nova/virt/baremetal/fake.py | 2
-rw-r--r--  nova/virt/baremetal/pxe.py | 56
-rw-r--r--  nova/virt/baremetal/pxe_config.template | 2
-rwxr-xr-x  nova/virt/baremetal/tilera.py | 28
-rw-r--r--  nova/virt/baremetal/utils.py | 29
-rw-r--r--  nova/virt/disk/vfs/localfs.py | 8
-rwxr-xr-x [-rw-r--r--]  nova/virt/libvirt/driver.py | 158
-rw-r--r--  nova/virt/libvirt/firewall.py | 9
-rw-r--r--  nova/virt/libvirt/vif.py | 21
-rw-r--r--  nova/virt/xenapi/agent.py | 174
-rw-r--r--  nova/virt/xenapi/fake.py | 9
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 4
-rw-r--r--  nova/virt/xenapi/vmops.py | 6
-rw-r--r--  openstack-common.conf | 1
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 8
-rw-r--r--  requirements.txt | 3
-rw-r--r--  setup.cfg | 3
82 files changed, 1879 insertions, 940 deletions
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index a9a584237..e79ff8380 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -34,6 +34,7 @@
"compute_extension:baremetal_nodes": "rule:admin_api",
"compute_extension:cells": "rule:admin_api",
"compute_extension:certificates": "",
+ "compute_extension:v3:os-certificates": "",
"compute_extension:cloudpipe": "rule:admin_api",
"compute_extension:cloudpipe_update": "rule:admin_api",
"compute_extension:console_output": "",
@@ -43,6 +44,7 @@
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "rule:admin_api",
+ "compute_extension:v3:os-evacuate": "rule:admin_api",
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:extended_status": "",
"compute_extension:extended_availability_zone": "",
@@ -86,6 +88,7 @@
"compute_extension:quotas:delete": "rule:admin_api",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
+ "compute_extension:v3:os-rescue": "",
"compute_extension:security_group_default_rules": "rule:admin_api",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "rule:admin_api",
diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py
index 50f6c94ac..793dcc0a7 100644
--- a/nova/api/metadata/password.py
+++ b/nova/api/metadata/password.py
@@ -27,10 +27,10 @@ MAX_SIZE = CHUNKS * CHUNK_LENGTH
def extract_password(instance):
result = ''
- for datum in sorted(instance.get('system_metadata', []),
- key=lambda x: x['key']):
- if datum['key'].startswith('password_'):
- result += datum['value']
+ sys_meta = utils.instance_sys_meta(instance)
+ for key in sorted(sys_meta.keys()):
+ if key.startswith('password_'):
+ result += sys_meta[key]
return result or None
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index bec919f4b..dd746e23d 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -358,9 +358,12 @@ def raise_http_conflict_for_instance_invalid_state(exc, action):
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
+ not_launched = exc.kwargs.get('not_launched')
if attr and state:
msg = _("Cannot '%(action)s' while instance is in %(attr)s "
"%(state)s") % {'action': action, 'attr': attr, 'state': state}
+ elif not_launched:
+ msg = _("Cannot '%s' an instance which has never been active") % action
else:
# At least give some meaningful message
msg = _("Instance is in an invalid state for '%s'") % action
diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py
index 64a6e26fe..4fe49aadf 100644
--- a/nova/api/openstack/compute/contrib/certificates.py
+++ b/nova/api/openstack/compute/contrib/certificates.py
@@ -38,15 +38,6 @@ class CertificateTemplate(xmlutil.TemplateBuilder):
return xmlutil.MasterTemplate(root, 1)
-class CertificatesTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('certificates')
- elem = xmlutil.SubTemplateElement(root, 'certificate',
- selector='certificates')
- make_certificate(elem)
- return xmlutil.MasterTemplate(root, 1)
-
-
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
@@ -64,7 +55,7 @@ class CertificatesController(object):
@wsgi.serializers(xml=CertificateTemplate)
def show(self, req, id):
- """Return a list of certificates."""
+ """Return certificate information."""
context = req.environ['nova.context']
authorize(context)
if id != 'root':
@@ -76,7 +67,7 @@ class CertificatesController(object):
@wsgi.serializers(xml=CertificateTemplate)
def create(self, req, body=None):
- """Return a list of certificates."""
+ """Create a certificate."""
context = req.environ['nova.context']
authorize(context)
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index 154699470..578e2e79d 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -134,11 +134,12 @@ class CoverageController(object):
# doesn't resolve to 127.0.0.1. Currently backdoors only open on
# loopback so this is for covering the common single host use case
except socket.error as e:
+ exc_info = sys.exc_info()
if 'ECONNREFUSED' in e and service['host'] == self.host:
service['telnet'] = telnetlib.Telnet('127.0.0.1',
service['port'])
else:
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
self.services.append(service)
self._start_coverage_telnet(service['telnet'], service['service'])
diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py
index 43d5d2110..441858c25 100644
--- a/nova/api/openstack/compute/contrib/flavormanage.py
+++ b/nova/api/openstack/compute/contrib/flavormanage.py
@@ -76,6 +76,8 @@ class FlavorManageController(wsgi.Controller):
except (exception.InstanceTypeExists,
exception.InstanceTypeIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
+ except exception.InvalidInput as exc:
+ raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
return self._view_builder.show(req, flavor)
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index b34a77cab..e2862be4e 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -16,6 +16,7 @@
"""The security groups extension."""
+import contextlib
import json
import webob
from webob import exc
@@ -177,6 +178,25 @@ class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
return sg_rule
+@contextlib.contextmanager
+def translate_exceptions():
+ """Translate nova exceptions to http exceptions."""
+ try:
+ yield
+ except exception.Invalid as exp:
+ msg = exp.format_message()
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.SecurityGroupNotFound as exp:
+ msg = exp.format_message()
+ raise exc.HTTPNotFound(explanation=msg)
+ except exception.InstanceNotFound as exp:
+ msg = exp.format_message()
+ raise exc.HTTPNotFound(explanation=msg)
+ except exception.SecurityGroupLimitExceeded as exp:
+ msg = exp.format_message()
+ raise exc.HTTPRequestEntityTooLarge(explanation=msg)
+
+
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
@@ -196,8 +216,9 @@ class SecurityGroupControllerBase(object):
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
- source_group = self.security_group_api.get(context,
- id=rule['group_id'])
+ with translate_exceptions():
+ source_group = self.security_group_api.get(context,
+ id=rule['group_id'])
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
else:
@@ -233,10 +254,10 @@ class SecurityGroupController(SecurityGroupControllerBase):
"""Return data about the given security group."""
context = _authorize_context(req)
- id = self.security_group_api.validate_id(id)
-
- security_group = self.security_group_api.get(context, None, id,
- map_exception=True)
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
@@ -245,12 +266,11 @@ class SecurityGroupController(SecurityGroupControllerBase):
"""Delete a security group."""
context = _authorize_context(req)
- id = self.security_group_api.validate_id(id)
-
- security_group = self.security_group_api.get(context, None, id,
- map_exception=True)
-
- self.security_group_api.destroy(context, security_group)
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
+ self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@@ -262,9 +282,11 @@ class SecurityGroupController(SecurityGroupControllerBase):
search_opts = {}
search_opts.update(req.GET)
- raw_groups = self.security_group_api.list(context,
- project=context.project_id,
- search_opts=search_opts)
+ with translate_exceptions():
+ project_id = context.project_id
+ raw_groups = self.security_group_api.list(context,
+ project=project_id,
+ search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
@@ -285,16 +307,12 @@ class SecurityGroupController(SecurityGroupControllerBase):
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
- self.security_group_api.validate_property(group_name, 'name', None)
- self.security_group_api.validate_property(group_description,
- 'description', None)
-
- try:
+ with translate_exceptions():
+ self.security_group_api.validate_property(group_name, 'name', None)
+ self.security_group_api.validate_property(group_description,
+ 'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
- except exception.SecurityGroupLimitExceeded as err:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=err.format_message())
return {'security_group': self._format_security_group(context,
group_ref)}
@@ -304,21 +322,21 @@ class SecurityGroupController(SecurityGroupControllerBase):
"""Update a security group."""
context = _authorize_context(req)
- id = self.security_group_api.validate_id(id)
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
- security_group = self.security_group_api.get(context, None, id,
- map_exception=True)
security_group_data = self._from_body(body, 'security_group')
-
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
- self.security_group_api.validate_property(group_name, 'name', None)
- self.security_group_api.validate_property(group_description,
- 'description', None)
-
- group_ref = self.security_group_api.update_security_group(
- context, security_group, group_name, group_description)
+ with translate_exceptions():
+ self.security_group_api.validate_property(group_name, 'name', None)
+ self.security_group_api.validate_property(group_description,
+ 'description', None)
+ group_ref = self.security_group_api.update_security_group(
+ context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
@@ -333,11 +351,12 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
sg_rule = self._from_body(body, 'security_group_rule')
- parent_group_id = self.security_group_api.validate_id(
- sg_rule.get('parent_group_id', None))
-
- security_group = self.security_group_api.get(context, None,
- parent_group_id, map_exception=True)
+ with translate_exceptions():
+ parent_group_id = self.security_group_api.validate_id(
+ sg_rule.get('parent_group_id', None))
+ security_group = self.security_group_api.get(context, None,
+ parent_group_id,
+ map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
@@ -360,13 +379,10 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
- try:
+ with translate_exceptions():
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
- except exception.SecurityGroupLimitExceeded as err:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=err.format_message())
return {"security_group_rule": self._format_security_group_rule(
context,
@@ -390,17 +406,15 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
def delete(self, req, id):
context = _authorize_context(req)
- id = self.security_group_api.validate_id(id)
-
- rule = self.security_group_api.get_rule(context, id)
-
- group_id = rule['parent_group_id']
-
- security_group = self.security_group_api.get(context, None, group_id,
- map_exception=True)
-
- self.security_group_api.remove_rules(context, security_group,
- [rule['id']])
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ rule = self.security_group_api.get_rule(context, id)
+ group_id = rule['parent_group_id']
+ security_group = self.security_group_api.get(context, None,
+ group_id,
+ map_exception=True)
+ self.security_group_api.remove_rules(context, security_group,
+ [rule['id']])
return webob.Response(status_int=202)
@@ -414,13 +428,11 @@ class ServerSecurityGroupController(SecurityGroupControllerBase):
self.security_group_api.ensure_default(context)
- try:
+ with translate_exceptions():
instance = self.compute_api.get(context, server_id)
- except exception.InstanceNotFound as exp:
- raise exc.HTTPNotFound(explanation=exp.format_message())
+ groups = self.security_group_api.get_instance_security_groups(
+ context, instance['uuid'], True)
- groups = self.security_group_api.get_instance_security_groups(
- context, instance['uuid'], True)
result = [self._format_security_group(context, group)
for group in groups]
@@ -455,15 +467,9 @@ class SecurityGroupActionController(wsgi.Controller):
return group_name
def _invoke(self, method, context, id, group_name):
- try:
+ with translate_exceptions():
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
- except exception.SecurityGroupNotFound as exp:
- raise exc.HTTPNotFound(explanation=exp.format_message())
- except exception.InstanceNotFound as exp:
- raise exc.HTTPNotFound(explanation=exp.format_message())
- except exception.Invalid as exp:
- raise exc.HTTPBadRequest(explanation=exp.format_message())
return webob.Response(status_int=202)
@@ -642,15 +648,15 @@ class Security_groups(extensions.ExtensionDescriptor):
class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
- raise exc.HTTPBadRequest(explanation=msg)
+ raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
- raise exc.HTTPBadRequest(explanation=msg)
+ raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
- raise exc.HTTPBadRequest(explanation=msg)
+ raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
@@ -662,7 +668,7 @@ class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_not_found(msg):
- raise exc.HTTPNotFound(explanation=msg)
+ raise exception.SecurityGroupNotFound(msg)
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index 5e6633f1d..ac8c04aa0 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -19,7 +19,6 @@ from oslo.config import cfg
from nova.api.openstack import extensions as base_extensions
from nova.openstack.common import log as logging
-from nova.openstack.common.plugin import pluginmanager
ext_opts = [
cfg.MultiStrOpt('osapi_compute_extension',
@@ -38,10 +37,6 @@ class ExtensionManager(base_extensions.ExtensionManager):
def __init__(self):
LOG.audit(_('Initializing extension manager.'))
self.cls_list = CONF.osapi_compute_extension
- self.PluginManager = pluginmanager.PluginManager('nova',
- 'compute-extensions')
- self.PluginManager.load_plugins()
- self.cls_list.append(self.PluginManager.plugin_extension_factory)
self.extensions = {}
self.sorted_ext_list = []
self._load_extensions()
diff --git a/nova/api/openstack/compute/plugins/v3/certificates.py b/nova/api/openstack/compute/plugins/v3/certificates.py
new file mode 100644
index 000000000..175780f9c
--- /dev/null
+++ b/nova/api/openstack/compute/plugins/v3/certificates.py
@@ -0,0 +1,97 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+import nova.cert.rpcapi
+from nova import network
+
+ALIAS = "os-certificates"
+authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
+
+
+def make_certificate(elem):
+ elem.set('data')
+ elem.set('private_key')
+
+
+class CertificateTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('certificate',
+ selector='certificate')
+ make_certificate(root)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+def _translate_certificate_view(certificate, private_key=None):
+ return {
+ 'data': certificate,
+ 'private_key': private_key,
+ }
+
+
+class CertificatesController(object):
+ """The x509 Certificates API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.network_api = network.API()
+ self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
+ super(CertificatesController, self).__init__()
+
+ @wsgi.serializers(xml=CertificateTemplate)
+ def show(self, req, id):
+ """Return certificate information."""
+ context = req.environ['nova.context']
+ authorize(context)
+ if id != 'root':
+ msg = _("Only root certificate can be retrieved.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+ cert = self.cert_rpcapi.fetch_ca(context,
+ project_id=context.project_id)
+ return {'certificate': _translate_certificate_view(cert)}
+
+ @wsgi.serializers(xml=CertificateTemplate)
+ def create(self, req, body=None):
+ """Create a certificate."""
+ context = req.environ['nova.context']
+ authorize(context)
+ pk, cert = self.cert_rpcapi.generate_x509_cert(context,
+ user_id=context.user_id, project_id=context.project_id)
+ context = req.environ['nova.context']
+ return {'certificate': _translate_certificate_view(cert, pk)}
+
+
+class Certificates(extensions.V3APIExtensionBase):
+ """Certificates support."""
+
+ name = "Certificates"
+ alias = ALIAS
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "certificates/api/v3")
+ version = 1
+
+ def get_resources(self):
+ resources = [
+ extensions.ResourceExtension('os-certificates',
+ CertificatesController(),
+ member_actions={})]
+ return resources
+
+ def get_controller_extensions(self):
+ return []
diff --git a/nova/api/openstack/compute/plugins/v3/evacuate.py b/nova/api/openstack/compute/plugins/v3/evacuate.py
new file mode 100644
index 000000000..86e90e03e
--- /dev/null
+++ b/nova/api/openstack/compute/plugins/v3/evacuate.py
@@ -0,0 +1,101 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import compute
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.openstack.common import strutils
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+ALIAS = "os-evacuate"
+authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
+
+
+class EvacuateController(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(EvacuateController, self).__init__(*args, **kwargs)
+ self.compute_api = compute.API()
+
+ @wsgi.action('evacuate')
+ def _evacuate(self, req, id, body):
+ """
+ Permit admins to evacuate a server from a failed host
+ to a new one.
+ """
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ try:
+ if len(body) != 1:
+ raise exc.HTTPBadRequest(_("Malformed request body"))
+
+ evacuate_body = body["evacuate"]
+ host = evacuate_body["host"]
+ on_shared_storage = strutils.bool_from_string(
+ evacuate_body["onSharedStorage"])
+
+ password = None
+ if 'adminPass' in evacuate_body:
+ # check that if requested to evacuate server on shared storage
+ # password not specified
+ if on_shared_storage:
+ msg = _("admin password can't be changed on existing disk")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ password = evacuate_body['adminPass']
+ elif not on_shared_storage:
+ password = utils.generate_password()
+
+ except (TypeError, KeyError):
+ msg = _("host and onSharedStorage must be specified.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ instance = self.compute_api.get(context, id)
+ self.compute_api.evacuate(context, instance, host,
+ on_shared_storage, password)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'evacuate')
+ except Exception as e:
+ msg = _("Error in evacuate, %s") % e
+ LOG.exception(msg, instance=instance)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if password:
+ return {'adminPass': password}
+
+
+class Evacuate(extensions.V3APIExtensionBase):
+ """Enables server evacuation."""
+
+ name = "Evacuate"
+ alias = ALIAS
+ namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v3"
+ version = 1
+
+ def get_resources(self):
+ return []
+
+ def get_controller_extensions(self):
+ controller = EvacuateController()
+ extension = extensions.ControllerExtension(self, 'servers', controller)
+ return [extension]
diff --git a/nova/api/openstack/compute/plugins/v3/fixed_ips.py b/nova/api/openstack/compute/plugins/v3/fixed_ips.py
index e98b830bd..5fa4ae3c2 100644
--- a/nova/api/openstack/compute/plugins/v3/fixed_ips.py
+++ b/nova/api/openstack/compute/plugins/v3/fixed_ips.py
@@ -28,6 +28,7 @@ authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class FixedIPController(object):
+ @extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given fixed ip."""
context = req.environ['nova.context']
@@ -55,6 +56,7 @@ class FixedIPController(object):
return fixed_ip_info
+ @extensions.expected_errors((400, 404))
def action(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py
index bf740641e..ab40b051c 100644
--- a/nova/api/openstack/compute/plugins/v3/keypairs.py
+++ b/nova/api/openstack/compute/plugins/v3/keypairs.py
@@ -55,6 +55,7 @@ class KeypairController(object):
self.api = compute_api.KeypairAPI()
@wsgi.serializers(xml=KeypairTemplate)
+ @extensions.expected_errors((400, 409, 413))
def create(self, req, body):
"""
Create or import keypair.
@@ -100,6 +101,7 @@ class KeypairController(object):
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
+ @extensions.expected_errors(404)
def delete(self, req, id):
"""
Delete a keypair with a given name
@@ -113,6 +115,7 @@ class KeypairController(object):
return webob.Response(status_int=202)
@wsgi.serializers(xml=KeypairTemplate)
+ @extensions.expected_errors(404)
def show(self, req, id):
"""Return data for the given key name."""
context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/plugins/v3/rescue.py b/nova/api/openstack/compute/plugins/v3/rescue.py
new file mode 100644
index 000000000..ded18bb1a
--- /dev/null
+++ b/nova/api/openstack/compute/plugins/v3/rescue.py
@@ -0,0 +1,100 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The rescue mode extension."""
+
+from oslo.config import cfg
+import webob
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import compute
+from nova import exception
+from nova import utils
+
+
+ALIAS = "os-rescue"
+CONF = cfg.CONF
+authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
+
+
+class RescueController(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(RescueController, self).__init__(*args, **kwargs)
+ self.compute_api = compute.API()
+
+ def _get_instance(self, context, instance_id):
+ try:
+ return self.compute_api.get(context, instance_id)
+ except exception.InstanceNotFound:
+ msg = _("Server not found")
+ raise exc.HTTPNotFound(msg)
+
+ @wsgi.action('rescue')
+ def _rescue(self, req, id, body):
+ """Rescue an instance."""
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ if body['rescue'] and 'adminPass' in body['rescue']:
+ password = body['rescue']['adminPass']
+ else:
+ password = utils.generate_password()
+
+ instance = self._get_instance(context, id)
+ try:
+ self.compute_api.rescue(context, instance,
+ rescue_password=password)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'rescue')
+ except exception.InvalidVolume as volume_error:
+ raise exc.HTTPConflict(explanation=volume_error.format_message())
+ except exception.InstanceNotRescuable as non_rescuable:
+ raise exc.HTTPBadRequest(
+ explanation=non_rescuable.format_message())
+
+ return {'adminPass': password}
+
+ @wsgi.action('unrescue')
+ def _unrescue(self, req, id, body):
+ """Unrescue an instance."""
+ context = req.environ["nova.context"]
+ authorize(context)
+ instance = self._get_instance(context, id)
+ try:
+ self.compute_api.unrescue(context, instance)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'unrescue')
+ return webob.Response(status_int=202)
+
+
+class Rescue(extensions.V3APIExtensionBase):
+ """Instance rescue mode."""
+
+ name = "Rescue"
+ alias = ALIAS
+ namespace = "http://docs.openstack.org/compute/ext/rescue/api/v3"
+ version = 1
+
+ def get_resources(self):
+ return []
+
+ def get_controller_extensions(self):
+ controller = RescueController()
+ extension = extensions.ControllerExtension(self, 'servers', controller)
+ return [extension]
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 6cbc5bb78..6fea5d35e 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -17,6 +17,7 @@
# under the License.
import abc
+import functools
import os
import webob.dec
@@ -451,3 +452,42 @@ class V3APIExtensionBase(object):
def version(self):
"""Version of the extension."""
pass
+
+
+def expected_errors(errors):
+ """Decorator for v3 API methods which specifies expected exceptions.
+
+ Specify which exceptions may occur when an API method is called. If an
+ unexpected exception occurs then return a 500 instead and ask the user
+ of the API to file a bug report.
+ """
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception as exc:
+ if isinstance(exc, webob.exc.WSGIHTTPException):
+ if isinstance(errors, int):
+ t_errors = (errors,)
+ else:
+ t_errors = errors
+ if exc.code in t_errors:
+ raise
+ elif isinstance(exc, exception.PolicyNotAuthorized):
+ # Note(cyeoh): Special case to handle
+ # PolicyNotAuthorized exceptions so every
+ # extension method does not need to wrap authorize
+ # calls. ResourceExceptionHandler silently
+ # converts NotAuthorized to HTTPForbidden
+ raise
+
+ LOG.exception(_("Unexpected exception in API method"))
+ msg = _('Unexpected API Error. Please report this at '
+ 'http://bugs.launchpad.net/nova/ and attach the Nova '
+ 'API log if possible.\n%s') % type(exc)
+ raise webob.exc.HTTPInternalServerError(explanation=msg)
+
+ return wrapped
+
+ return decorator
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 319067836..6f4183f5d 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -813,12 +813,10 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
info_cache.pop('id', None)
info_cache.pop('instance', None)
- # Fixup system_metadata (should be a dict for update, not a list)
- if ('system_metadata' in instance and
- isinstance(instance['system_metadata'], list)):
- sys_metadata = dict([(md['key'], md['value'])
- for md in instance['system_metadata']])
- instance['system_metadata'] = sys_metadata
+ if 'system_metadata' in instance:
+ # Make sure we have the dict form that we need for
+ # instance_update.
+ instance['system_metadata'] = utils.instance_sys_meta(instance)
LOG.debug(_("Got update for instance: %(instance)s"),
{'instance': instance}, instance_uuid=instance_uuid)
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
index c54b9b578..08f485189 100644
--- a/nova/cells/scheduler.py
+++ b/nova/cells/scheduler.py
@@ -175,9 +175,10 @@ class CellsScheduler(base.Base):
"""Attempt to build instance(s) or send msg to child cell."""
ctxt = message.ctxt
instance_properties = build_inst_kwargs['instances'][0]
- instance_type = build_inst_kwargs['instance_type']
+ filter_properties = build_inst_kwargs['filter_properties']
+ instance_type = filter_properties['instance_type']
image = build_inst_kwargs['image']
- security_groups = build_inst_kwargs['security_group']
+ security_groups = build_inst_kwargs['security_groups']
block_device_mapping = build_inst_kwargs['block_device_mapping']
LOG.debug(_("Building instances with routing_path=%(routing_path)s"),
@@ -214,7 +215,8 @@ class CellsScheduler(base.Base):
instance_uuids = [inst['uuid'] for inst in
build_inst_kwargs['instances']]
instances = build_inst_kwargs['instances']
- request_spec = scheduler_utils.build_request_spec(image, instances)
+ request_spec = scheduler_utils.build_request_spec(message.ctxt,
+ image, instances)
filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
filter_properties.update({'context': message.ctxt,
'scheduler': self,
diff --git a/nova/cmd/baremetal_deploy_helper.py b/nova/cmd/baremetal_deploy_helper.py
index c28c0eef1..7b9b51e11 100644
--- a/nova/cmd/baremetal_deploy_helper.py
+++ b/nova/cmd/baremetal_deploy_helper.py
@@ -288,10 +288,14 @@ class BareMetalDeploy(object):
port = q.get('p', '3260')
iqn = q['n']
lun = q.get('l', '1')
+ err_msg = q.get('e')
except KeyError as e:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return "parameter '%s' is not defined" % e
+ if err_msg:
+ LOG.error('Deploy agent error message: ' + err_msg)
+
context = nova_context.get_admin_context()
d = db.bm_node_get(context, node_id)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 1e24e8ce5..0a9b0e67b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -110,10 +110,12 @@ RO_SECURITY_GROUPS = ['default']
SM_IMAGE_PROP_PREFIX = "image_"
-def check_instance_state(vm_state=None, task_state=(None,)):
+def check_instance_state(vm_state=None, task_state=(None,),
+ must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
- If the instance is in the wrong state, the wrapper will raise an exception.
+ If the instance is in the wrong state, or has not been successfully started
+ at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
@@ -137,6 +139,13 @@ def check_instance_state(vm_state=None, task_state=(None,)):
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
+ if must_have_launched and not instance['launched_at']:
+ raise exception.InstanceInvalidState(
+ attr=None,
+ not_launched=True,
+ instance_uuid=instance['uuid'],
+ state=instance['vm_state'],
+ method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
@@ -1305,7 +1314,8 @@ class API(base.Base):
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
- @check_instance_state(vm_state=None, task_state=None)
+ @check_instance_state(vm_state=None, task_state=None,
+ must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
@@ -1329,7 +1339,8 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
- @check_instance_state(vm_state=None, task_state=None)
+ @check_instance_state(vm_state=None, task_state=None,
+ must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
@@ -1369,7 +1380,8 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
+ @check_instance_state(vm_state=[vm_states.SOFT_DELETED],
+ must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
@@ -1790,7 +1802,8 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.PAUSED, vm_states.SUSPENDED],
+ vm_states.PAUSED, vm_states.SUSPENDED,
+ vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
@@ -1826,7 +1839,8 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
@@ -2224,7 +2238,8 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
diff --git a/nova/compute/flavors.py b/nova/compute/flavors.py
index 59d5d5715..a18b375d8 100644
--- a/nova/compute/flavors.py
+++ b/nova/compute/flavors.py
@@ -95,20 +95,20 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
# Some attributes are positive ( > 0) integers
for option in ['memory_mb', 'vcpus']:
try:
+ assert int(str(kwargs[option])) > 0
kwargs[option] = int(kwargs[option])
- assert kwargs[option] > 0
- except (ValueError, AssertionError):
- msg = _("'%s' argument must be greater than 0") % option
+ except (ValueError, AssertionError, TypeError):
+ msg = _("'%s' argument must be a positive integer") % option
raise exception.InvalidInput(reason=msg)
# Some attributes are non-negative ( >= 0) integers
for option in ['root_gb', 'ephemeral_gb', 'swap']:
try:
+ assert int(str(kwargs[option])) >= 0
kwargs[option] = int(kwargs[option])
- assert kwargs[option] >= 0
- except (ValueError, AssertionError):
- msg = _("'%s' argument must be greater than or equal"
- " to 0") % option
+ except (ValueError, AssertionError, TypeError):
+ msg = _("'%s' argument must be an integer greater than or"
+ " equal to 0") % option
raise exception.InvalidInput(reason=msg)
# rxtx_factor should be a positive float
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index bd4e6f772..1a6479f7f 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -878,7 +878,7 @@ class ComputeManager(manager.SchedulerDependentManager):
extra_usage_info = {}
- def notify(status, msg=None):
+ def notify(status, msg=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
@@ -886,7 +886,7 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = ""
info['message'] = msg
self._notify_about_instance_usage(context, instance, type_,
- extra_usage_info=info)
+ extra_usage_info=info, **kwargs)
try:
image_meta = self._prebuild_instance(context, instance)
@@ -895,10 +895,11 @@ class ComputeManager(manager.SchedulerDependentManager):
notify("start") # notify that build is starting
- instance = self._build_instance(context, request_spec,
- filter_properties, requested_networks, injected_files,
- admin_password, is_first_time, node, instance, image_meta)
- notify("end", msg=_("Success")) # notify that build is done
+ instance, network_info = self._build_instance(context,
+ request_spec, filter_properties, requested_networks,
+ injected_files, admin_password, is_first_time, node,
+ instance, image_meta)
+ notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
@@ -1028,7 +1029,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise exc_info[0], exc_info[1], exc_info[2]
# spawn success
- return instance
+ return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
type_, value, tb = exc_info
@@ -1446,7 +1447,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
- system_meta = utils.metadata_to_dict(instance['system_metadata'])
+ system_meta = utils.instance_sys_meta(instance)
self.conductor_api.instance_destroy(context, instance)
except Exception:
with excutils.save_and_reraise_exception():
@@ -2071,7 +2072,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_rescue_image_ref(self, context, instance):
"""Determine what image should be used to boot the rescue VM."""
- system_meta = utils.metadata_to_dict(instance['system_metadata'])
+ system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
@@ -2173,7 +2174,7 @@ class ComputeManager(manager.SchedulerDependentManager):
Returns the updated system_metadata as a dict, as well as the
post-cleanup current instance type.
"""
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
if restore_old:
instance_type = flavors.extract_flavor(instance, 'old_')
sys_meta = flavors.save_flavor_info(sys_meta, instance_type)
@@ -2427,9 +2428,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
- flavors.save_flavor_info(sys_meta, instance_type,
- prefix='new_')
+ sys_meta = utils.instance_sys_meta(instance)
+ flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance['vm_state']
@@ -2599,7 +2599,7 @@ class ComputeManager(manager.SchedulerDependentManager):
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = flavors.extract_flavor(instance)
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
@@ -3614,20 +3614,20 @@ class ComputeManager(manager.SchedulerDependentManager):
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
- instance = self.conductor_api.instance_get_by_uuid(context,
- instance_uuids.pop(0))
+ instance = instance_obj.Instance.get_by_uuid(
+ context, instance_uuids.pop(0))
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
- db_instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
+ db_instances = instance_obj.InstanceList.get_by_host(
+ context, self.host, expected_attrs=[])
if not db_instances:
# None.. just return.
return
- instance = db_instances.pop(0)
- instance_uuids = [inst['uuid'] for inst in db_instances]
+ instance = db_instances[0]
+ instance_uuids = [inst['uuid'] for inst in db_instances[1:]]
self._instance_uuids_to_heal = instance_uuids
# We have an instance now and it's ours
@@ -3676,62 +3676,64 @@ class ComputeManager(manager.SchedulerDependentManager):
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
- if CONF.resize_confirm_window > 0:
- capi = self.conductor_api
- migrations = capi.migration_get_unconfirmed_by_dest_compute(
- context, CONF.resize_confirm_window, self.host)
-
- migrations_info = dict(migration_count=len(migrations),
- confirm_window=CONF.resize_confirm_window)
-
- if migrations_info["migration_count"] > 0:
- LOG.info(_("Found %(migration_count)d unconfirmed migrations "
- "older than %(confirm_window)d seconds"),
- migrations_info)
-
- def _set_migration_to_error(migration, reason, **kwargs):
- LOG.warn(_("Setting migration %(migration_id)s to error: "
- "%(reason)s"),
- {'migration_id': migration['id'], 'reason': reason},
- **kwargs)
- self.conductor_api.migration_update(context, migration,
- 'error')
-
- for migration in migrations:
- instance_uuid = migration['instance_uuid']
- LOG.info(_("Automatically confirming migration "
- "%(migration_id)s for instance %(instance_uuid)s"),
- {'migration_id': migration['id'],
- 'instance_uuid': instance_uuid})
- try:
- instance = self.conductor_api.instance_get_by_uuid(
- context, instance_uuid)
- except exception.InstanceNotFound:
- reason = (_("Instance %s not found") %
- instance_uuid)
- _set_migration_to_error(migration, reason)
- continue
- if instance['vm_state'] == vm_states.ERROR:
- reason = _("In ERROR state")
- _set_migration_to_error(migration, reason,
- instance=instance)
- continue
- vm_state = instance['vm_state']
- task_state = instance['task_state']
- if vm_state != vm_states.RESIZED or task_state is not None:
- reason = (_("In states %(vm_state)s/%(task_state)s, not "
- "RESIZED/None") %
- {'vm_state': vm_state,
- 'task_state': task_state})
- _set_migration_to_error(migration, reason,
- instance=instance)
- continue
- try:
- self.conductor_api.compute_confirm_resize(
- context, instance, migration_ref=migration)
- except Exception as e:
- LOG.error(_("Error auto-confirming resize: %s. "
- "Will retry later.") % e, instance=instance)
+ if CONF.resize_confirm_window == 0:
+ return
+
+ capi = self.conductor_api
+ migrations = capi.migration_get_unconfirmed_by_dest_compute(
+ context, CONF.resize_confirm_window, self.host)
+
+ migrations_info = dict(migration_count=len(migrations),
+ confirm_window=CONF.resize_confirm_window)
+
+ if migrations_info["migration_count"] > 0:
+ LOG.info(_("Found %(migration_count)d unconfirmed migrations "
+ "older than %(confirm_window)d seconds"),
+ migrations_info)
+
+ def _set_migration_to_error(migration, reason, **kwargs):
+ LOG.warn(_("Setting migration %(migration_id)s to error: "
+ "%(reason)s"),
+ {'migration_id': migration['id'], 'reason': reason},
+ **kwargs)
+ self.conductor_api.migration_update(context, migration,
+ 'error')
+
+ for migration in migrations:
+ instance_uuid = migration['instance_uuid']
+ LOG.info(_("Automatically confirming migration "
+ "%(migration_id)s for instance %(instance_uuid)s"),
+ {'migration_id': migration['id'],
+ 'instance_uuid': instance_uuid})
+ try:
+ instance = instance_obj.Instance.get_by_uuid(context,
+ instance_uuid)
+ except exception.InstanceNotFound:
+ reason = (_("Instance %s not found") %
+ instance_uuid)
+ _set_migration_to_error(migration, reason)
+ continue
+ if instance['vm_state'] == vm_states.ERROR:
+ reason = _("In ERROR state")
+ _set_migration_to_error(migration, reason,
+ instance=instance)
+ continue
+ vm_state = instance['vm_state']
+ task_state = instance['task_state']
+ if vm_state != vm_states.RESIZED or task_state is not None:
+ reason = (_("In states %(vm_state)s/%(task_state)s, not "
+ "RESIZED/None") %
+ {'vm_state': vm_state,
+ 'task_state': task_state})
+ _set_migration_to_error(migration, reason,
+ instance=instance)
+ continue
+ try:
+ self.conductor_api.compute_confirm_resize(
+ context, instance, migration_ref=migration)
+ except Exception as e:
+ LOG.error(_("Error auto-confirming resize: %s. "
+ "Will retry later.") % e, instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 9637d8773..3db4a14f2 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -203,8 +203,7 @@ def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data)
if system_metadata is None:
- system_metadata = utils.metadata_to_dict(
- instance_ref['system_metadata'])
+ system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 6693e7fee..cc1b05cc4 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -26,7 +26,6 @@ from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
-from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
@@ -67,7 +66,7 @@ class ConductorManager(manager.Manager):
namespace. See the ComputeTaskManager class for details.
"""
- RPC_API_VERSION = '1.51'
+ RPC_API_VERSION = '1.52'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -343,9 +342,6 @@ class ConductorManager(manager.Manager):
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
- # The session object is needed here, as the vol_usage object returned
- # needs to bound to it in order to refresh its data
- session = db_session.get_session()
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
@@ -353,8 +349,7 @@ class ConductorManager(manager.Manager):
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
- update_totals,
- session)
+ update_totals)
# We have just updated the database, so send the notification now
notifier.notify(context, 'conductor.%s' % self.host, 'volume.usage',
@@ -480,6 +475,9 @@ class ConductorManager(manager.Manager):
self.compute_api.stop(context, instance, do_cast)
def compute_confirm_resize(self, context, instance, migration_ref):
+ if isinstance(instance, nova_object.NovaObject):
+ # NOTE(danms): Remove this at RPC API v2.0
+ instance = dict(instance.items())
self.compute_api.confirm_resize(context, instance, migration_ref)
def compute_unrescue(self, context, instance):
@@ -546,7 +544,8 @@ class ComputeTaskManager(object):
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping):
- request_spec = scheduler_utils.build_request_spec(image, instances)
+ request_spec = scheduler_utils.build_request_spec(context, image,
+ instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index bb66ca8b2..fcbb87f0e 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -102,6 +102,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.50 - Added object_action() and object_class_action()
1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
+ 1.52 - Pass instance objects for compute_confirm_resize
"""
BASE_RPC_API_VERSION = '1.0'
@@ -459,11 +460,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
return self.call(context, msg, version='1.43')
def compute_confirm_resize(self, context, instance, migration_ref):
- instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration_ref)
- msg = self.make_msg('compute_confirm_resize', instance=instance_p,
+ msg = self.make_msg('compute_confirm_resize', instance=instance,
migration_ref=migration_p)
- return self.call(context, msg, version='1.46')
+ return self.call(context, msg, version='1.52')
def compute_unrescue(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
diff --git a/nova/db/api.py b/nova/db/api.py
index ceab5fcd8..973be1a26 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1561,7 +1561,7 @@ def vol_get_usage_by_time(context, begin):
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
- update_totals=False, session=None):
+ update_totals=False):
"""Update cached volume usage for a volume
Creates new record if needed.
@@ -1569,8 +1569,7 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
wr_bytes, instance_id, project_id, user_id,
availability_zone,
- update_totals=update_totals,
- session=session)
+ update_totals=update_totals)
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 69157a86a..fd79ae215 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4396,9 +4396,8 @@ def vol_get_usage_by_time(context, begin):
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
- update_totals=False, session=None):
- if not session:
- session = get_session()
+ update_totals=False):
+ session = get_session()
refreshed = timeutils.utcnow()
@@ -4471,6 +4470,8 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
+ current_usage.save(session=session)
+ session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
diff --git a/nova/exception.py b/nova/exception.py
index c774b56cc..e5924b831 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -25,6 +25,7 @@ SHOULD include dedicated exception logging.
"""
import functools
+import sys
from oslo.config import cfg
import webob.exc
@@ -127,7 +128,8 @@ class NovaException(Exception):
try:
message = self.message % kwargs
- except Exception as e:
+ except Exception:
+ exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
@@ -135,7 +137,7 @@ class NovaException(Exception):
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
else:
# at least get the core message out if something happened
message = self.message
@@ -1241,6 +1243,19 @@ class CoreAPIMissing(NovaException):
message = _("Core API extensions are missing: %(missing_apis)s")
+class AgentError(NovaException):
+ message = _('Error during following call to agent: %(method)s')
+
+
+class AgentTimeout(AgentError):
+ message = _('Unable to contact guest agent. '
+ 'The following call timed out: %(method)s')
+
+
+class AgentNotImplemented(AgentError):
+ message = _('Agent does not support the call: %(method)s')
+
+
class InstanceGroupNotFound(NotFound):
message = _("Instance group %(group_uuid)s could not be found.")
diff --git a/nova/manager.py b/nova/manager.py
index d1cd26a4d..8fb5db81f 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -60,7 +60,6 @@ from nova.db import base
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
-from nova.openstack.common.plugin import pluginmanager
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.scheduler import rpcapi as scheduler_rpcapi
@@ -78,15 +77,10 @@ class Manager(base.Base, periodic_task.PeriodicTasks):
if not host:
host = CONF.host
self.host = host
- self.load_plugins()
self.backdoor_port = None
self.service_name = service_name
super(Manager, self).__init__(db_driver)
- def load_plugins(self):
- pluginmgr = pluginmanager.PluginManager('nova', self.__class__)
- pluginmgr.load_plugins()
-
def create_rpc_dispatcher(self, backdoor_port=None, additional_apis=None):
'''Get the rpc dispatcher for this manager.
@@ -151,10 +145,6 @@ class SchedulerDependentManager(Manager):
super(SchedulerDependentManager, self).__init__(host, db_driver,
service_name)
- def load_plugins(self):
- pluginmgr = pluginmanager.PluginManager('nova', self.service_name)
- pluginmgr.load_plugins()
-
def update_service_capabilities(self, capabilities):
"""Remember these capabilities to send on next periodic update."""
if not isinstance(capabilities, list):
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index e155f694a..c7f4ffd58 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -538,7 +538,7 @@ class API(base.Base):
def get_all(self, context):
"""Get all networks for client."""
client = quantumv2.get_client(context)
- networks = client.list_networks().get('networks') or {}
+ networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
diff --git a/nova/network/security_group/quantum_driver.py b/nova/network/security_group/quantum_driver.py
index 7709da970..6e62d796d 100644
--- a/nova/network/security_group/quantum_driver.py
+++ b/nova/network/security_group/quantum_driver.py
@@ -17,6 +17,8 @@
#
# @author: Aaron Rosen, Nicira Networks, Inc.
+import sys
+
from oslo.config import cfg
from quantumclient.common import exceptions as q_exc
from quantumclient.quantum import v2_0 as quantumv20
@@ -50,6 +52,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
security_group = quantum.create_security_group(
body).get('security_group')
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
LOG.exception(_("Quantum Error creating security group %s"),
name)
if e.status_code == 401:
@@ -57,7 +60,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def update_security_group(self, context, security_group,
@@ -68,6 +71,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
security_group = quantum.update_security_group(
security_group['id'], body).get('security_group')
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
LOG.exception(_("Quantum Error updating security group %s"),
name)
if e.status_code == 401:
@@ -75,7 +79,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
@@ -120,12 +124,13 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
quantum, 'security_group', name)
group = quantum.show_security_group(id).get('security_group')
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug(_("Quantum security group %s not found"), name)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(group)
@@ -143,9 +148,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
try:
security_groups = quantum.list_security_groups(**search_opts).get(
'security_groups')
- except q_exc.QuantumClientException as e:
- LOG.exception(_("Quantum Error getting security groups"))
- raise e
+ except q_exc.QuantumClientException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Quantum Error getting security groups"))
converted_rules = []
for security_group in security_groups:
converted_rules.append(
@@ -165,13 +170,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
try:
quantum.delete_security_group(security_group['id'])
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 404:
self.raise_not_found(e.message)
elif e.status_code == 409:
self.raise_invalid_property(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
@@ -188,13 +194,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
rules = quantum.create_security_group_rule(
body).get('security_group_rules')
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 409:
LOG.exception(_("Quantum Error getting security group %s"),
name)
self.raise_not_found(e.message)
else:
LOG.exception(_("Quantum Error:"))
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
converted_rules = []
for rule in rules:
converted_rules.append(
@@ -246,9 +253,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
for rule_id in range(0, len(rule_ids)):
quantum.delete_security_group_rule(rule_ids.pop())
except q_exc.QuantumClientException as e:
- LOG.exception(_("Quantum Error unable to delete %s"),
- rule_ids)
- raise e
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Quantum Error unable to delete %s"), rule_ids)
def get_rule(self, context, id):
quantum = quantumv2.get_client(context)
@@ -256,12 +262,13 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
rule = quantum.show_security_group_rule(
id).get('security_group_rule')
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug(_("Quantum security group rule %s not found"), id)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_rule_format(rule)
def get_instances_security_groups_bindings(self, context):
@@ -350,19 +357,20 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
- except q_exc.QuantumClientException as e:
+ except q_exc.QuantumClientException:
+ with excutils.save_and_reraise_exception():
LOG.exception(_("Quantum Error:"))
- raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
@@ -398,19 +406,20 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
+ exc_info = sys.exc_info()
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
- raise e
+ raise exc_info[0], exc_info[1], exc_info[2]
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
- except q_exc.QuantumClientException as e:
+ except q_exc.QuantumClientException:
+ with excutils.save_and_reraise_exception():
LOG.exception(_("Quantum Error:"))
- raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
@@ -438,8 +447,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
quantum.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
- LOG.exception(_("Quantum Error:"))
- raise e
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Quantum Error:"))
if not found_security_group:
msg = (_("Security group %(security_group_name)s not assocaited "
"with the instance %(instance)s"),
diff --git a/nova/objects/__init__.py b/nova/objects/__init__.py
index e39f0154c..00f8240f1 100644
--- a/nova/objects/__init__.py
+++ b/nova/objects/__init__.py
@@ -18,3 +18,4 @@ def register_all():
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('nova.objects.instance')
+ __import__('nova.objects.instance_info_cache')
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index c1b765f29..de47f648f 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -191,7 +191,7 @@ class Instance(base.NovaObject):
return instance
@base.remotable_classmethod
- def get_by_uuid(cls, context, uuid=None, expected_attrs=None):
+ def get_by_uuid(cls, context, uuid, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py
index 2d2e94a7c..96f9b49db 100644
--- a/nova/openstack/common/gettextutils.py
+++ b/nova/openstack/common/gettextutils.py
@@ -2,6 +2,7 @@
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
+# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -23,8 +24,11 @@ Usual usage in an openstack.common module:
from nova.openstack.common.gettextutils import _
"""
+import copy
import gettext
+import logging.handlers
import os
+import UserString
_localedir = os.environ.get('nova'.upper() + '_LOCALEDIR')
_t = gettext.translation('nova', localedir=_localedir, fallback=True)
@@ -48,3 +52,175 @@ def install(domain):
gettext.install(domain,
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+ """Assemble and return a lazy gettext function for a given domain.
+
+ Factory method for a project/module to get a lazy gettext function
+ for its own translation domain (i.e. nova, glance, cinder, etc.)
+ """
+
+ def _lazy_gettext(msg):
+ """Create and return a Message object.
+
+ Message encapsulates a string so that we can translate it later when
+ needed.
+ """
+ return Message(msg, domain)
+
+ return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+ """Class used to encapsulate translatable messages."""
+ def __init__(self, msg, domain):
+ # _msg is the gettext msgid and should never change
+ self._msg = msg
+ self._left_extra_msg = ''
+ self._right_extra_msg = ''
+ self.params = None
+ self.locale = None
+ self.domain = domain
+
+ @property
+ def data(self):
+ # NOTE(mrodden): this should always resolve to a unicode string
+ # that best represents the state of the message currently
+
+ localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+ if self.locale:
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ languages=[self.locale],
+ fallback=True)
+ else:
+ # use system locale for translations
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ fallback=True)
+
+ full_msg = (self._left_extra_msg +
+ lang.ugettext(self._msg) +
+ self._right_extra_msg)
+
+ if self.params is not None:
+ full_msg = full_msg % self.params
+
+ return unicode(full_msg)
+
+ def _save_parameters(self, other):
+ # we check for None later to see if
+ # we actually have parameters to inject,
+ # so encapsulate if our parameter is actually None
+ if other is None:
+ self.params = (other, )
+ else:
+ self.params = copy.deepcopy(other)
+
+ return self
+
+ # overrides to be more string-like
+ def __unicode__(self):
+ return self.data
+
+ def __str__(self):
+ return self.data.encode('utf-8')
+
+ def __getstate__(self):
+ to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+ 'domain', 'params', 'locale']
+ new_dict = self.__dict__.fromkeys(to_copy)
+ for attr in to_copy:
+ new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+ return new_dict
+
+ def __setstate__(self, state):
+ for (k, v) in state.items():
+ setattr(self, k, v)
+
+ # operator overloads
+ def __add__(self, other):
+ copied = copy.deepcopy(self)
+ copied._right_extra_msg += other.__str__()
+ return copied
+
+ def __radd__(self, other):
+ copied = copy.deepcopy(self)
+ copied._left_extra_msg += other.__str__()
+ return copied
+
+ def __mod__(self, other):
+ # do a format string to catch and raise
+ # any possible KeyErrors from missing parameters
+ self.data % other
+ copied = copy.deepcopy(self)
+ return copied._save_parameters(other)
+
+ def __mul__(self, other):
+ return self.data * other
+
+ def __rmul__(self, other):
+ return other * self.data
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __getslice__(self, start, end):
+ return self.data.__getslice__(start, end)
+
+ def __getattribute__(self, name):
+ # NOTE(mrodden): handle lossy operations that we can't deal with yet
+ # These override the UserString implementation, since UserString
+ # uses our __class__ attribute to try and build a new message
+ # after running the inner data string through the operation.
+ # At that point, we have lost the gettext message id and can just
+ # safely resolve to a string instead.
+ ops = ['capitalize', 'center', 'decode', 'encode',
+ 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+ 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+ if name in ops:
+ return getattr(self.data, name)
+ else:
+ return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+ """Handler that can have a locale associated to translate Messages.
+
+ A quick example of how to utilize the Message class above.
+ LocaleHandler takes a locale and a target logging.Handler object
+ to forward LogRecord objects to after translating the internal Message.
+ """
+
+ def __init__(self, locale, target):
+ """Initialize a LocaleHandler
+
+ :param locale: locale to use for translating messages
+ :param target: logging.Handler object to forward
+ LogRecord objects to after translation
+ """
+ logging.Handler.__init__(self)
+ self.locale = locale
+ self.target = target
+
+ def emit(self, record):
+ if isinstance(record.msg, Message):
+ # set the locale and resolve to a string
+ record.msg.locale = self.locale
+
+ self.target.emit(record)
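
A short usage sketch of the lazy-translation pieces added above, wiring a lazy _() and a LocaleHandler together (the logger name and the 'es' locale are illustrative, and translation only happens if a matching message catalog is installed):

    import logging

    from nova.openstack.common import gettextutils

    # A lazy _() for the 'nova' domain: calls return Message objects,
    # not already-translated strings.
    _ = gettextutils.get_lazy_gettext('nova')

    LOG = logging.getLogger('nova.lazy_demo')
    # Translate any Message payloads to Spanish just before handing the
    # record to the wrapped stream handler.
    LOG.addHandler(gettextutils.LocaleHandler('es', logging.StreamHandler()))

    msg = _('Unexpected task state: %s') % 'rebooting'  # parameters saved, not applied yet
    LOG.error(msg)  # resolved (and translated, if a catalog exists) at emit time
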
diff --git a/nova/openstack/common/plugin/__init__.py b/nova/openstack/common/plugin/__init__.py
deleted file mode 100644
index b706747cf..000000000
--- a/nova/openstack/common/plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/nova/openstack/common/plugin/callbackplugin.py b/nova/openstack/common/plugin/callbackplugin.py
deleted file mode 100644
index c08ee69fb..000000000
--- a/nova/openstack/common/plugin/callbackplugin.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from nova.openstack.common.plugin import plugin
-
-
-LOG = logging.getLogger(__name__)
-
-
-class _CallbackNotifier(object):
- """Manages plugin-defined notification callbacks.
-
- For each Plugin, a CallbackNotifier will be added to the
- notification driver list. Calls to notify() with appropriate
- messages will be hooked and prompt callbacks.
-
- A callback should look like this:
- def callback(context, message, user_data)
- """
-
- def __init__(self):
- self._callback_dict = {}
-
- def _add_callback(self, event_type, callback, user_data):
- callback_list = self._callback_dict.get(event_type, [])
- callback_list.append({'function': callback,
- 'user_data': user_data})
- self._callback_dict[event_type] = callback_list
-
- def _remove_callback(self, callback):
- for callback_list in self._callback_dict.values():
- for entry in callback_list:
- if entry['function'] == callback:
- callback_list.remove(entry)
-
- def notify(self, context, message):
- if message.get('event_type') not in self._callback_dict:
- return
-
- for entry in self._callback_dict[message.get('event_type')]:
- entry['function'](context, message, entry.get('user_data'))
-
- def callbacks(self):
- return self._callback_dict
-
-
-class CallbackPlugin(plugin.Plugin):
- """ Plugin with a simple callback interface.
-
- This class is provided as a convenience for producing a simple
- plugin that only watches a couple of events. For example, here's
- a subclass which prints a line the first time an instance is created.
-
- class HookInstanceCreation(CallbackPlugin):
-
- def __init__(self, _service_name):
- super(HookInstanceCreation, self).__init__()
- self._add_callback(self.magic, 'compute.instance.create.start')
-
- def magic(self):
- print "An instance was created!"
- self._remove_callback(self, self.magic)
- """
-
- def __init__(self, service_name):
- super(CallbackPlugin, self).__init__(service_name)
- self._callback_notifier = _CallbackNotifier()
- self._add_notifier(self._callback_notifier)
-
- def _add_callback(self, callback, event_type, user_data=None):
- """Add callback for a given event notification.
-
- Subclasses can call this as an alternative to implementing
- a fullblown notify notifier.
- """
- self._callback_notifier._add_callback(event_type, callback, user_data)
-
- def _remove_callback(self, callback):
- """Remove all notification callbacks to specified function."""
- self._callback_notifier._remove_callback(callback)
diff --git a/nova/openstack/common/plugin/plugin.py b/nova/openstack/common/plugin/plugin.py
deleted file mode 100644
index 4d6d9a464..000000000
--- a/nova/openstack/common/plugin/plugin.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Plugin(object):
- """Defines an interface for adding functionality to an OpenStack service.
-
- A plugin interacts with a service via the following pathways:
-
- - An optional set of notifiers, managed by calling add_notifier()
- or by overriding _notifiers()
-
- - A set of api extensions, managed via add_api_extension_descriptor()
-
- - Direct calls to service functions.
-
- - Whatever else the plugin wants to do on its own.
-
- This is the reference implementation.
- """
-
- # The following functions are provided as convenience methods
- # for subclasses. Subclasses should call them but probably not
- # override them.
- def _add_api_extension_descriptor(self, descriptor):
- """Subclass convenience method which adds an extension descriptor.
-
- Subclass constructors should call this method when
- extending a project's REST interface.
-
- Note that once the api service has loaded, the
- API extension set is more-or-less fixed, so
- this should mainly be called by subclass constructors.
- """
- self._api_extension_descriptors.append(descriptor)
-
- def _add_notifier(self, notifier):
- """Subclass convenience method which adds a notifier.
-
- Notifier objects should implement the function notify(message).
- Each notifier receives a notify() call whenever an openstack
- service broadcasts a notification.
-
- Best to call this during construction. Notifiers are enumerated
- and registered by the pluginmanager at plugin load time.
- """
- self._notifiers.append(notifier)
-
- # The following methods are called by OpenStack services to query
- # plugin features. Subclasses should probably not override these.
- def _notifiers(self):
- """Returns list of notifiers for this plugin."""
- return self._notifiers
-
- notifiers = property(_notifiers)
-
- def _api_extension_descriptors(self):
- """Return a list of API extension descriptors.
-
- Called by a project API during its load sequence.
- """
- return self._api_extension_descriptors
-
- api_extension_descriptors = property(_api_extension_descriptors)
-
- # Most plugins will override this:
- def __init__(self, service_name):
- self._notifiers = []
- self._api_extension_descriptors = []
diff --git a/nova/openstack/common/plugin/pluginmanager.py b/nova/openstack/common/plugin/pluginmanager.py
deleted file mode 100644
index e651546fe..000000000
--- a/nova/openstack/common/plugin/pluginmanager.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkg_resources
-
-from oslo.config import cfg
-
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
-from nova.openstack.common.notifier import api as notifier_api
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-class PluginManager(object):
- """Manages plugin entrypoints and loading.
-
- For a service to implement this plugin interface for callback purposes:
-
- - Make use of the openstack-common notifier system
- - Instantiate this manager in each process (passing in
- project and service name)
-
- For an API service to extend itself using this plugin interface,
- it needs to query the plugin_extension_factory provided by
- the already-instantiated PluginManager.
- """
-
- def __init__(self, project_name, service_name):
- """ Construct Plugin Manager; load and initialize plugins.
-
- project_name (e.g. 'nova' or 'glance') is used
- to construct the entry point that identifies plugins.
-
- The service_name (e.g. 'compute') is passed on to
- each plugin as a raw string for it to do what it will.
- """
- self._project_name = project_name
- self._service_name = service_name
- self.plugins = []
-
- def load_plugins(self):
- self.plugins = []
-
- for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
- self._project_name):
- try:
- pluginclass = entrypoint.load()
- plugin = pluginclass(self._service_name)
- self.plugins.append(plugin)
- except Exception as exc:
- LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") %
- {'plug': entrypoint, 'exc': exc})
-
- # Register individual notifiers.
- for plugin in self.plugins:
- for notifier in plugin.notifiers:
- notifier_api.add_driver(notifier)
-
- def plugin_extension_factory(self, ext_mgr):
- for plugin in self.plugins:
- descriptors = plugin.api_extension_descriptors
- for descriptor in descriptors:
- ext_mgr.load_extension(descriptor)
diff --git a/nova/quota.py b/nova/quota.py
index a1c877ecc..d0241de1c 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -993,6 +993,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
+ return
LOG.debug(_("Committed reservations %(reservations)s") % locals())
def rollback(self, context, reservations, project_id=None):
@@ -1015,6 +1016,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % locals())
+ return
LOG.debug(_("Rolled back reservations %(reservations)s") % locals())
def usage_reset(self, context, resources):
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 028c72ff7..b707f424c 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -15,19 +15,26 @@
"""Utility methods for scheduling."""
from nova.compute import flavors
+from nova import db
from nova.openstack.common import jsonutils
-def build_request_spec(image, instances):
+def build_request_spec(ctxt, image, instances):
"""Build a request_spec for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
type.
"""
instance = instances[0]
+ instance_type = flavors.extract_flavor(instance)
+ # NOTE(comstud): This is a bit ugly, but will get cleaned up when
+ # we're passing an InstanceType internal object.
+ extra_specs = db.instance_type_extra_specs_get(ctxt,
+ instance_type['flavorid'])
+ instance_type['extra_specs'] = extra_specs
request_spec = {
'image': image,
'instance_properties': instance,
- 'instance_type': flavors.extract_flavor(instance),
+ 'instance_type': instance_type,
'instance_uuids': [inst['uuid'] for inst in instances]}
return jsonutils.to_primitive(request_spec)
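
With the change above, the flavor embedded in the request_spec carries its extra_specs, so downstream consumers of the spec (scheduler filters, cells) get them without a second lookup. Roughly, the returned structure looks like this (all values illustrative):

    # Illustrative shape of the dict returned by build_request_spec();
    # the real values come from the instance, its flavor and the image.
    request_spec = {
        'image': {'id': 'fake-image-id'},
        'instance_properties': {'uuid': 'fake-instance-uuid',
                                'memory_mb': 512},
        'instance_type': {'flavorid': '1',
                          'name': 'm1.tiny',
                          'memory_mb': 512,
                          'extra_specs': {'example_key': 'example_value'}},
        'instance_uuids': ['fake-instance-uuid'],
    }
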
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 543bf4a62..22a6947f2 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -46,6 +46,7 @@ from nova.network import api as network_api
from nova.network import quantumv2
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_quantum_security_groups as test_quantum)
@@ -880,6 +881,7 @@ class CloudTestCase(test.TestCase):
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1),
'system_metadata': sys_meta
@@ -891,6 +893,7 @@ class CloudTestCase(test.TestCase):
'instance_type_id': 1,
'host': 'host2',
'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2),
'system_metadata': sys_meta
@@ -2442,6 +2445,7 @@ class CloudTestCase(test.TestCase):
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
@@ -2492,6 +2496,7 @@ class CloudTestCase(test.TestCase):
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
@@ -2501,6 +2506,7 @@ class CloudTestCase(test.TestCase):
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
}
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 5e64bdf94..39eebbcd1 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -26,6 +26,7 @@ from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
+from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
@@ -41,6 +42,7 @@ INSTANCE = {
"tenant_id": 'fake_tenant_id',
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "launched_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"security_groups": [{"id": 1, "name": "test"}],
"progress": 0,
"image_ref": 'http://foo.com/123',
@@ -61,7 +63,7 @@ def fake_compute_api_raises_invalid_state(*args, **kwargs):
def fake_compute_api_get(self, context, instance_id):
return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
- 'task_state': None}
+ 'task_state': None, 'launched_at': timeutils.utcnow()}
class AdminActionsTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
index 816bac565..f2ebf9d78 100644
--- a/nova/tests/api/openstack/compute/contrib/test_evacuate.py
+++ b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
@@ -76,8 +76,8 @@ class EvacuateTest(test.TestCase):
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
- uuid = self.UUID
- req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
@@ -105,8 +105,8 @@ class EvacuateTest(test.TestCase):
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
- uuid = self.UUID
- req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
@@ -132,8 +132,8 @@ class EvacuateTest(test.TestCase):
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
- uuid = self.UUID
- req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
@@ -161,8 +161,8 @@ class EvacuateTest(test.TestCase):
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
- uuid = self.UUID
- req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
@@ -184,8 +184,8 @@ class EvacuateTest(test.TestCase):
def test_not_admin(self):
ctxt = context.RequestContext('fake', 'fake', is_admin=False)
app = fakes.wsgi_app(fake_auth_context=ctxt)
- uuid = self.UUID
- req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
index df2c3d392..459dae932 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
@@ -218,3 +218,18 @@ class FlavorManageTest(test.TestCase):
req.body = jsonutils.dumps(expected)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
+
+ def test_invalid_memory_mb(self):
+ """Check negative and decimal number can't be accepted."""
+
+ self.stubs.UnsetAll()
+ self.assertRaises(exception.InvalidInput, flavors.create, "abc",
+ -512, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
+ 512.2, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
+ None, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+ 512, 2, None, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+ "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
diff --git a/nova/tests/api/openstack/compute/extensions/test_plugin_api_extensions.py b/nova/tests/api/openstack/compute/extensions/test_plugin_api_extensions.py
deleted file mode 100644
index 3aac638c6..000000000
--- a/nova/tests/api/openstack/compute/extensions/test_plugin_api_extensions.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkg_resources
-
-from nova.api.openstack.compute import extensions as computeextensions
-from nova.api.openstack import extensions
-from nova.openstack.common.plugin import plugin
-from nova import test
-
-
-class StubController(object):
-
- def i_am_the_stub(self):
- pass
-
-
-class StubControllerExtension(extensions.ExtensionDescriptor):
- """This is a docstring. We need it."""
- name = 'stubextension'
- alias = 'stubby'
-
- def get_resources(self):
- resources = []
- res = extensions.ResourceExtension('testme',
- StubController())
- resources.append(res)
- return resources
-
-
-service_list = []
-
-
-class TestPluginClass(plugin.Plugin):
-
- def __init__(self, service_name):
- super(TestPluginClass, self).__init__(service_name)
- self._add_api_extension_descriptor(StubControllerExtension)
- service_list.append(service_name)
-
-
-class MockEntrypoint(pkg_resources.EntryPoint):
- def load(self):
- return TestPluginClass
-
-
-class APITestCase(test.TestCase):
- """Test case for the plugin api extension interface."""
- def test_add_extension(self):
- def mock_load(_s):
- return TestPluginClass()
-
- def mock_iter_entry_points(_t):
- return [MockEntrypoint("fake", "fake", ["fake"])]
-
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- mock_iter_entry_points)
- global service_list
- service_list = []
-
- # Marking out the default extension paths makes this test MUCH faster.
- self.flags(osapi_compute_extension=[])
-
- found = False
- mgr = computeextensions.ExtensionManager()
- for res in mgr.get_resources():
- # We have to use this weird 'dir' check because
- # the plugin framework muddies up the classname
- # such that 'isinstance' doesn't work right.
- if 'i_am_the_stub' in dir(res.controller):
- found = True
-
- self.assertTrue(found)
- self.assertEqual(len(service_list), 1)
- self.assertEqual(service_list[0], 'compute-extensions')
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_certificates.py b/nova/tests/api/openstack/compute/plugins/v3/test_certificates.py
new file mode 100644
index 000000000..222087872
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_certificates.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack.compute.plugins.v3 import certificates
+from nova import context
+from nova.openstack.common import rpc
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+def fake_get_root_cert(context, *args, **kwargs):
+ return 'fakeroot'
+
+
+def fake_create_cert(context, *args, **kwargs):
+ return 'fakepk', 'fakecert'
+
+
+class CertificatesTest(test.TestCase):
+ def setUp(self):
+ super(CertificatesTest, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.controller = certificates.CertificatesController()
+
+ def test_translate_certificate_view(self):
+ pk, cert = fake_create_cert(self.context)
+ view = certificates._translate_certificate_view(cert, pk)
+ self.assertEqual(view['data'], cert)
+ self.assertEqual(view['private_key'], pk)
+
+ def test_certificates_show_root(self):
+ self.stubs.Set(rpc, 'call', fake_get_root_cert)
+ req = fakes.HTTPRequestV3.blank('/os-certificates/root')
+ res_dict = self.controller.show(req, 'root')
+
+ cert = fake_get_root_cert(self.context)
+ response = {'certificate': {'data': cert, 'private_key': None}}
+ self.assertEqual(res_dict, response)
+
+ def test_certificates_create_certificate(self):
+ self.stubs.Set(rpc, 'call', fake_create_cert)
+ req = fakes.HTTPRequestV3.blank('/os-certificates/')
+ res_dict = self.controller.create(req)
+
+ pk, cert = fake_create_cert(self.context)
+ response = {'certificate': {'data': cert, 'private_key': pk}}
+ self.assertEqual(res_dict, response)
+
+
+class CertificatesSerializerTest(test.TestCase):
+ def test_index_serializer(self):
+ serializer = certificates.CertificateTemplate()
+ text = serializer.serialize(dict(
+ certificate=dict(
+ data='fakecert',
+ private_key='fakepk'),
+ ))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('certificate', tree.tag)
+ self.assertEqual('fakepk', tree.get('private_key'))
+ self.assertEqual('fakecert', tree.get('data'))
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py
new file mode 100644
index 000000000..72a531277
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py
@@ -0,0 +1,198 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.config import cfg
+import webob
+
+from nova.compute import api as compute_api
+from nova.compute import vm_states
+from nova import context
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def fake_compute_api(*args, **kwargs):
+ return True
+
+
+def fake_compute_api_get(self, context, instance_id):
+ return {
+ 'id': 1,
+ 'uuid': instance_id,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None, 'host': 'host1'
+ }
+
+
+class EvacuateTest(test.TestCase):
+
+ _methods = ('resize', 'evacuate')
+
+ def setUp(self):
+ super(EvacuateTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.UUID = uuid.uuid4()
+ for _method in self._methods:
+ self.stubs.Set(compute_api.API, _method, fake_compute_api)
+
+ def test_evacuate_instance_with_no_target(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v3/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_target(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'false',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ resp = req.get_response(app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_shared_and_pass(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'True',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_not_shared_pass_generated(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'False',
+ }
+ })
+
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ resp = req.get_response(app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
+
+ def test_evacuate_shared(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'True',
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 200)
+
+ def test_not_admin(self):
+ ctxt = context.RequestContext('fake', 'fake', is_admin=False)
+ app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
+ uuid1 = self.UUID
+ req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'True',
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 403)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py b/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py
new file mode 100644
index 000000000..75733e50f
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py
@@ -0,0 +1,126 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+import webob
+
+from nova import compute
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def rescue(self, context, instance, rescue_password=None):
+ pass
+
+
+def unrescue(self, context, instance):
+ pass
+
+
+class RescueTest(test.TestCase):
+ def setUp(self):
+ super(RescueTest, self).setUp()
+
+ def fake_compute_get(*args, **kwargs):
+ uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
+ return {'id': 1, 'uuid': uuid}
+
+ self.stubs.Set(compute.api.API, "get", fake_compute_get)
+ self.stubs.Set(compute.api.API, "rescue", rescue)
+ self.stubs.Set(compute.api.API, "unrescue", unrescue)
+ self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-rescue'))
+
+ def test_rescue_with_preset_password(self):
+ body = {"rescue": {"adminPass": "AABBCC112233"}}
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("AABBCC112233", resp_json['adminPass'])
+
+ def test_rescue_generates_password(self):
+ body = dict(rescue=None)
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
+
+ def test_rescue_of_rescued_instance(self):
+ body = dict(rescue=None)
+
+ def fake_rescue(*args, **kwargs):
+ raise exception.InstanceInvalidState('fake message')
+
+ self.stubs.Set(compute.api.API, "rescue", fake_rescue)
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_unrescue(self):
+ body = dict(unrescue=None)
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_unrescue_of_active_instance(self):
+ body = dict(unrescue=None)
+
+ def fake_unrescue(*args, **kwargs):
+ raise exception.InstanceInvalidState('fake message')
+
+ self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_rescue_raises_unrescuable(self):
+ body = dict(rescue=None)
+
+ def fake_rescue(*args, **kwargs):
+ raise exception.InstanceNotRescuable('fake message')
+
+ self.stubs.Set(compute.api.API, "rescue", fake_rescue)
+ req = webob.Request.blank('/v3/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index fa25ad4a3..f0548ffa0 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -26,6 +26,7 @@ from nova.compute import vm_states
import nova.db
from nova import exception
from nova.openstack.common import jsonutils
+from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
@@ -77,6 +78,7 @@ def return_server(context, server_id):
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
+ 'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
@@ -85,6 +87,7 @@ def return_server_by_uuid(context, server_uuid):
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
+ 'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
diff --git a/nova/tests/api/openstack/compute/test_v3_extensions.py b/nova/tests/api/openstack/compute/test_v3_extensions.py
index 97429ca45..ec472f38a 100644
--- a/nova/tests/api/openstack/compute/test_v3_extensions.py
+++ b/nova/tests/api/openstack/compute/test_v3_extensions.py
@@ -16,10 +16,12 @@
from oslo.config import cfg
import stevedore
+import webob.exc
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
+from nova.api.openstack import extensions
from nova import exception
from nova import test
@@ -139,3 +141,38 @@ class ExtensionLoadingTestCase(test.TestCase):
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV3)
+
+ def test_extensions_expected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_expected_error_from_list(self):
+ @extensions.expected_errors((404, 403))
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_unexpected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_error_from_list(self):
+ @extensions.expected_errors((404, 413))
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_policy_not_authorized_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise exception.PolicyNotAuthorized(action="foo")
+
+ self.assertRaises(exception.PolicyNotAuthorized, fake_func)
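
The new tests above pin down the contract of the expected_errors decorator: declared webob errors propagate unchanged, undeclared ones are converted to an HTTP 500, and PolicyNotAuthorized always passes through. A minimal sketch consistent with that contract (inferred from the tests, not a copy of Nova's implementation):

    import functools

    import webob.exc

    from nova import exception


    def expected_errors(errors):
        if not isinstance(errors, tuple):
            errors = (errors,)

        def decorator(func):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except exception.PolicyNotAuthorized:
                    # Authorization failures are handled by other machinery;
                    # never mask them as a 500.
                    raise
                except webob.exc.HTTPException as e:
                    if getattr(e, 'code', None) in errors:
                        raise
                    raise webob.exc.HTTPInternalServerError()
            return wrapped
        return decorator
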
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
index 9cd637cdf..46be492cc 100644
--- a/nova/tests/cells/test_cells_scheduler.py
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -84,10 +84,9 @@ class CellsSchedulerTestCase(test.TestCase):
'block_device_mapping': 'fake_bdm'}
self.build_inst_kwargs = {
'instances': self.instances,
- 'instance_type': 'fake_type',
'image': 'fake_image',
- 'filter_properties': {},
- 'security_group': 'fake_sec_groups',
+ 'filter_properties': {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
'block_device_mapping': 'fake_bdm'}
def test_create_instances_here(self):
@@ -178,7 +177,7 @@ class CellsSchedulerTestCase(test.TestCase):
call_info['target_cell'] = target_cell
call_info['build_inst_kwargs'] = build_inst_kwargs
- def fake_build_request_spec(image, instances):
+ def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
@@ -264,7 +263,7 @@ class CellsSchedulerTestCase(test.TestCase):
def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
call_info['build_inst_kwargs'] = build_inst_kwargs
- def fake_build_request_spec(image, instances):
+ def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
@@ -284,10 +283,11 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
self.assertEqual(self.build_inst_kwargs['instances'][0],
call_info['instance_properties'])
- self.assertEqual(self.build_inst_kwargs['instance_type'],
- call_info['instance_type'])
+ self.assertEqual(
+ self.build_inst_kwargs['filter_properties']['instance_type'],
+ call_info['instance_type'])
self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
- self.assertEqual(self.build_inst_kwargs['security_group'],
+ self.assertEqual(self.build_inst_kwargs['security_groups'],
call_info['security_groups'])
self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
call_info['block_device_mapping'])
@@ -341,7 +341,7 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(vm_states.ERROR, values['vm_state'])
call_info['errored_uuids'].append(instance_uuid)
- def fake_build_request_spec(image, instances):
+ def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
@@ -385,7 +385,7 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(vm_states.ERROR, instance['vm_state'])
call_info['errored_uuids2'].append(instance['uuid'])
- def fake_build_request_spec(image, instances):
+ def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index abb4d8e39..7953f8b63 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -259,6 +259,9 @@ class BaseTestCase(test.TestCase):
inst['os_type'] = 'Linux'
inst['system_metadata'] = make_fake_sys_meta()
inst['locked'] = False
+ inst['created_at'] = timeutils.utcnow()
+ inst['updated_at'] = timeutils.utcnow()
+ inst['launched_at'] = timeutils.utcnow()
inst.update(params)
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
@@ -1272,6 +1275,7 @@ class ComputeTestCase(BaseTestCase):
def test_run_terminate_timestamps(self):
# Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance['launched_at'] = None
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
launch = timeutils.utcnow()
@@ -2281,6 +2285,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
+ self.assertTrue('fixed_ips' in payload)
self.assertTrue(payload['launched_at'])
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
@@ -4357,9 +4362,11 @@ class ComputeTestCase(BaseTestCase):
instance_map = {}
instances = []
for x in xrange(5):
- uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': CONF.host}
- instances.append(instance_map[uuid])
+ inst_uuid = 'fake-uuid-%s' % x
+ instance_map[inst_uuid] = fake_instance.fake_db_instance(
+ uuid=inst_uuid, host=CONF.host, created_at=None)
+ # These won't be in our instance since they're not requested
+ instances.append(instance_map[inst_uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
'get_nw_info': 0, 'expected_instance': None}
@@ -4369,7 +4376,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(columns_to_join, [])
return instances[:]
- def fake_instance_get_by_uuid(context, instance_uuid):
+ def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
@@ -4381,12 +4388,13 @@ class ComputeTestCase(BaseTestCase):
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
- self.assertEqual(call_info['expected_instance'], instance)
+ self.assertEqual(call_info['expected_instance']['uuid'],
+ instance['uuid'])
call_info['get_nw_info'] += 1
- self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
+ self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
- self.stubs.Set(self.compute.conductor_api, 'instance_get_by_uuid',
+ self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
@@ -4461,19 +4469,27 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(instance)
def test_poll_unconfirmed_resizes(self):
- instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESIZED,
- 'task_state': None},
- {'uuid': 'noexist'},
- {'uuid': 'fake_uuid2', 'vm_state': vm_states.ERROR,
- 'task_state': None},
- {'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
- 'task_state': task_states.REBOOTING},
- {'uuid': 'fake_uuid4', 'vm_state': vm_states.RESIZED,
- 'task_state': None},
- {'uuid': 'fake_uuid5', 'vm_state': vm_states.ACTIVE,
- 'task_state': None},
- {'uuid': 'fake_uuid6', 'vm_state': vm_states.RESIZED,
- 'task_state': 'deleting'}]
+ instances = [
+ fake_instance.fake_db_instance(uuid='fake_uuid1',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='noexist'),
+ fake_instance.fake_db_instance(uuid='fake_uuid2',
+ vm_state=vm_states.ERROR,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid3',
+ vm_state=vm_states.ACTIVE,
+ task_state=
+ task_states.REBOOTING),
+ fake_instance.fake_db_instance(uuid='fake_uuid4',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid5',
+ vm_state=vm_states.ACTIVE,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid6',
+ vm_state=vm_states.RESIZED,
+ task_state='deleting')]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
@@ -5734,6 +5750,21 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_delete_if_not_launched(self):
+ instance, instance_uuid = self._run_instance(params={
+ 'host': CONF.host})
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_states.ERROR,
+ "launched_at": None})
+
+ self.compute_api.delete(self.context, instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.DELETING)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_delete_in_resizing(self):
def fake_quotas_reserve(context, expire=None, project_id=None,
**deltas):
@@ -6008,7 +6039,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
- def test_rebuild(self):
+ def _test_rebuild(self, vm_state):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -6039,6 +6070,10 @@ class ComputeAPITestCase(BaseTestCase):
image_ref = instance["image_ref"] + '-new_image_ref'
password = "new_password"
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_state})
+
self.compute_api.rebuild(self.context, instance, image_ref, password)
self.assertEqual(info['image_ref'], image_ref)
@@ -6053,6 +6088,34 @@ class ComputeAPITestCase(BaseTestCase):
'preserved': 'preserve this!'})
db.instance_destroy(self.context, instance['uuid'])
+ def test_rebuild(self):
+ # Test that we can rebuild an instance while it is active.
+ self._test_rebuild(vm_state=vm_states.ACTIVE)
+
+ def test_rebuild_in_error_state(self):
+ # Test we can rebuild an instance in the Error State
+ self._test_rebuild(vm_state=vm_states.ERROR)
+
+ def test_rebuild_in_error_not_launched(self):
+ instance = jsonutils.to_primitive(
+ self._create_fake_instance(params={'image_ref': ''}))
+ instance_uuid = instance['uuid']
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance=instance)
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_states.ERROR,
+ "launched_at": None})
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ instance['image_ref'],
+ "new password")
+
def test_rebuild_no_image(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': ''}))
@@ -6199,7 +6262,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo',
lambda x: False)
- def test_reboot_soft(self):
+ def _test_reboot_soft(self, vm_state):
# Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -6216,6 +6279,9 @@ class ComputeAPITestCase(BaseTestCase):
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['task_state'], None)
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_state})
+
reboot_type = "SOFT"
self._stub_out_reboot(device_name)
self.compute_api.reboot(self.context, inst_ref, reboot_type)
@@ -6225,7 +6291,15 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
- def test_reboot_hard(self):
+ def test_soft_reboot(self):
+ # Ensure instance can be soft rebooted while active.
+ self._test_reboot_soft(vm_state=vm_states.ACTIVE)
+
+ def test_soft_reboot_of_instance_in_error(self):
+ # Ensure instance can be rebooted while in error state.
+ self._test_reboot_soft(vm_state=vm_states.ERROR)
+
+ def test_reboot_hard(self, vm_state=vm_states.ACTIVE):
# Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -6242,6 +6316,9 @@ class ComputeAPITestCase(BaseTestCase):
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['task_state'], None)
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_state})
+
reboot_type = "HARD"
self._stub_out_reboot(device_name)
self.compute_api.reboot(self.context, inst_ref, reboot_type)
@@ -6251,6 +6328,10 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
+ def test_hard_reboot_of_instance_in_error(self):
+ # Ensure instance can be rebooted while in error state.
+ self.test_reboot_hard(vm_state=vm_states.ERROR)
+
def test_hard_reboot_of_soft_rebooting_instance(self):
# Ensure instance can be hard rebooted while soft rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -6287,7 +6368,7 @@ class ComputeAPITestCase(BaseTestCase):
inst_ref,
reboot_type)
- def test_soft_reboot_of_rescued_instance(self):
+ def test_reboot_of_rescued_instance(self):
# Ensure instance can't be rebooted while in rescued state.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -6311,6 +6392,33 @@ class ComputeAPITestCase(BaseTestCase):
inst_ref,
'HARD')
+ def test_reboot_of_instance_in_error_not_launched(self):
+ # Ensure an instance in an error state cannot be rebooted
+ # if it has never been launched.
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ self.compute.run_instance(self.context, instance=instance)
+
+ inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(inst_ref['task_state'], None)
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_states.ERROR,
+ "launched_at": None})
+
+ inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ self.context,
+ inst_ref,
+ 'SOFT')
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ self.context,
+ inst_ref,
+ 'HARD')
+
def test_hostname_create(self):
# Ensure instance hostname is set during creation.
inst_type = flavors.get_flavor_by_name('m1.tiny')
@@ -7750,7 +7858,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.InvalidDevicePath,
self.compute_api.attach_volume,
self.context,
- {'locked': False, 'vm_state': vm_states.ACTIVE},
+ {'locked': False, 'vm_state': vm_states.ACTIVE,
+ 'launched_at': timeutils.utcnow()},
None,
'/invalid')
@@ -8057,6 +8166,7 @@ class ComputeAPITestCase(BaseTestCase):
# Ensure exception is raised while detaching an un-attached volume
instance = {'uuid': 'uuid1',
'locked': False,
+ 'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
volume = {'id': 1, 'attach_status': 'detached'}
@@ -8069,6 +8179,7 @@ class ComputeAPITestCase(BaseTestCase):
# instance doesn't match.
instance = {'uuid': 'uuid1',
'locked': False,
+ 'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE}
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': 'uuid2'}
@@ -8383,6 +8494,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_fail_evacuate_from_non_existing_host(self):
inst = {}
inst['vm_state'] = vm_states.ACTIVE
+ inst['launched_at'] = timeutils.utcnow()
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
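
Taken together, the new reboot/rebuild tests pin down one rule: an instance in the ERROR state may be rebooted or rebuilt only if it has launched at least once (launched_at is set); otherwise the API must refuse with InstanceInvalidState. A minimal sketch of that rule in isolation, using plain dicts; this is not the actual compute API check:

    class InstanceInvalidState(Exception):
        pass

    def check_recovery_allowed(instance):
        # ERROR instances that never launched cannot be rebooted or rebuilt.
        if instance['vm_state'] == 'error' and not instance.get('launched_at'):
            raise InstanceInvalidState('instance has never been launched')

    check_recovery_allowed({'vm_state': 'error',
                            'launched_at': '2013-06-01T00:00:00'})   # allowed
    # check_recovery_allowed({'vm_state': 'error', 'launched_at': None})  # raises
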
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 7a33cfbb9..a2a015313 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -389,7 +389,7 @@ class _BaseTestCase(object):
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
- False, mox.IgnoreArg()).AndReturn('fake-usage')
+ False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
notifier_api.notify(self.context,
'conductor.%s' % self.conductor_manager.host,
@@ -1183,18 +1183,28 @@ class _BaseTaskTestCase(object):
def test_build_instances(self):
instance_type = flavors.get_default_flavor()
system_metadata = flavors.save_flavor_info({}, instance_type)
- # NOTE(alaski): instance_type -> system_metadata -> instance_type loses
- # some data (extra_specs) so we need both for testing.
- instance_type_extract = flavors.extract_flavor(
+ # NOTE(alaski): instance_type -> system_metadata -> instance_type
+ # loses some data (extra_specs). This build process uses
+ # scheduler/utils:build_request_spec(), which extracts the flavor
+ # from system_metadata and re-queries the DB for extra_specs, so
+ # we need to test that behaviour here.
+ expected_instance_type = flavors.extract_flavor(
{'system_metadata': system_metadata})
+ expected_instance_type['extra_specs'] = 'fake-specs'
+
+ self.mox.StubOutWithMock(db, 'instance_type_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'run_instance')
+
+ db.instance_type_extra_specs_get(
+ self.context,
+ instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': {'system_metadata': system_metadata,
'uuid': 'fakeuuid'},
- 'instance_type': instance_type_extract,
+ 'instance_type': expected_instance_type,
'instance_uuids': ['fakeuuid', 'fakeuuid2'],
'block_device_mapping': 'block_device_mapping',
'security_group': 'security_groups'},
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index 81b15b4d5..279481d87 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -1299,10 +1299,11 @@ class ModelsObjectComparatorMixin(object):
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
- self.assertEqual(len(objs1), len(objs2))
- objs2 = dict([(o['id'], o) for o in objs2])
- for o1 in objs1:
- self._assertEqualObjects(o1, objs2[o1['id']], ignored_keys)
+ obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
+ sort_key = lambda d: [d[k] for k in sorted(d)]
+ conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
+
+ self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
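
The rewritten comparator no longer assumes every object has an 'id'; instead both lists are converted to dicts (minus ignored keys) and sorted by a canonical key derived from the sorted field names, which makes the comparison order-insensitive. The same technique on plain dicts, as a self-contained sketch (the mixin's _dict_from_object is replaced by a simple key filter here):

    def equal_unordered(objs1, objs2, ignored_keys=()):
        def to_dict(obj):
            return dict((k, v) for k, v in dict(obj).items()
                        if k not in ignored_keys)

        def sort_key(d):
            # Canonical ordering: values taken in sorted-key order, stringified
            # so heterogeneous value types still sort deterministically.
            return [str(d[k]) for k in sorted(d)]

        return (sorted(map(to_dict, objs1), key=sort_key) ==
                sorted(map(to_dict, objs2), key=sort_key))

    assert equal_unordered([{'id': 1, 'x': 'a'}, {'id': 2, 'x': 'b'}],
                           [{'id': 2, 'x': 'b'}, {'id': 1, 'x': 'a'}])
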
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index ed8cc7424..d4264153c 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -112,6 +112,7 @@ policy_data = """
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
"compute_extension:certificates": "",
+ "compute_extension:v3:os-certificates": "",
"compute_extension:cloudpipe": "",
"compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
@@ -122,6 +123,7 @@ policy_data = """
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "is_admin:True",
+ "compute_extension:v3:os-evacuate": "is_admin:True",
"compute_extension:extended_server_attributes": "",
"compute_extension:extended_status": "",
"compute_extension:extended_availability_zone": "",
@@ -166,6 +168,7 @@ policy_data = """
"compute_extension:quotas:delete": "",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
+ "compute_extension:v3:os-rescue": "",
"compute_extension:security_group_default_rules": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "",
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 7f21a2aa9..d97dcae57 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -1357,6 +1357,13 @@ class TestQuantumv2(test.TestCase):
self.assertEqual(nw_info[0]['type'], model.VIF_TYPE_BRIDGE)
self.assertEqual(nw_info[0]['network']['bridge'], 'brqnet-id')
+ def test_get_all_empty_list_networks(self):
+ api = quantumapi.API()
+ self.moxed_client.list_networks().AndReturn({'networks': []})
+ self.mox.ReplayAll()
+ networks = api.get_all(self.context)
+ self.assertEqual(networks, [])
+
class TestQuantumv2ModuleMethods(test.TestCase):
def test_ensure_requested_network_ordering_no_preference_ids(self):
diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py
index a55c5d502..09ce70355 100644
--- a/nova/tests/objects/test_instance.py
+++ b/nova/tests/objects/test_instance.py
@@ -90,7 +90,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(ctxt, 'uuid', []).AndReturn(self.fake_instance)
self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(ctxt, uuid='uuid')
+ inst = instance.Instance.get_by_uuid(ctxt, 'uuid')
# Make sure these weren't loaded
for attr in instance.INSTANCE_OPTIONAL_FIELDS:
attrname = base.get_attrname(attr)
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index e8dff9a4a..98cc1c3f9 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import httplib2
import urlparse
from cinderclient import exceptions as cinder_exception
@@ -76,9 +75,9 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
status, body = getattr(self, callback)(**kwargs)
if hasattr(status, 'items'):
- return httplib2.Response(status), body
+ return status, body
else:
- return httplib2.Response({"status": status}), body
+ return {"status": status}, body
def get_volumes_1234(self, **kw):
volume = {'volume': _stub_volume(id='1234')}
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
index d1d659fe3..7118aa938 100644
--- a/nova/tests/test_wsgi.py
+++ b/nova/tests/test_wsgi.py
@@ -23,8 +23,7 @@ import tempfile
import testtools
import eventlet
-import httplib2
-import paste
+import requests
import nova.exception
from nova import test
@@ -119,16 +118,16 @@ class TestWSGIServer(test.TestCase):
server.start()
uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
- resp, _ = httplib2.Http().request(uri)
+ resp = requests.get(uri)
eventlet.sleep(0)
- self.assertNotEqual(resp.status,
- paste.httpexceptions.HTTPRequestURITooLong.code)
+ self.assertNotEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
- resp, _ = httplib2.Http().request(uri)
+ resp = requests.get(uri)
eventlet.sleep(0)
- self.assertEqual(resp.status,
- paste.httpexceptions.HTTPRequestURITooLong.code)
+ self.assertEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
server.stop()
server.wait()
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 994e4f220..556d1f91d 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -212,5 +212,5 @@ def is_ipv6_supported():
if e.errno == errno.EAFNOSUPPORT:
has_ipv6_support = False
else:
- raise e
+ raise
return has_ipv6_support
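
The raise e -> raise change here (and in the baremetal DB API below) is not cosmetic: inside an except block a bare raise re-raises the active exception with its original traceback, while raise e on Python 2 restarts the traceback at the raise statement and hides where the failure actually occurred. A self-contained illustration in the same spirit as is_ipv6_supported():

    import errno
    import socket

    def probe_ipv6():
        try:
            socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
            return True
        except socket.error as e:
            if e.errno == errno.EAFNOSUPPORT:
                return False
            # Bare raise: callers see a traceback pointing at the
            # socket.socket() call above, not at this line.
            raise
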
diff --git a/nova/tests/virt/baremetal/test_pxe.py b/nova/tests/virt/baremetal/test_pxe.py
index 022f9c692..cd4e5c143 100644
--- a/nova/tests/virt/baremetal/test_pxe.py
+++ b/nova/tests/virt/baremetal/test_pxe.py
@@ -116,6 +116,7 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
'deployment_ari_path': 'eee',
'aki_path': 'fff',
'ari_path': 'ggg',
+ 'network_info': self.test_network_info,
}
config = pxe.build_pxe_config(**args)
self.assertThat(config, matchers.StartsWith('default deploy'))
@@ -140,6 +141,21 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
matchers.Not(matchers.Contains('kernel ddd')),
))
+ def test_build_pxe_network_config(self):
+ self.flags(
+ pxe_network_config=True,
+ group='baremetal',
+ )
+ net = utils.get_test_network_info(1)
+ config = pxe.build_pxe_network_config(net)
+ self.assertIn('eth0:off', config)
+ self.assertNotIn('eth1', config)
+
+ net = utils.get_test_network_info(2)
+ config = pxe.build_pxe_network_config(net)
+ self.assertIn('eth0:off', config)
+ self.assertIn('eth1:off', config)
+
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = pxe.build_network_config(net)
@@ -458,7 +474,8 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
bm_utils.random_alnum(32).AndReturn('alnum')
pxe.build_pxe_config(
self.node['id'], 'alnum', iqn,
- 'aaaa', 'bbbb', 'cccc', 'dddd').AndReturn(pxe_config)
+ 'aaaa', 'bbbb', 'cccc', 'dddd',
+ self.test_network_info).AndReturn(pxe_config)
bm_utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
bm_utils.create_link_without_raise(
@@ -466,7 +483,8 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
self.mox.ReplayAll()
- self.driver.activate_bootloader(self.context, self.node, self.instance)
+ self.driver.activate_bootloader(self.context, self.node, self.instance,
+ network_info=self.test_network_info)
self.mox.VerifyAll()
@@ -515,8 +533,8 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
- self.driver.activate_bootloader(self.context, self.node,
- self.instance)
+ self.driver.activate_bootloader(self.context, self.node, self.instance,
+ network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
diff --git a/nova/tests/virt/baremetal/test_tilera.py b/nova/tests/virt/baremetal/test_tilera.py
index 488cba4df..7ad5c4b6a 100755
--- a/nova/tests/virt/baremetal/test_tilera.py
+++ b/nova/tests/virt/baremetal/test_tilera.py
@@ -317,7 +317,8 @@ class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
self.mox.ReplayAll()
- self.driver.activate_bootloader(self.context, self.node, self.instance)
+ self.driver.activate_bootloader(self.context, self.node, self.instance,
+ network_info=self.test_network_info)
self.mox.VerifyAll()
@@ -334,8 +335,8 @@ class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
- self.driver.activate_bootloader(self.context, self.node,
- self.instance)
+ self.driver.activate_bootloader(self.context, self.node, self.instance,
+ network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
index 674d84882..d2b5477eb 100644
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ b/nova/tests/virt/xenapi/test_vmops.py
@@ -171,6 +171,17 @@ class VMOpsTestCase(test.TestCase):
self.assertEqual(self.make_plugin_call_count, 1)
+ def test_destroy_raises_when_shutdown_fails(self):
+ vm_ref = "vm_reference"
+ fake_instance = "instance"
+
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+ vm_utils.hard_shutdown_vm(self._session, fake_instance,
+ vm_ref).AndReturn(False)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstancePowerOffFailure,
+ self._vmops._destroy, fake_instance, vm_ref)
+
class GetConsoleOutputTestCase(stubs.XenAPITestBase):
def setUp(self):
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 91d4f0770..30e75d95a 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
+from nova import crypto
from nova import db
from nova import exception
from nova.openstack.common import importutils
@@ -787,6 +788,11 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
Verifies that the VM and VDIs created are properly cleaned up.
"""
+ def fake_hard_shutdown_vm(session, instance, vm_ref):
+ return True
+
+ self.stubs.Set(vm_utils, 'hard_shutdown_vm',
+ fake_hard_shutdown_vm)
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
@@ -983,13 +989,14 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
- def noop(*args, **kwargs):
- pass
-
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
- self.stubs.Set(agent.XenAPIBasedAgent,
- 'set_admin_password', noop)
+
+ def fake_encrypt_text(sshkey, new_pass):
+ self.assertEqual("fake_keydata", sshkey)
+ return "fake"
+
+ self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nfake_keydata\n')
@@ -1020,6 +1027,93 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.check_vm_params_for_linux()
self.assertEquals(actual_injected_files, injected_files)
+ def test_spawn_agent_upgrade(self):
+ self.flags(xenapi_use_agent_default=True)
+ actual_injected_files = []
+
+ def fake_agent_build(_self, *args):
+ return {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf"}
+
+ self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
+ fake_agent_build)
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+
+ def test_spawn_agent_upgrade_fails_silently(self):
+ self.flags(xenapi_use_agent_default=True)
+ actual_injected_files = []
+
+ def fake_agent_build(_self, *args):
+ return {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf"}
+
+ self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
+ fake_agent_build)
+
+ def fake_agent_update(self, method, args):
+ raise xenapi_fake.Failure(["fake_error"])
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_agentupdate', fake_agent_update)
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+
+ def _test_spawn_fails_with(self, trigger, expected_exception):
+ self.flags(xenapi_use_agent_default=True)
+ self.flags(agent_version_timeout=0)
+ actual_injected_files = []
+
+ def fake_agent_version(self, method, args):
+ raise xenapi_fake.Failure([trigger])
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_version', fake_agent_version)
+
+ self.assertRaises(expected_exception, self._test_spawn,
+ IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
+
+ def test_spawn_fails_with_agent_timeout(self):
+ self._test_spawn_fails_with("TIMEOUT:fake", exception.AgentTimeout)
+
+ def test_spawn_fails_with_agent_not_implemented(self):
+ self._test_spawn_fails_with("NOT IMPLEMENTED:fake",
+ exception.AgentNotImplemented)
+
+ def test_spawn_fails_with_agent_error(self):
+ self._test_spawn_fails_with("fake_error", exception.AgentError)
+
+ def test_spawn_fails_with_agent_bad_return(self):
+ self.flags(xenapi_use_agent_default=True)
+ self.flags(agent_version_timeout=0)
+ actual_injected_files = []
+
+ def fake_agent_version(self, method, args):
+ return xenapi_fake.as_json(returncode='-1', message='fake')
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_version', fake_agent_version)
+
+ self.assertRaises(exception.AgentError, self._test_spawn,
+ IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
+
+ def test_spawn_fails_agent_not_implemented(self):
+ # Ensure spawn fails when the agent reports NOT IMPLEMENTED.
+ self.flags(xenapi_use_agent_default=True)
+ self.flags(agent_version_timeout=0)
+ actual_injected_files = []
+
+ def fake_agent_version(self, method, args):
+ raise xenapi_fake.Failure(["NOT IMPLEMENTED:fake"])
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_version', fake_agent_version)
+
+ self.assertRaises(exception.AgentNotImplemented, self._test_spawn,
+ IMAGE_VHD, None, None, os_type="linux", architecture="x86-64")
+
def test_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index 2029400ba..876c70b23 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -30,7 +30,7 @@ class NodeDriver(object):
def destroy_images(self, context, node, instance):
raise NotImplementedError()
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 5c9c35184..3c140556e 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -297,8 +297,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
if str(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
"already in use") % vif_uuid)
- else:
- raise e
+ raise
@sqlalchemy_api.require_admin_context
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 376921360..4e8543c3e 100755
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -248,7 +248,8 @@ class BareMetalDriver(driver.ComputeDriver):
injected_files=injected_files,
network_info=network_info,
)
- self.driver.activate_bootloader(context, node, instance)
+ self.driver.activate_bootloader(context, node, instance,
+ network_info=network_info)
self.power_on(instance, node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
index b3f39fdc3..76586ab74 100644
--- a/nova/virt/baremetal/fake.py
+++ b/nova/virt/baremetal/fake.py
@@ -28,7 +28,7 @@ class FakeDriver(base.NodeDriver):
def destroy_images(self, context, node, instance):
pass
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, **kwargs):
pass
def deactivate_bootloader(self, context, node, instance):
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 6a5a5ece5..f44a5f87a 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -54,6 +54,10 @@ pxe_opts = [
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
+ cfg.BoolOpt('pxe_network_config',
+ help='If set, pass the network configuration details to the '
+ 'initramfs via cmdline.',
+ default=False),
]
LOG = logging.getLogger(__name__)
@@ -77,9 +81,22 @@ def _get_cheetah():
return CHEETAH
+def build_pxe_network_config(network_info):
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
+ template = None
+ if not CONF.use_ipv6:
+ template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
+ else:
+ template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
+ "[%(netmask_v6)s]::%(name)s:off")
+
+ net_config = [template % iface for iface in interfaces]
+ return ' '.join(net_config)
+
+
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
deployment_aki_path, deployment_ari_path,
- aki_path, ari_path):
+ aki_path, ari_path, network_info):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
@@ -90,6 +107,11 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
"""
LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
+
+ network_config = None
+ if network_info and CONF.baremetal.pxe_network_config:
+ network_config = build_pxe_network_config(network_info)
+
pxe_options = {
'deployment_id': deployment_id,
'deployment_key': deployment_key,
@@ -99,6 +121,7 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
'aki_path': aki_path,
'ari_path': ari_path,
'pxe_append_params': CONF.baremetal.pxe_append_params,
+ 'pxe_network_config': network_config,
}
cheetah = _get_cheetah()
pxe_config = str(cheetah(
@@ -110,33 +133,7 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
def build_network_config(network_info):
- # TODO(deva): fix assumption that device names begin with "eth"
- # and fix assumption about ordering
- try:
- assert isinstance(network_info, list)
- except AssertionError:
- network_info = [network_info]
- interfaces = []
- for id, (network, mapping) in enumerate(network_info):
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- interface = {
- 'name': 'eth%d' % id,
- 'address': mapping['ips'][0]['ip'],
- 'gateway': mapping['gateway'],
- 'netmask': mapping['ips'][0]['netmask'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6,
- }
- interfaces.append(interface)
-
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
@@ -354,7 +351,7 @@ class PXE(base.NodeDriver):
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, network_info):
"""Configure PXE boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
@@ -398,6 +395,7 @@ class PXE(base.NodeDriver):
image_info['deploy_ramdisk'][1],
image_info['kernel'][1],
image_info['ramdisk'][1],
+ network_info,
)
bm_utils.write_to_file(pxe_config_file_path, pxe_config)
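
When the new pxe_network_config flag is on, build_pxe_network_config() renders one ip= argument per mapped interface, and the result lands on the kernel command line through ${pxe_options.pxe_network_config} (template change below). For a single IPv4 interface the rendered fragment looks roughly like this (values are made up):

    # Same template string as the IPv4 branch above, applied to one interface.
    iface = {'address': '192.168.1.10', 'gateway': '192.168.1.1',
             'netmask': '255.255.255.0', 'name': 'eth0'}
    fragment = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off" % iface
    # fragment == 'ip=192.168.1.10::192.168.1.1:255.255.255.0::eth0:off'
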
diff --git a/nova/virt/baremetal/pxe_config.template b/nova/virt/baremetal/pxe_config.template
index f2fcc9b14..54dd98baf 100644
--- a/nova/virt/baremetal/pxe_config.template
+++ b/nova/virt/baremetal/pxe_config.template
@@ -8,4 +8,4 @@ ipappend 3
label boot
kernel ${pxe_options.aki_path}
-append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params}
+append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params} ${pxe_options.pxe_network_config}
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index 64335298c..bb89a5f94 100755
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -55,31 +55,7 @@ def _get_cheetah():
def build_network_config(network_info):
- try:
- assert isinstance(network_info, list)
- except AssertionError:
- network_info = [network_info]
- interfaces = []
- for id, (network, mapping) in enumerate(network_info):
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- interface = {
- 'name': 'eth%d' % id,
- 'address': mapping['ips'][0]['ip'],
- 'gateway': mapping['gateway'],
- 'netmask': mapping['ips'][0]['netmask'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6,
- }
- interfaces.append(interface)
-
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
@@ -262,7 +238,7 @@ class Tilera(base.NodeDriver):
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, network_info):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py
index b18bfac85..96abcd41b 100644
--- a/nova/virt/baremetal/utils.py
+++ b/nova/virt/baremetal/utils.py
@@ -81,3 +81,32 @@ def random_alnum(count):
import string
chars = string.ascii_uppercase + string.digits
return "".join(random.choice(chars) for _ in range(count))
+
+
+def map_network_interfaces(network_info, use_ipv6=False):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption about ordering
+ if not isinstance(network_info, list):
+ network_info = [network_info]
+
+ interfaces = []
+ for id, (network, mapping) in enumerate(network_info):
+ address_v6 = None
+ gateway_v6 = None
+ netmask_v6 = None
+ if use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
+ gateway_v6 = mapping['gateway_v6']
+ interface = {
+ 'name': 'eth%d' % id,
+ 'address': mapping['ips'][0]['ip'],
+ 'gateway': mapping['gateway'],
+ 'netmask': mapping['ips'][0]['netmask'],
+ 'dns': ' '.join(mapping['dns']),
+ 'address_v6': address_v6,
+ 'gateway_v6': gateway_v6,
+ 'netmask_v6': netmask_v6,
+ }
+ interfaces.append(interface)
+ return interfaces
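
map_network_interfaces() folds the legacy (network, mapping) tuples of network_info into flat per-interface dicts, and both the PXE and Tilera drivers now share it instead of duplicating the loop. A small usage sketch with a made-up IPv4-only mapping:

    network_info = [
        ({'id': 'net-1'},                      # network part, unused by the helper
         {'ips': [{'ip': '10.0.0.5', 'netmask': '255.255.255.0'}],
          'gateway': '10.0.0.1',
          'dns': ['8.8.8.8']}),
    ]
    interfaces = map_network_interfaces(network_info, use_ipv6=False)
    # -> [{'name': 'eth0', 'address': '10.0.0.5', 'gateway': '10.0.0.1',
    #      'netmask': '255.255.255.0', 'dns': '8.8.8.8',
    #      'address_v6': None, 'gateway_v6': None, 'netmask_v6': None}]
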
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
index 10b9a1aa8..735481340 100644
--- a/nova/virt/disk/vfs/localfs.py
+++ b/nova/virt/disk/vfs/localfs.py
@@ -18,6 +18,7 @@ import os
import tempfile
from nova import exception
+from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import loop
@@ -77,10 +78,9 @@ class VFSLocalFS(vfs.VFS):
raise exception.NovaException(mount.error)
self.mount = mount
except Exception as e:
- LOG.debug(_("Failed to mount image %(ex)s)") %
- {'ex': str(e)})
- self.teardown()
- raise e
+ with excutils.save_and_reraise_exception():
+ LOG.debug(_("Failed to mount image %(ex)s)"), {'ex': str(e)})
+ self.teardown()
def teardown(self):
try:
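
excutils.save_and_reraise_exception() is the pattern adopted here and throughout the libvirt changes below: run cleanup and logging while an exception is in flight, then let the original exception propagate with its traceback intact, instead of teardown()-then-raise e. A condensed usage sketch with a hypothetical mount object standing in for the loop/nbd mounters:

    from nova.openstack.common import excutils
    from nova.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def setup_mount(mount):
        # 'mount' is a hypothetical object with do_mount()/error/teardown().
        try:
            if not mount.do_mount():
                raise RuntimeError(mount.error)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                # If the cleanup itself raises, the helper logs the original
                # exception; otherwise the original is re-raised on exit.
                LOG.debug("Failed to mount image %s", e)
                mount.teardown()
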
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 72ebf0b6e..6a93d92ae 100644..100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -351,8 +351,9 @@ class LibvirtDriver(driver.ComputeDriver):
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
- LOG.warn(_("Invalid cachemode %(cache_mode)s specified "
- "for disk type %(disk_type)s.") % locals())
+ LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
+ 'for disk type %(disk_type)s.'),
+ {'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
@@ -562,8 +563,8 @@ class LibvirtDriver(driver.ComputeDriver):
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
- '%(major)i.%(minor)i.%(micro)i or greater.') %
- locals())
+ '%(major)i.%(minor)i.%(micro)i or greater.'),
+ {'major': major, 'minor': minor, 'micro': micro})
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
@@ -778,9 +779,10 @@ class LibvirtDriver(driver.ComputeDriver):
if not is_okay:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error from libvirt during destroy. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during destroy. '
+ 'Code=%(errcode)s Error=%(e)s'),
+ {'errcode': errcode, 'e': e},
+ instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
@@ -853,9 +855,9 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
- LOG.error(_("Error from libvirt during undefine. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during undefine. '
+ 'Code=%(errcode)s Error=%(e)s') %
+ {'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
@@ -879,9 +881,10 @@ class LibvirtDriver(driver.ComputeDriver):
else:
retry = False
errcode = e.get_error_code()
- LOG.error(_("Error from libvirt during unfilter. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during unfilter. '
+ 'Code=%(errcode)s Error=%(e)s') %
+ {'errcode': errcode, 'e': e},
+ instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
@@ -907,7 +910,7 @@ class LibvirtDriver(driver.ComputeDriver):
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
- LOG.info(_('Deleting instance files %(target)s') % locals(),
+ LOG.info(_('Deleting instance files %s'), target,
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
@@ -916,8 +919,8 @@ class LibvirtDriver(driver.ComputeDriver):
try:
shutil.rmtree(target)
except OSError as e:
- LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
- ) % locals())
+ LOG.error(_('Failed to cleanup directory %(target)s: '
+ '%(e)s'), {'target': target, 'e': e})
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
@@ -1205,6 +1208,18 @@ class LibvirtDriver(driver.ComputeDriver):
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
+ # Abort is an idempotent operation, so make sure any block
+ # jobs which may have failed are ended. This operation also
+ # confirms that the running instance, as opposed to the system as a
+ # whole, has a new enough version of the hypervisor (bug 1193146).
+ try:
+ virt_dom.blockJobAbort(disk_path, 0)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
+ live_snapshot = False
+ else:
+ pass
else:
live_snapshot = False
@@ -1276,13 +1291,6 @@ class LibvirtDriver(driver.ComputeDriver):
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
- # Abort is an idempotent operation, so make sure any block
- # jobs which may have failed are ended.
- try:
- domain.blockJobAbort(disk_path, 0)
- except Exception:
- pass
-
def _wait_for_block_job(domain, disk_path):
status = domain.blockJobInfo(disk_path, 0)
try:
@@ -1605,7 +1613,8 @@ class LibvirtDriver(driver.ComputeDriver):
return out
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
+ {'data': data, 'fpath': fpath})
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -1918,7 +1927,7 @@ class LibvirtDriver(driver.ComputeDriver):
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
- content=files, extra_md=extra_md)
+ content=files or [], extra_md=extra_md)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
@@ -1959,10 +1968,16 @@ class LibvirtDriver(driver.ComputeDriver):
injection_path = image('disk').path
img_id = instance['image_ref']
- for inj in ('key', 'net', 'metadata', 'admin_pass', 'files'):
- if locals()[inj]:
+ for inj, val in [('key', key),
+ ('net', net),
+ ('metadata', metadata),
+ ('admin_pass', admin_pass),
+ ('files', files)]:
+ if val:
LOG.info(_('Injecting %(inj)s into image '
- '%(img_id)s'), locals(), instance=instance)
+ '%(img_id)s'),
+ {'inj': inj, 'img_id': img_id},
+ instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
@@ -1972,7 +1987,8 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
- '%(img_id)s (%(e)s)') % locals(),
+ '%(img_id)s (%(e)s)'),
+ {'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
@@ -2384,12 +2400,14 @@ class LibvirtDriver(driver.ComputeDriver):
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
- LOG.debug(_("Start to_xml instance=%(instance)s "
- "network_info=%(network_info)s "
- "disk_info=%(disk_info)s "
- "image_meta=%(image_meta)s rescue=%(rescue)s"
- "block_device_info=%(block_device_info)s") %
- locals())
+ LOG.debug(_('Start to_xml instance=%(instance)s '
+ 'network_info=%(network_info)s '
+ 'disk_info=%(disk_info)s '
+ 'image_meta=%(image_meta)s rescue=%(rescue)s '
+ 'block_device_info=%(block_device_info)s'),
+ {'instance': instance, 'network_info': network_info,
+ 'disk_info': disk_info, 'image_meta': image_meta,
+ 'rescue': rescue, 'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
@@ -2399,7 +2417,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
- LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
+ LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
+ {'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
@@ -2437,8 +2456,11 @@ class LibvirtDriver(driver.ComputeDriver):
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
- msg = _("Error from libvirt while looking up %(instance_name)s: "
- "[Error Code %(error_code)s] %(ex)s") % locals()
+ msg = (_('Error from libvirt while looking up %(instance_name)s: '
+ '[Error Code %(error_code)s] %(ex)s') %
+ {'instance_name': instance_name,
+ 'error_code': error_code,
+ 'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
@@ -2911,13 +2933,14 @@ class LibvirtDriver(driver.ComputeDriver):
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
- LOG.info(_("Getting block stats failed, device might have "
- "been detached. Instance=%(instance_name)s "
- "Disk=%(disk)s Code=%(errcode)s Error=%(e)s")
- % locals())
+ LOG.info(_('Getting block stats failed, device might have '
+ 'been detached. Instance=%(instance_name)s '
+ 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
+ {'instance_name': instance_name, 'disk': disk,
+ 'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
- LOG.info(_("Could not find domain in libvirt for instance %s. "
- "Cannot get block stats for device") % instance_name)
+ LOG.info(_('Could not find domain in libvirt for instance %s. '
+ 'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
@@ -3127,12 +3150,14 @@ class LibvirtDriver(driver.ComputeDriver):
# Check that available disk > necessary disk
if (available - necessary) < 0:
- instance_uuid = instance['uuid']
- reason = _("Unable to migrate %(instance_uuid)s: "
- "Disk of instance is too large(available"
- " on destination host:%(available)s "
- "< need:%(necessary)s)")
- raise exception.MigrationPreCheckError(reason=reason % locals())
+ reason = (_('Unable to migrate %(instance_uuid)s: '
+ 'Disk of instance is too large(available'
+ ' on destination host:%(available)s '
+ '< need:%(necessary)s)') %
+ {'instance_uuid': instance['uuid'],
+ 'available': available,
+ 'necessary': necessary})
+ raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
@@ -3172,11 +3197,11 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
- LOG.error(m % locals())
+ LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
- LOG.error(m % locals())
- raise exception.InvalidCPUInfo(reason=m % locals())
+ LOG.error(m, {'ret': ret, 'u': u})
+ raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
@@ -3304,7 +3329,7 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Live Migration failure: %(e)s") % locals(),
+ LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
@@ -3518,9 +3543,12 @@ class LibvirtDriver(driver.ComputeDriver):
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
- msg = _("Error from libvirt while getting description of "
- "%(instance_name)s: [Error Code %(error_code)s] "
- "%(ex)s") % locals()
+ msg = (_('Error from libvirt while getting description of '
+ '%(instance_name)s: [Error Code %(error_code)s] '
+ '%(ex)s') %
+ {'instance_name': instance_name,
+ 'error_code': error_code,
+ 'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
@@ -3535,14 +3563,12 @@ class LibvirtDriver(driver.ComputeDriver):
path = path_node.get('file')
if disk_type != 'file':
- LOG.debug(_('skipping %(path)s since it looks like volume') %
- locals())
+ LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if not path:
- LOG.debug(_('skipping disk for %(instance_name)s as it'
- ' does not have a path') %
- locals())
+ LOG.debug(_('skipping disk for %s as it does not have a path'),
+ instance_name)
continue
# get the real disk size or
@@ -3581,8 +3607,8 @@ class LibvirtDriver(driver.ComputeDriver):
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
- LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
- locals())
+ LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
+ {'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
@@ -3913,9 +3939,7 @@ class LibvirtDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
- LOG.debug(_('Checking instance files accessability'
- '%(instance_path)s')
- % locals())
+ LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index f55e29df9..f9e948a5e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -245,9 +245,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
# This happens when the instance filter is still in
# use (ie. when the instance has not terminated properly)
raise
- LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
- 'is not found.') % locals(),
- instance=instance)
+ LOG.debug(_('The nwfilter(%s) is not found.'),
+ instance_filter_name, instance=instance)
def _define_filters(self, filter_name, filter_children):
self._define_filter(self._filter_container(filter_name,
@@ -269,7 +268,9 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
except libvirt.libvirtError:
name = instance['name']
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
- '%(name)s is not found.') % locals(),
+ '%(name)s is not found.'),
+ {'instance_filter_name': instance_filter_name,
+ 'name': name},
instance=instance)
return False
return True
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index b8b9df1a9..d0a2283e7 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -262,9 +262,10 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
def get_config(self, instance, network, mapping, image_meta):
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
@@ -383,9 +384,10 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
network, mapping = vif
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
@@ -470,9 +472,10 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
network, mapping = vif
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index c9e011856..0bd6d776e 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -27,6 +27,7 @@ from nova.api.metadata import password
from nova.compute import api as compute_api
from nova import context
from nova import crypto
+from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
@@ -77,7 +78,7 @@ CONF.register_opts(xenapi_agent_opts)
def _call_agent(session, instance, vm_ref, method, addl_args=None,
- timeout=None):
+ timeout=None, success_code='0'):
"""Abstracts out the interaction with the agent xenapi plugin."""
if addl_args is None:
addl_args = {}
@@ -101,43 +102,39 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
- return {'returncode': 'timeout', 'message': err_msg}
+ raise exception.AgentTimeout(method=method)
elif 'NOT IMPLEMENTED:' in err_msg:
- LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
- ' supported by the agent. args=%(args)r'),
+ LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not '
+ 'supported by the agent. args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
- return {'returncode': 'notimplemented', 'message': err_msg}
+ raise exception.AgentNotImplemented(method=method)
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'),
{'method': method, 'args': args, 'e': e},
instance=instance)
- return {'returncode': 'error', 'message': err_msg}
- return None
+ raise exception.AgentError(method=method)
- if isinstance(ret, dict):
- return ret
- try:
- return jsonutils.loads(ret)
- except TypeError:
- LOG.error(_('The agent call to %(method)s returned an invalid '
- 'response: %(ret)r. args=%(args)r'),
+ if not isinstance(ret, dict):
+ try:
+ ret = jsonutils.loads(ret)
+ except TypeError:
+ LOG.error(_('The agent call to %(method)s returned an invalid '
+ 'response: %(ret)r. args=%(args)r'),
+ {'method': method, 'ret': ret, 'args': args},
+ instance=instance)
+ raise exception.AgentError(method=method)
+
+ if ret['returncode'] != success_code:
+ LOG.error(_('The agent call to %(method)s returned '
+ 'an error: %(ret)r. args=%(args)r'),
{'method': method, 'ret': ret, 'args': args},
instance=instance)
- return {'returncode': 'error',
- 'message': 'unable to deserialize response'}
-
-
-def _get_agent_version(session, instance, vm_ref):
- resp = _call_agent(session, instance, vm_ref, 'version')
- if resp['returncode'] != '0':
- LOG.error(_('Failed to query agent version: %r'),
- resp, instance=instance)
- return None
+ raise exception.AgentError(method=method)
# Some old versions of the Windows agent have a trailing \\r\\n
# (ie CRLF escaped) for some reason. Strip that off.
- return resp['message'].replace('\\r\\n', '')
+ return ret['message'].replace('\\r\\n', '')
class XenAPIBasedAgent(object):
@@ -147,6 +144,11 @@ class XenAPIBasedAgent(object):
self.instance = instance
self.vm_ref = vm_ref
+ def _call_agent(self, method, addl_args=None, timeout=None,
+ success_code='0'):
+ return _call_agent(self.session, self.instance, self.vm_ref,
+ method, addl_args, timeout, success_code)
+
def get_agent_version(self):
"""Get the version of the agent running on the VM instance."""
@@ -159,31 +161,47 @@ class XenAPIBasedAgent(object):
# normal as well as watch for domid changes
expiration = time.time() + CONF.agent_version_timeout
- while time.time() < expiration:
- ret = _get_agent_version(self.session, self.instance, self.vm_ref)
- if ret:
- return ret
-
- LOG.info(_('Reached maximum time attempting to query agent version'),
- instance=self.instance)
-
- return None
+ while True:
+ try:
+ return self._call_agent('version')
+ except exception.AgentTimeout:
+ if time.time() > expiration:
+ raise
def agent_update(self, agent_build):
"""Update agent on the VM instance."""
- LOG.info(_('Updating agent to %s'), agent_build['version'],
- instance=self.instance)
+ LOG.debug(_('Updating agent to %s'), agent_build['version'],
+ instance=self.instance)
# Send the encrypted password
args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'agentupdate', args)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to update agent: %r'), resp,
- instance=self.instance)
- return None
- return resp['message']
+ try:
+ self._call_agent('agentupdate', args)
+ except exception.AgentError as exc:
+ # Silently fail for agent upgrades
+ LOG.warning(_("Unable to update the agent due "
+ "to: %(exc)s") % dict(exc=exc),
+ instance=self.instance)
+
+ def _exchange_key_with_agent(self):
+ dh = SimpleDH()
+ args = {'pub': str(dh.get_public())}
+ resp = self._call_agent('key_init', args, success_code='D0')
+ agent_pub = int(resp)
+ dh.compute_shared(agent_pub)
+ return dh
+
+ def _save_instance_password_if_sshkey_present(self, new_pass):
+ sshkey = self.instance.get('key_data')
+ if sshkey:
+ ctxt = context.get_admin_context()
+ enc = crypto.ssh_encrypt_text(sshkey, new_pass)
+ sys_meta = utils.instance_sys_meta(self.instance)
+ sys_meta.update(password.convert_password(ctxt,
+ base64.b64encode(enc)))
+ self.virtapi.instance_update(ctxt, self.instance['uuid'],
+ {'system_metadata': sys_meta})
def set_admin_password(self, new_pass):
"""Set the root/admin password on the VM instance.
@@ -196,59 +214,24 @@ class XenAPIBasedAgent(object):
"""
LOG.debug(_('Setting admin password'), instance=self.instance)
- dh = SimpleDH()
-
- # Exchange keys
- args = {'pub': str(dh.get_public())}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'key_init', args)
-
- # Successful return code from key_init is 'D0'
- if resp['returncode'] != 'D0':
- msg = _('Failed to exchange keys: %r') % resp
- LOG.error(msg, instance=self.instance)
- raise NotImplementedError(msg)
-
- # Some old versions of the Windows agent have a trailing \\r\\n
- # (ie CRLF escaped) for some reason. Strip that off.
- agent_pub = int(resp['message'].replace('\\r\\n', ''))
- dh.compute_shared(agent_pub)
-
+ dh = self._exchange_key_with_agent()
# Some old versions of Linux and Windows agent expect trailing \n
# on password to work correctly.
enc_pass = dh.encrypt(new_pass + '\n')
- # Send the encrypted password
args = {'enc_pass': enc_pass}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'password', args)
-
- # Successful return code from password is '0'
- if resp['returncode'] != '0':
- msg = _('Failed to exchange keys: %r') % resp
- LOG.error(msg, instance=self.instance)
- raise NotImplementedError(msg)
-
- sshkey = self.instance.get('key_data')
- if sshkey:
- ctxt = context.get_admin_context()
- enc = crypto.ssh_encrypt_text(sshkey, new_pass)
- sys_meta = utils.metadata_to_dict(self.instance['system_metadata'])
- sys_meta.update(password.convert_password(ctxt,
- base64.b64encode(enc)))
- self.virtapi.instance_update(ctxt, self.instance['uuid'],
- {'system_metadata': sys_meta})
-
- return resp['message']
+ self._call_agent('password', args)
+ self._save_instance_password_if_sshkey_present(new_pass)
def inject_ssh_key(self):
sshkey = self.instance.get('key_data')
if not sshkey:
return
if self.instance['os_type'] == 'windows':
- LOG.warning(_("Skipping setting of ssh key for Windows."),
- instance=self.instance)
+ LOG.debug(_("Skipping setting of ssh key for Windows."),
+ instance=self.instance)
return
+
sshkey = str(sshkey)
keyfile = '/root/.ssh/authorized_keys'
key_data = ''.join([
@@ -268,30 +251,13 @@ class XenAPIBasedAgent(object):
b64_contents = base64.b64encode(contents)
args = {'b64_path': b64_path, 'b64_contents': b64_contents}
-
- # If the agent doesn't support file injection, a NotImplementedError
- # will be raised with the appropriate message.
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'inject_file', args)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to inject file: %r'), resp,
- instance=self.instance)
- return None
-
- return resp['message']
+ return self._call_agent('inject_file', args)
def resetnetwork(self):
LOG.debug(_('Resetting network'), instance=self.instance)
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'resetnetwork',
- timeout=CONF.agent_resetnetwork_timeout)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to reset network: %r'), resp,
- instance=self.instance)
- return None
-
- return resp['message']
+ return self._call_agent('resetnetwork',
+ timeout=CONF.agent_resetnetwork_timeout)
def find_guest_agent(base_dir):
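
The agent.py hunks above change the calling convention: instead of handing back
{'returncode': 'error', ...} dicts for callers to inspect, _call_agent (and the
new XenAPIBasedAgent._call_agent wrapper) now returns only the message payload
and raises on a bad return code or an unparseable response. A minimal sketch of
how a caller consumes that, assuming only the exception classes referenced in
the diff (nova.exception.AgentError / AgentTimeout); the helper below is
illustrative, not part of the patch:

    from nova import exception

    def reset_network_best_effort(agent, log):
        # agent is a XenAPIBasedAgent; resetnetwork() now returns the
        # agent's 'message' string on success and raises otherwise.
        try:
            return agent.resetnetwork()
        except exception.AgentTimeout:
            log.warning("agent did not answer in time; skipping network reset")
        except exception.AgentError as exc:
            log.error("resetnetwork failed: %s", exc)
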
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 2dd9765d1..f4eac3887 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -565,7 +565,7 @@ class SessionBase(object):
return 12 * 1024 * 1024 * 1024
def _plugin_agent_version(self, method, args):
- return as_json(returncode='0', message='1.0')
+ return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
@@ -579,6 +579,13 @@ class SessionBase(object):
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
+ def _plugin_agent_agentupdate(self, method, args):
+ url = args["url"]
+ md5 = args["md5sum"]
+ message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
+ md5=md5)
+ return as_json(returncode='0', message=message)
+
def _plugin_noop(self, method, args):
return ''
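
The fake's version payload now carries the escaped CRLF ('1.0\\r\\n') so tests
exercise the stripping done in agent.py. What gets stripped is the
four-character escaped sequence, not a real carriage return / line feed pair; a
stand-alone illustration (the helper name is made up for the example):

    def strip_escaped_crlf(message):
        # Removes the literal characters backslash-r-backslash-n that old
        # Windows agents append, mirroring agent.py's replace('\\r\\n', '').
        return message.replace('\\r\\n', '')

    assert strip_escaped_crlf('1.0\\r\\n') == '1.0'
    assert strip_escaped_crlf('1.0') == '1.0'
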
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ff6f7f266..ac8c9c58b 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1017,7 +1017,7 @@ def _create_image(context, session, instance, name_label, image_id,
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
@@ -1112,7 +1112,7 @@ def _image_uses_bittorrent(context, instance):
if xenapi_torrent_images == 'all':
bittorrent = True
elif xenapi_torrent_images == 'some':
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 80a4fb48a..d18fc2729 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1253,7 +1253,11 @@ class VMOps(object):
instance=instance)
return
- vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
+ shutdown_success = vm_utils.hard_shutdown_vm(self._session, instance,
+ vm_ref)
+ if not shutdown_success:
+ raise exception.InstancePowerOffFailure(
+ _("XenAPI failed to power the instance off"))
if destroy_disks:
self._volumeops.detach_all(vm_ref)
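
With the vmops.py change, destroy no longer carries on when the hard shutdown
fails; it raises InstancePowerOffFailure instead. A hedged sketch of how a
caller might retry rather than leak a still-running domain (the destroy()
signature and the retry/backoff values here are illustrative, not nova's own
handling):

    import time

    from nova import exception

    def destroy_with_retries(vmops, instance, network_info, attempts=3):
        for attempt in range(attempts):
            try:
                return vmops.destroy(instance, network_info)
            except exception.InstancePowerOffFailure:
                if attempt == attempts - 1:
                    raise
                time.sleep(5)  # give XenAPI a moment before retrying
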
diff --git a/openstack-common.conf b/openstack-common.conf
index 05fca3167..9edac2d2e 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -24,7 +24,6 @@ module=network_utils
module=notifier
module=patch_tox_venv
module=periodic_task
-module=plugin
module=policy
module=processutils
module=redhat-eventlet.patch
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
index 9c86b7cb7..a12704248 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -159,7 +159,13 @@ def delete_record(self, arg_dict):
VM and the specified path from xenstore.
"""
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
- ret, result = _run_command(cmd)
+ try:
+ ret, result = _run_command(cmd)
+ except XenstoreError, e:
+ if 'could not remove path' in e.stderr:
+ # Entry already gone. We're good to go.
+ return ''
+ raise
return result
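
The xenstore.py change makes record deletion idempotent: a 'could not remove
path' failure from xenstore-rm means the entry is already gone, which is
treated as success. The same control flow extracted into a stand-alone sketch
(XenstoreError and the run_command callable stand in for the plugin's own
helpers):

    class XenstoreError(Exception):
        def __init__(self, stderr=''):
            super(XenstoreError, self).__init__(stderr)
            self.stderr = stderr

    def delete_path(run_command, dom_id, path):
        cmd = ["xenstore-rm", "/local/domain/%s/%s" % (dom_id, path)]
        try:
            _returncode, result = run_command(cmd)
        except XenstoreError as e:
            if 'could not remove path' in e.stderr:
                # Entry already gone; nothing left to delete.
                return ''
            raise
        return result
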
diff --git a/requirements.txt b/requirements.txt
index 298c6b2c9..68cec7c31 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,7 +21,6 @@ paramiko
pyasn1
Babel>=0.9.6
iso8601>=0.1.4
-httplib2
requests>=1.1,<1.2.1 # order-dependent python-cinderclient req cap, bug 1182271
python-cinderclient>=1.0.1
python-quantumclient>=2.2.0,<3.0.0
@@ -30,4 +29,4 @@ python-keystoneclient>=0.2.0
six
stevedore>=0.9
websockify<0.4
-http://tarballs.openstack.org/oslo.config/oslo.config-1.2.0a2.tar.gz#egg=oslo.config-1.2.0a2
+oslo.config>=1.1.0
diff --git a/setup.cfg b/setup.cfg
index 1baa22940..85c9514f0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -54,11 +54,14 @@ console_scripts =
nova-xvpvncproxy = nova.cmd.xvpvncproxy:main
nova.api.v3.extensions =
+ certificates = nova.api.openstack.compute.plugins.v3.certificates:Certificates
consoles = nova.api.openstack.compute.plugins.v3.consoles:Consoles
+ evacuate = nova.api.openstack.compute.plugins.v3.evacuate:Evacuate
extension_info = nova.api.openstack.compute.plugins.v3.extension_info:ExtensionInfo
fixed_ips = nova.api.openstack.compute.plugins.v3.fixed_ips:FixedIPs
ips = nova.api.openstack.compute.plugins.v3.ips:IPs
keypairs = nova.api.openstack.compute.plugins.v3.keypairs:Keypairs
+ rescue = nova.api.openstack.compute.plugins.v3.rescue:Rescue
servers = nova.api.openstack.compute.plugins.v3.servers:Servers
nova.api.v3.extensions.server.create =