-rwxr-xr-xbin/nova-manage24
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json1
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml1
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json1
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml1
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json3
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml3
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.json3
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.xml3
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.json3
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.xml3
-rw-r--r--doc/api_samples/os-tenant-networks/networks-post-req.json9
-rw-r--r--etc/nova/api-paste.ini2
-rw-r--r--nova/api/openstack/common.py9
-rw-r--r--nova/api/openstack/compute/contrib/aggregates.py3
-rw-r--r--nova/api/openstack/compute/contrib/cells.py8
-rw-r--r--nova/api/openstack/compute/contrib/consoles.py3
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py8
-rw-r--r--nova/api/openstack/compute/contrib/os_networks.py3
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py43
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py5
-rw-r--r--nova/api/openstack/compute/contrib/services.py10
-rw-r--r--nova/api/openstack/compute/contrib/simple_tenant_usage.py15
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py2
-rw-r--r--nova/api/openstack/compute/servers.py4
-rw-r--r--nova/api/openstack/wsgi.py43
-rw-r--r--nova/api/openstack/xmlutil.py61
-rw-r--r--nova/block_device.py2
-rw-r--r--nova/cells/manager.py11
-rw-r--r--nova/cells/messaging.py4
-rw-r--r--nova/cells/utils.py8
-rw-r--r--nova/compute/api.py44
-rw-r--r--nova/compute/cells_api.py9
-rwxr-xr-xnova/compute/manager.py251
-rw-r--r--nova/compute/rpcapi.py16
-rw-r--r--nova/conductor/api.py42
-rw-r--r--nova/conductor/manager.py8
-rw-r--r--nova/config.py2
-rw-r--r--nova/db/api.py6
-rw-r--r--nova/db/sqlalchemy/__init__.py8
-rw-r--r--nova/db/sqlalchemy/api.py94
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py3
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py43
-rw-r--r--nova/db/sqlalchemy/models.py9
-rw-r--r--nova/exception.py5
-rw-r--r--nova/locale/nova.pot1645
-rw-r--r--nova/network/l3.py10
-rw-r--r--nova/network/manager.py126
-rw-r--r--nova/objectstore/s3server.py17
-rw-r--r--nova/openstack/common/rpc/amqp.py59
-rw-r--r--nova/openstack/common/rpc/common.py4
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py4
-rw-r--r--nova/openstack/common/rpc/impl_qpid.py4
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py34
-rw-r--r--nova/openstack/common/rpc/matchmaker.py169
-rw-r--r--nova/openstack/common/timeutils.py2
-rw-r--r--nova/quota.py52
-rw-r--r--nova/scheduler/manager.py9
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_aggregates.py33
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cells.py8
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fixed_ips.py45
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py8
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py13
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quota_classes.py23
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quotas.py38
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py22
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py18
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py27
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py8
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py8
-rw-r--r--nova/tests/api/openstack/test_common.py14
-rw-r--r--nova/tests/api/openstack/test_wsgi.py16
-rw-r--r--nova/tests/api/openstack/test_xmlutil.py64
-rw-r--r--nova/tests/cells/test_cells_messaging.py1
-rw-r--r--nova/tests/cells/test_cells_utils.py23
-rw-r--r--nova/tests/compute/test_compute.py293
-rw-r--r--nova/tests/compute/test_compute_cells.py56
-rw-r--r--nova/tests/compute/test_rpcapi.py7
-rw-r--r--nova/tests/conductor/test_conductor.py144
-rw-r--r--nova/tests/db/test_sqlite.py60
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/test_api_samples.py2
-rw-r--r--nova/tests/network/test_manager.py218
-rw-r--r--nova/tests/test_db_api.py194
-rw-r--r--nova/tests/test_hypervapi.py219
-rw-r--r--nova/tests/test_instance_types.py18
-rw-r--r--nova/tests/test_libvirt.py32
-rw-r--r--nova/tests/test_migration_utils.py18
-rw-r--r--nova/tests/test_migrations.py80
-rw-r--r--nova/tests/test_nova_manage.py14
-rw-r--r--nova/tests/test_powervm.py87
-rw-r--r--nova/tests/test_quota.py24
-rw-r--r--nova/tests/test_utils.py78
-rw-r--r--nova/tests/utils.py17
-rw-r--r--nova/utils.py58
-rwxr-xr-xnova/virt/baremetal/driver.py2
-rw-r--r--nova/virt/baremetal/virtual_power_driver_settings.py2
-rw-r--r--nova/virt/disk/mount/api.py3
-rwxr-xr-xnova/virt/driver.py5
-rwxr-xr-xnova/virt/fake.py2
-rw-r--r--nova/virt/hyperv/basevolumeutils.py50
-rwxr-xr-xnova/virt/hyperv/driver.py2
-rw-r--r--nova/virt/hyperv/livemigrationops.py14
-rw-r--r--nova/virt/hyperv/livemigrationutils.py160
-rw-r--r--nova/virt/hyperv/vmutils.py28
-rw-r--r--nova/virt/hyperv/volumeops.py68
-rw-r--r--nova/virt/hyperv/volumeutilsv2.py31
-rw-r--r--nova/virt/libvirt/designer.py11
-rwxr-xr-xnova/virt/libvirt/driver.py17
-rwxr-xr-xnova/virt/libvirt/imagebackend.py6
-rw-r--r--nova/virt/powervm/blockdev.py2
-rwxr-xr-xnova/virt/powervm/driver.py7
-rwxr-xr-xnova/virt/vmwareapi/driver.py2
-rwxr-xr-xnova/virt/xenapi/driver.py5
-rw-r--r--nova/virt/xenapi/vmops.py48
-rw-r--r--nova/virt/xenapi/volumeops.py28
-rw-r--r--tools/pip-requires8
128 files changed, 3785 insertions, 1690 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 274ae4640..96e4a4012 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -675,11 +675,13 @@ class ServiceCommands(object):
def enable(self, host, service):
"""Enable scheduling for a service."""
ctxt = context.get_admin_context()
- svc = db.service_get_by_args(ctxt, host, service)
- if not svc:
- print _("Unable to find service")
- return
- db.service_update(ctxt, svc['id'], {'disabled': False})
+ try:
+ svc = db.service_get_by_args(ctxt, host, service)
+ db.service_update(ctxt, svc['id'], {'disabled': False})
+ except exception.NotFound as ex:
+ print _("error: %s") % ex
+ sys.exit(2)
+ print _("Service %(service)s on host %(host)s enabled.") % locals()
@args('--host', dest='host', metavar='<host>', help='Host')
@args('--service', dest='service', metavar='<service>',
@@ -687,11 +689,13 @@ class ServiceCommands(object):
def disable(self, host, service):
"""Disable scheduling for a service."""
ctxt = context.get_admin_context()
- svc = db.service_get_by_args(ctxt, host, service)
- if not svc:
- print _("Unable to find service")
- return
- db.service_update(ctxt, svc['id'], {'disabled': True})
+ try:
+ svc = db.service_get_by_args(ctxt, host, service)
+ db.service_update(ctxt, svc['id'], {'disabled': True})
+ except exception.NotFound as ex:
+ print _("error: %s") % ex
+ sys.exit(2)
+ print _("Service %(service)s on host %(host)s disabled.") % locals()
@args('--host', dest='host', metavar='<host>', help='Host')
def describe_resource(self, host):
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
index e4d0a5b47..e5748a4cd 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
@@ -1,6 +1,7 @@
{
"quota_class_set": {
"cores": 20,
+ "fixed_ips": 10,
"floating_ips": 10,
"id": "test_class",
"injected_file_content_bytes": 10240,
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml
index 74532bc98..8e7444634 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml
@@ -1,6 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<quota_class_set id="test_class">
<cores>20</cores>
+ <fixed_ips>10</fixed_ips>
<floating_ips>10</floating_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
index 99a11f4ff..6325bb562 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
@@ -1,6 +1,7 @@
{
"quota_class_set": {
"cores": 50,
+ "fixed_ips": 10,
"floating_ips": 10,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml
index 44c658a41..26a29fc23 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml
@@ -1,6 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<quota_class_set>
<cores>50</cores>
+ <fixed_ips>10</fixed_ips>
<floating_ips>10</floating_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
index ee1f6a397..efc35cf00 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
@@ -1,6 +1,7 @@
{
"quota_set": {
"cores": 20,
+ "fixed_ips": 10,
"floating_ips": 10,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
@@ -13,4 +14,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
index 6a39c8506..dd4c6d66d 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
@@ -1,6 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<quota_set id="fake_tenant">
<cores>20</cores>
+ <fixed_ips>10</fixed_ips>
<floating_ips>10</floating_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
@@ -11,4 +12,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
index ee1f6a397..efc35cf00 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
@@ -1,6 +1,7 @@
{
"quota_set": {
"cores": 20,
+ "fixed_ips": 10,
"floating_ips": 10,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
@@ -13,4 +14,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
index 6a39c8506..dd4c6d66d 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
@@ -1,6 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<quota_set id="fake_tenant">
<cores>20</cores>
+ <fixed_ips>10</fixed_ips>
<floating_ips>10</floating_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
@@ -11,4 +12,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
index c16dc6bb5..14324e365 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
@@ -1,6 +1,7 @@
{
"quota_set": {
"cores": 20,
+ "fixed_ips": 10,
"floating_ips": 10,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
@@ -12,4 +13,4 @@
"security_group_rules": 20,
"security_groups": 45
}
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
index 126c3fced..5e6bb893e 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
@@ -1,6 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<quota_set>
<cores>20</cores>
+ <fixed_ips>10</fixed_ips>
<floating_ips>10</floating_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
@@ -11,4 +12,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>45</security_groups>
-</quota_set>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-tenant-networks/networks-post-req.json b/doc/api_samples/os-tenant-networks/networks-post-req.json
new file mode 100644
index 000000000..f47fc9d20
--- /dev/null
+++ b/doc/api_samples/os-tenant-networks/networks-post-req.json
@@ -0,0 +1,9 @@
+{
+ "network": {
+ "label": "public",
+ "cidr": "172.0.0.0/24",
+ "vlan_start": 1,
+ "num_networks": 1,
+ "network_size": 255
+ }
+} \ No newline at end of file
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 08d59c521..76e4e447c 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -105,3 +105,5 @@ admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
signing_dir = /tmp/keystone-signing-nova
+# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
+auth_version = v2.0
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index c6473a648..6e3d7eabc 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -31,7 +31,6 @@ from nova.compute import vm_states
from nova import exception
from nova.openstack.common import log as logging
from nova import quota
-from nova import utils
osapi_opts = [
cfg.IntOpt('osapi_max_limit',
@@ -356,7 +355,7 @@ def raise_http_conflict_for_instance_invalid_state(exc, action):
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
- dom = utils.safe_minidom_parse_string(text)
+ dom = xmlutil.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
@@ -364,7 +363,7 @@ class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
- dom = utils.safe_minidom_parse_string(text)
+ dom = xmlutil.safe_minidom_parse_string(text)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
@@ -382,7 +381,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer):
return metadata
def _extract_metadata_container(self, datastring):
- dom = utils.safe_minidom_parse_string(datastring)
+ dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
@@ -394,7 +393,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer):
return self._extract_metadata_container(datastring)
def update(self, datastring):
- dom = utils.safe_minidom_parse_string(datastring)
+ dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py
index 84b0358a3..b73a50f39 100644
--- a/nova/api/openstack/compute/contrib/aggregates.py
+++ b/nova/api/openstack/compute/contrib/aggregates.py
@@ -167,7 +167,8 @@ class AggregateController(object):
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
- except (exception.AggregateNotFound, exception.AggregateHostNotFound):
+ except (exception.AggregateNotFound, exception.AggregateHostNotFound,
+ exception.ComputeHostNotFound):
LOG.info(_("Cannot remove host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py
index efd2cd189..03597ff0e 100644
--- a/nova/api/openstack/compute/contrib/cells.py
+++ b/nova/api/openstack/compute/contrib/cells.py
@@ -19,7 +19,6 @@
from oslo.config import cfg
from webob import exc
-from xml.parsers import expat
from nova.api.openstack import common
from nova.api.openstack import extensions
@@ -31,7 +30,6 @@ from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-from nova import utils
LOG = logging.getLogger(__name__)
@@ -98,11 +96,7 @@ class CellDeserializer(wsgi.XMLDeserializer):
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
- try:
- node = utils.safe_minidom_parse_string(string)
- except expat.ExpatError:
- msg = _("cannot understand XML")
- raise exception.MalformedRequestBody(reason=msg)
+ node = xmlutil.safe_minidom_parse_string(string)
return {'body': {'cell': self._extract_cell(node)}}
diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py
index 264f0b270..0431a0694 100644
--- a/nova/api/openstack/compute/contrib/consoles.py
+++ b/nova/api/openstack/compute/contrib/consoles.py
@@ -49,7 +49,8 @@ class ConsolesController(wsgi.Controller):
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
except exception.InstanceNotReady as e:
- raise webob.exc.HTTPConflict(explanation=unicode(e))
+ raise webob.exc.HTTPConflict(
+ explanation=_('Instance not yet ready'))
return {'console': {'type': console_type, 'url': output['url']}}
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 3ecfb9965..a3b3538fd 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -16,7 +16,6 @@
"""The hosts admin extension."""
import webob.exc
-from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
@@ -24,7 +23,6 @@ from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
-from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
@@ -71,11 +69,7 @@ class HostShowTemplate(xmlutil.TemplateBuilder):
class HostUpdateDeserializer(wsgi.XMLDeserializer):
def default(self, string):
- try:
- node = utils.safe_minidom_parse_string(string)
- except expat.ExpatError:
- msg = _("cannot understand XML")
- raise exception.MalformedRequestBody(reason=msg)
+ node = xmlutil.safe_minidom_parse_string(string)
updates = {}
updates_node = self.find_first_child_named(node, 'updates')
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index c6268f277..2cea0e081 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -139,6 +139,9 @@ class NetworkController(wsgi.Controller):
try:
self.network_api.add_network_to_project(
context, project_id, network_id)
+ except NotImplementedError:
+ msg = (_("VLAN support must be enabled"))
+ raise exc.HTTPNotImplemented(explanation=msg)
except Exception as ex:
msg = (_("Cannot associate network %(network)s"
" with project %(project)s: %(message)s") %
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index ddfe5bf08..c7fe87a1f 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -88,23 +88,34 @@ class QuotaSetsController(object):
context = req.environ['nova.context']
authorize_update(context)
project_id = id
+
+ bad_keys = []
+ for key in body['quota_set'].keys():
+ if (key not in QUOTAS and
+ key != 'tenant_id' and
+ key != 'id'):
+ bad_keys.append(key)
+
+ if len(bad_keys) > 0:
+ msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
for key in body['quota_set'].keys():
- if key in QUOTAS:
- try:
- value = int(body['quota_set'][key])
- except (ValueError, TypeError):
- LOG.warn(_("Quota for %s should be integer.") % key)
- # NOTE(hzzhoushaoyu): Do not prevent valid value to be
- # updated. If raise BadRequest, some may be updated and
- # others may be not.
- continue
- self._validate_quota_limit(value)
- try:
- db.quota_update(context, project_id, key, value)
- except exception.ProjectQuotaNotFound:
- db.quota_create(context, project_id, key, value)
- except exception.AdminRequired:
- raise webob.exc.HTTPForbidden()
+ try:
+ value = int(body['quota_set'][key])
+ except (ValueError, TypeError):
+ LOG.warn(_("Quota for %s should be integer.") % key)
+ # NOTE(hzzhoushaoyu): Do not prevent valid value to be
+ # updated. If raise BadRequest, some may be updated and
+ # others may be not.
+ continue
+ self._validate_quota_limit(value)
+ try:
+ db.quota_update(context, project_id, key, value)
+ except exception.ProjectQuotaNotFound:
+ db.quota_create(context, project_id, key, value)
+ except exception.AdminRequired:
+ raise webob.exc.HTTPForbidden()
return {'quota_set': self._get_quotas(context, id)}
@wsgi.serializers(xml=QuotaTemplate)
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index af97a2a6b..ce6f2687f 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -32,7 +32,6 @@ from nova import exception
from nova.network.security_group import openstack_driver
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
-from nova import utils
from nova.virt import netutils
@@ -113,7 +112,7 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
- dom = utils.safe_minidom_parse_string(string)
+ dom = xmlutil.safe_minidom_parse_string(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
@@ -134,7 +133,7 @@ class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
- dom = utils.safe_minidom_parse_string(string)
+ dom = xmlutil.safe_minidom_parse_string(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index 3afd5ff45..fb7b9d591 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -20,7 +20,7 @@ import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import availability_zones
+from nova import compute
from nova import db
from nova import exception
from nova.openstack.common import log as logging
@@ -58,6 +58,10 @@ class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
class ServiceController(object):
+
+ def __init__(self):
+ self.host_api = compute.HostAPI()
+
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""
@@ -66,8 +70,8 @@ class ServiceController(object):
context = req.environ['nova.context']
authorize(context)
now = timeutils.utcnow()
- services = db.service_get_all(context)
- services = availability_zones.set_availability_zones(context, services)
+ services = self.host_api.service_get_all(
+ context, set_zones=True)
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index f759e90b0..0fa9b9e40 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -24,7 +24,7 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api
-from nova import exception
+from nova.compute import instance_types
from nova.openstack.common import timeutils
authorize_show = extensions.extension_authorizer('compute',
@@ -119,18 +119,7 @@ class SimpleTenantUsageController(object):
info['hours'] = self._hours_for(instance,
period_start,
period_stop)
- flavor_type = instance['instance_type_id']
-
- if not flavors.get(flavor_type):
- try:
- it_ref = compute_api.get_instance_type(context,
- flavor_type)
- flavors[flavor_type] = it_ref
- except exception.InstanceTypeNotFound:
- # can't bill if there is no instance type
- continue
-
- flavor = flavors[flavor_type]
+ flavor = instance_types.extract_instance_type(instance)
info['instance_id'] = instance['uuid']
info['name'] = instance['display_name']
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 760dc953a..93d76495f 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -154,7 +154,7 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
- dom = utils.safe_minidom_parse_string(string)
+ dom = xmlutil.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 00aa35538..ce40e087b 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -317,7 +317,7 @@ class ActionDeserializer(CommonDeserializer):
"""
def default(self, string):
- dom = utils.safe_minidom_parse_string(string)
+ dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
@@ -424,7 +424,7 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted server create request."""
- dom = utils.safe_minidom_parse_string(string)
+ dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 5b9900f72..79382d864 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -19,15 +19,14 @@ import inspect
import math
import time
from xml.dom import minidom
-from xml.parsers import expat
from lxml import etree
import webob
+from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
-from nova import utils
from nova import wsgi
@@ -216,13 +215,8 @@ class XMLDeserializer(TextDeserializer):
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
-
- try:
- node = utils.safe_minidom_parse_string(datastring).childNodes[0]
- return {node.nodeName: self._from_xml_node(node, plurals)}
- except expat.ExpatError:
- msg = _("cannot understand XML")
- raise exception.MalformedRequestBody(reason=msg)
+ node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0]
+ return {node.nodeName: self._from_xml_node(node, plurals)}
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
@@ -238,7 +232,8 @@ class XMLDeserializer(TextDeserializer):
else:
result = dict()
for attr in node.attributes.keys():
- result[attr] = node.attributes[attr].nodeValue
+ if not attr.startswith("xmlns"):
+ result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
@@ -634,7 +629,7 @@ def action_peek_json(body):
def action_peek_xml(body):
"""Determine action to invoke."""
- dom = utils.safe_minidom_parse_string(body)
+ dom = xmlutil.safe_minidom_parse_string(body)
action_node = dom.childNodes[0]
return action_node.tagName
@@ -656,11 +651,12 @@ class ResourceExceptionHandler(object):
return True
if isinstance(ex_value, exception.NotAuthorized):
- msg = unicode(ex_value)
+ msg = unicode(ex_value.message % ex_value.kwargs)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.Invalid):
+ msg = unicode(ex_value.message % ex_value.kwargs)
raise Fault(exception.ConvertedException(
- code=ex_value.code, explanation=unicode(ex_value)))
+ code=ex_value.code, explanation=msg))
# Under python 2.6, TypeError's exception value is actually a string,
# so test # here via ex_type instead:
@@ -890,17 +886,8 @@ class Resource(wsgi.Application):
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
- try:
- return self._process_stack(request, action, action_args,
- content_type, body, accept)
- except expat.ExpatError:
- msg = _("Invalid XML in request body")
- return Fault(webob.exc.HTTPBadRequest(explanation=msg))
- except LookupError as e:
- #NOTE(Vijaya Erukala): XML input such as
- # <?xml version="1.0" encoding="TF-8"?>
- # raises LookupError: unknown encoding: TF-8
- return Fault(webob.exc.HTTPBadRequest(explanation=unicode(e)))
+ return self._process_stack(request, action, action_args,
+ content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
@@ -1172,12 +1159,8 @@ class Fault(webob.exc.HTTPException):
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
- offset = explanation.find("Traceback")
- if offset is not -1:
- LOG.debug(_("API request failed, fault raised to the top of"
- " the stack. Detailed stacktrace %s") %
- explanation)
- explanation = explanation[0:offset - 1]
+ LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
+ {'code': code, 'explanation': explanation})
fault_data = {
fault_name: {
diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py
index a2f5b7506..9bcce808c 100644
--- a/nova/api/openstack/xmlutil.py
+++ b/nova/api/openstack/xmlutil.py
@@ -18,7 +18,12 @@
import os.path
from lxml import etree
+from xml.dom import minidom
+from xml.parsers import expat
+from xml import sax
+from xml.sax import expatreader
+from nova import exception
from nova import utils
@@ -905,3 +910,59 @@ def make_flat_dict(name, selector=None, subselector=None, ns=None):
# Return the template
return root
+
+
+class ProtectedExpatParser(expatreader.ExpatParser):
+ """An expat parser which disables DTD's and entities by default."""
+
+ def __init__(self, forbid_dtd=True, forbid_entities=True,
+ *args, **kwargs):
+ # Python 2.x old style class
+ expatreader.ExpatParser.__init__(self, *args, **kwargs)
+ self.forbid_dtd = forbid_dtd
+ self.forbid_entities = forbid_entities
+
+ def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+ raise ValueError("Inline DTD forbidden")
+
+ def entity_decl(self, entityName, is_parameter_entity, value, base,
+ systemId, publicId, notationName):
+ raise ValueError("<!ENTITY> entity declaration forbidden")
+
+ def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+ # expat 1.2
+ raise ValueError("<!ENTITY> unparsed entity forbidden")
+
+ def external_entity_ref(self, context, base, systemId, publicId):
+ raise ValueError("<!ENTITY> external entity forbidden")
+
+ def notation_decl(self, name, base, sysid, pubid):
+ raise ValueError("<!ENTITY> notation forbidden")
+
+ def reset(self):
+ expatreader.ExpatParser.reset(self)
+ if self.forbid_dtd:
+ self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+ self._parser.EndDoctypeDeclHandler = None
+ if self.forbid_entities:
+ self._parser.EntityDeclHandler = self.entity_decl
+ self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+ self._parser.ExternalEntityRefHandler = self.external_entity_ref
+ self._parser.NotationDeclHandler = self.notation_decl
+ try:
+ self._parser.SkippedEntityHandler = None
+ except AttributeError:
+ # some pyexpat versions do not support SkippedEntity
+ pass
+
+
+def safe_minidom_parse_string(xml_string):
+ """Parse an XML string using minidom safely."""
+ try:
+ return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+ except (sax.SAXParseException, ValueError,
+ expat.ExpatError, LookupError) as e:
+ #NOTE(Vijaya Erukala): XML input such as
+ # <?xml version="1.0" encoding="TF-8"?>
+ # raises LookupError: unknown encoding: TF-8
+ raise exception.MalformedRequestBody(reason=str(e))
diff --git a/nova/block_device.py b/nova/block_device.py
index 7d43d15cb..b7a9881b1 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -52,7 +52,7 @@ _ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
def is_ephemeral(device_name):
- return _ephemeral.match(device_name)
+ return _ephemeral.match(device_name) is not None
def ephemeral_num(ephemeral_name):
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index ec4bc447f..c08dfe835 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -277,12 +277,11 @@ class CellsManager(manager.Manager):
if host is None:
cell_name = None
else:
- result = cells_utils.split_cell_and_item(host)
- cell_name = result[0]
- if len(result) > 1:
- host = result[1]
- else:
- host = None
+ cell_name, host = cells_utils.split_cell_and_item(host)
+ # If no cell name was given, assume that the host name is the
+ # cell_name and that the target is all hosts
+ if cell_name is None:
+ cell_name, host = host, cell_name
responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
task_name, period_beginning, period_ending,
host=host, state=state)
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index f83f141dc..82f0a6a48 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -749,8 +749,8 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
# 'metadata' is only updated in the API cell, so don't overwrite
# it based on what child cells say. Make sure to update
# 'cell_name' based on the routing path.
- items_to_remove = ['id', 'security_groups', 'instance_type',
- 'volumes', 'cell_name', 'name', 'metadata']
+ items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
+ 'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
instance['cell_name'] = _reverse_path(message.routing_path)
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
index e9560969a..7c297e341 100644
--- a/nova/cells/utils.py
+++ b/nova/cells/utils.py
@@ -56,12 +56,18 @@ def get_instances_to_sync(context, updated_since=None, project_id=None,
def cell_with_item(cell_name, item):
"""Turn cell_name and item into <cell_name>@<item>."""
+ if cell_name is None:
+ return item
return cell_name + _CELL_ITEM_SEP + str(item)
def split_cell_and_item(cell_and_item):
"""Split a combined cell@item and return them."""
- return cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
+ result = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
+ if len(result) == 1:
+ return (None, cell_and_item)
+ else:
+ return result
def _add_cell_to_service(service, cell_name):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 0603b929d..4abb5e886 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1031,15 +1031,13 @@ class API(base.Base):
instance,
**attrs)
- # Avoid double-counting the quota usage reduction
- # where delete is already in progress
- if (old['vm_state'] != vm_states.SOFT_DELETED and
- old['task_state'] not in (task_states.DELETING,
- task_states.SOFT_DELETING)):
- reservations = self._create_reservations(context,
- old,
- updated,
- project_id)
+ # NOTE(comstud): If we delete the instance locally, we'll
+ # commit the reservations here. Otherwise, the manager side
+ # will commit or rollback the reservations based on success.
+ reservations = self._create_reservations(context,
+ old,
+ updated,
+ project_id)
if not host:
# Just update database, nothing else we can do
@@ -1099,17 +1097,18 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.DELETE)
- cb(context, instance, bdms)
+ cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
- if reservations:
- QUOTAS.commit(context,
- reservations,
- project_id=project_id)
+ if reservations:
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
+ reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
@@ -1210,16 +1209,18 @@ class API(base.Base):
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
- def soft_delete(context, instance, bdms):
- self.compute_rpcapi.soft_delete_instance(context, instance)
+ def soft_delete(context, instance, bdms, reservations=None):
+ self.compute_rpcapi.soft_delete_instance(context, instance,
+ reservations=reservations)
self._delete(context, instance, soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
- def terminate(context, instance, bdms):
- self.compute_rpcapi.terminate_instance(context, instance, bdms)
+ def terminate(context, instance, bdms, reservations=None):
+ self.compute_rpcapi.terminate_instance(context, instance, bdms,
+ reservations=reservations)
self._delete(context, instance, terminate,
task_state=task_states.DELETING)
@@ -1848,11 +1849,12 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
- def confirm_resize(self, context, instance):
+ def confirm_resize(self, context, instance, migration_ref=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
- migration_ref = self.db.migration_get_by_instance_and_status(elevated,
- instance['uuid'], 'finished')
+ if migration_ref is None:
+ migration_ref = self.db.migration_get_by_instance_and_status(
+ elevated, instance['uuid'], 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, migration_ref)
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index d5a07490b..fe6f5dc62 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -255,9 +255,9 @@ class ComputeCellsAPI(compute_api.API):
# broadcast a message down to all cells and hope this ends
# up resolving itself... Worse case.. the instance will
# show back up again here.
- delete_type = method == 'soft_delete' and 'soft' or 'hard'
+ delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
- instance['uuid'], delete_type)
+ instance, delete_type)
@validate_cell
def restore(self, context, instance):
@@ -615,10 +615,7 @@ class HostAPI(compute_api.HostAPI):
this call to cells, as we have instance information here in
the API cell.
"""
- try:
- cell_name, host_name = cells_utils.split_cell_and_item(host_name)
- except ValueError:
- cell_name = None
+ cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5288ba6b9..ad82d31f2 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1,4 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=6 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -65,11 +65,10 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import paths
-from nova import quota
from nova import safe_utils
-from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
from nova.virt import event as virtevent
@@ -178,8 +177,6 @@ CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
-QUOTAS = quota.QUOTAS
-
LOG = logging.getLogger(__name__)
@@ -325,7 +322,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.26'
+ RPC_API_VERSION = '2.27'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -339,7 +336,6 @@ class ComputeManager(manager.SchedulerDependentManager):
self._last_info_cache_heal = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
- self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.conductor_api = conductor.API()
self.is_quantum_security_groups = (
openstack_driver.is_quantum_security_groups())
@@ -715,6 +711,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance,
volume,
bdm['device_name'])
+ if 'serial' not in cinfo:
+ cinfo['serial'] = bdm['volume_id']
self.conductor_api.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
@@ -747,6 +745,15 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
self._check_instance_exists(context, instance)
+
+ try:
+ self._start_building(context, instance)
+ except exception.InstanceNotFound:
+ LOG.info(_("Instance disappeared before we could start it"),
+ instance=instance)
+ # Quickly bail out of here
+ return
+
image_meta = self._check_image_size(context, instance)
if node is None:
@@ -759,8 +766,6 @@ class ComputeManager(manager.SchedulerDependentManager):
else:
extra_usage_info = {}
- self._start_building(context, instance)
-
self._notify_about_instance_usage(
context, instance, "create.start",
extra_usage_info=extra_usage_info)
@@ -815,7 +820,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# try to re-schedule instance:
self._reschedule_or_reraise(context, instance, exc_info,
requested_networks, admin_password, injected_files,
- is_first_time, request_spec, filter_properties)
+ is_first_time, request_spec, filter_properties, bdms)
else:
# Spawn success:
self._notify_about_instance_usage(context, instance,
@@ -833,7 +838,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _reschedule_or_reraise(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
- request_spec, filter_properties):
+ request_spec, filter_properties, bdms=None):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
@@ -844,9 +849,16 @@ class ComputeManager(manager.SchedulerDependentManager):
instance, exc_info[1], exc_info=exc_info)
try:
- self._deallocate_network(context, instance)
+ LOG.debug(_("Clean up resource before rescheduling."),
+ instance=instance)
+ if bdms is None:
+ capi = self.conductor_api
+ bdms = capi.block_device_mapping_get_all_by_instance(context,
+ instance)
+ self._shutdown_instance(context, instance, bdms)
+ self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception:
- # do not attempt retry if network de-allocation failed:
+ # do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
@@ -1225,35 +1237,63 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(vish): bdms will be deleted on instance destroy
@hooks.add_hook("delete_instance")
- def _delete_instance(self, context, instance, bdms):
- """Delete an instance on this host."""
+ def _delete_instance(self, context, instance, bdms,
+ reservations=None):
+ """Delete an instance on this host. Commit or rollback quotas
+ as necessary.
+ """
instance_uuid = instance['uuid']
- self.conductor_api.instance_info_cache_delete(context, instance)
- self._notify_about_instance_usage(context, instance, "delete.start")
- self._shutdown_instance(context, instance, bdms)
- # NOTE(vish): We have already deleted the instance, so we have
- # to ignore problems cleaning up the volumes. It would
- # be nice to let the user know somehow that the volume
- # deletion failed, but it is not acceptable to have an
- # instance that can not be deleted. Perhaps this could
- # be reworked in the future to set an instance fault
- # the first time and to only ignore the failure if the
- # instance is already in ERROR.
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
+ was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
+ if was_soft_deleted:
+ # Instances in SOFT_DELETED vm_state have already had quotas
+ # decremented.
+ try:
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ except Exception:
+ pass
+ reservations = None
+
try:
- self._cleanup_volumes(context, instance_uuid, bdms)
- except Exception as exc:
- LOG.warn(_("Ignoring volume cleanup failure due to %s") % exc,
- instance_uuid=instance_uuid)
- # if a delete task succeed, always update vm state and task state
- # without expecting task state to be DELETING
- instance = self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.DELETED,
- task_state=None,
- terminated_at=timeutils.utcnow())
- system_meta = utils.metadata_to_dict(instance['system_metadata'])
- self.conductor_api.instance_destroy(context, instance)
+ self.conductor_api.instance_info_cache_delete(context, instance)
+ self._notify_about_instance_usage(context, instance,
+ "delete.start")
+ self._shutdown_instance(context, instance, bdms)
+ # NOTE(vish): We have already deleted the instance, so we have
+ # to ignore problems cleaning up the volumes. It
+ # would be nice to let the user know somehow that
+ # the volume deletion failed, but it is not
+ # acceptable to have an instance that can not be
+ # deleted. Perhaps this could be reworked in the
+ # future to set an instance fault the first time
+ # and to only ignore the failure if the instance
+ # is already in ERROR.
+ try:
+ self._cleanup_volumes(context, instance_uuid, bdms)
+ except Exception as exc:
+ err_str = _("Ignoring volume cleanup failure due to %s")
+ LOG.warn(err_str % exc, instance=instance)
+ # if a delete task succeed, always update vm state and task
+ # state without expecting task state to be DELETING
+ instance = self._instance_update(context,
+ instance_uuid,
+ vm_state=vm_states.DELETED,
+ task_state=None,
+ terminated_at=timeutils.utcnow())
+ system_meta = utils.metadata_to_dict(instance['system_metadata'])
+ self.conductor_api.instance_destroy(context, instance)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ self._quota_commit(context, reservations, project_id=project_id)
# ensure block device mappings are not leaked
self.conductor_api.block_device_mapping_destroy(context, bdms)
@@ -1267,7 +1307,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_event
@wrap_instance_fault
- def terminate_instance(self, context, instance, bdms=None):
+ def terminate_instance(self, context, instance, bdms=None,
+ reservations=None):
"""Terminate an instance on this host."""
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
@@ -1275,7 +1316,6 @@ class ComputeManager(manager.SchedulerDependentManager):
# attempt should not result in a further decrement of the quota_usages
# in_use count (see bug 1046236).
- elevated = context.elevated()
# NOTE(danms): remove this compatibility in the future
if not bdms:
bdms = self._get_instance_volume_bdms(context, instance)
@@ -1283,7 +1323,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_terminate_instance(instance, bdms):
try:
- self._delete_instance(context, instance, bdms)
+ self._delete_instance(context, instance, bdms,
+ reservations=reservations)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance=instance)
@@ -1337,22 +1378,34 @@ class ComputeManager(manager.SchedulerDependentManager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
- def soft_delete_instance(self, context, instance):
+ def soft_delete_instance(self, context, instance, reservations=None):
"""Soft delete an instance on this host."""
- self._notify_about_instance_usage(context, instance,
- "soft_delete.start")
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
try:
- self.driver.soft_delete(instance)
- except NotImplementedError:
- # Fallback to just powering off the instance if the hypervisor
- # doesn't implement the soft_delete method
- self.driver.power_off(instance)
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SOFT_DELETED,
- expected_task_state=task_states.SOFT_DELETING,
- task_state=None)
+ self._notify_about_instance_usage(context, instance,
+ "soft_delete.start")
+ try:
+ self.driver.soft_delete(instance)
+ except NotImplementedError:
+ # Fallback to just powering off the instance if the
+ # hypervisor doesn't implement the soft_delete method
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ self._quota_commit(context, reservations, project_id=project_id)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1544,6 +1597,32 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info=network_info,
extra_usage_info=extra_usage_info)
+ def _handle_bad_volumes_detached(self, context, instance, bad_devices,
+ block_device_info):
+ """Handle cases where the virt-layer had to detach non-working volumes
+ in order to complete an operation.
+ """
+ for bdm in block_device_info['block_device_mapping']:
+ if bdm.get('mount_device') in bad_devices:
+ try:
+ volume_id = bdm['connection_info']['data']['volume_id']
+ except KeyError:
+ continue
+
+ # NOTE(sirp): ideally we'd just call
+ # `compute_api.detach_volume` here but since that hits the
+ # DB directly, that's off limits from within the
+ # compute-manager.
+ #
+ # API-detach
+ LOG.info(_("Detaching from volume api: %s") % volume_id)
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_detach(context, volume)
+ self.volume_api.begin_detaching(context, volume)
+
+ # Manager-detach
+ self.detach_volume(context, volume_id, instance)
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_event
@@ -1578,10 +1657,16 @@ class ComputeManager(manager.SchedulerDependentManager):
'expected: %(running)s)') % locals(),
context=context, instance=instance)
+ def bad_volumes_callback(bad_devices):
+ self._handle_bad_volumes_detached(
+ context, instance, bad_devices, block_device_info)
+
try:
self.driver.reboot(context, instance,
self._legacy_nw_info(network_info),
- reboot_type, block_device_info)
+ reboot_type,
+ block_device_info=block_device_info,
+ bad_volumes_callback=bad_volumes_callback)
except Exception, exc:
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
@@ -2080,13 +2165,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self._quota_commit(context, reservations)
- def _quota_commit(self, context, reservations):
+ def _quota_commit(self, context, reservations, project_id=None):
if reservations:
- self.conductor_api.quota_commit(context, reservations)
+ self.conductor_api.quota_commit(context, reservations,
+ project_id=project_id)
- def _quota_rollback(self, context, reservations):
+ def _quota_rollback(self, context, reservations, project_id=None):
if reservations:
- self.conductor_api.quota_rollback(context, reservations)
+ self.conductor_api.quota_rollback(context, reservations,
+ project_id=project_id)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
@@ -2579,6 +2666,8 @@ class ComputeManager(manager.SchedulerDependentManager):
else:
return '\n'.join(log.split('\n')[-int(length):])
+ @rpc_common.client_exceptions(exception.ConsoleTypeInvalid,
+ exception.InstanceNotReady, exception.InstanceNotFound)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
@@ -2599,14 +2688,21 @@ class ComputeManager(manager.SchedulerDependentManager):
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
- # Retrieve connect info from driver, and then decorate with our
- # access info token
- connect_info = self.driver.get_vnc_console(instance)
- connect_info['token'] = token
- connect_info['access_url'] = access_url
+ try:
+ # Retrieve connect info from driver, and then decorate with our
+ # access info token
+ connect_info = self.driver.get_vnc_console(instance)
+ connect_info['token'] = token
+ connect_info['access_url'] = access_url
+ except exception.InstanceNotFound:
+ if instance['vm_state'] != vm_states.BUILDING:
+ raise
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
+ @rpc_common.client_exceptions(exception.ConsoleTypeInvalid,
+ exception.InstanceNotReady, exception.InstanceNotFound)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
@@ -2626,14 +2722,21 @@ class ComputeManager(manager.SchedulerDependentManager):
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
- # Retrieve connect info from driver, and then decorate with our
- # access info token
- connect_info = self.driver.get_spice_console(instance)
- connect_info['token'] = token
- connect_info['access_url'] = access_url
+ try:
+ # Retrieve connect info from driver, and then decorate with our
+ # access info token
+ connect_info = self.driver.get_spice_console(instance)
+ connect_info['token'] = token
+ connect_info['access_url'] = access_url
+ except exception.InstanceNotFound:
+ if instance['vm_state'] != vm_states.BUILDING:
+ raise
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
+ @rpc_common.client_exceptions(exception.ConsoleTypeInvalid,
+ exception.InstanceNotReady, exception.InstanceNotFound)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
@@ -3322,7 +3425,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)
continue
try:
- self.compute_api.confirm_resize(context, instance)
+ self.compute_api.confirm_resize(context, instance,
+ migration_ref=migration)
except Exception, e:
msg = _("Error auto-confirming resize: %(e)s. "
"Will retry later.")
@@ -3709,6 +3813,9 @@ class ComputeManager(manager.SchedulerDependentManager):
bdms = capi.block_device_mapping_get_all_by_instance(
context, instance)
LOG.info(_('Reclaiming deleted instance'), instance=instance)
+ # NOTE(comstud): Quotas were already accounted for when
+ # the instance was soft deleted, so there's no need to
+ # pass reservations here.
self._delete_instance(context, instance, bdms)
@manager.periodic_task
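
The get_vnc_console/get_spice_console hunks above both apply the same fix: a driver that does not yet know about the instance raises InstanceNotFound, and while the guest is still in the BUILDING state that is translated into InstanceNotReady so callers see a retryable condition instead of a hard "not found". A minimal sketch of that pattern, reusing the exception and vm_state names from the diff (an illustration, not the actual manager code):

    from nova import exception
    from nova.compute import vm_states

    def get_console_connect_info(driver, instance, token, access_url):
        try:
            # Retrieve connect info from the driver, then decorate it with
            # the access token and URL, as the manager methods above do.
            connect_info = driver.get_vnc_console(instance)
            connect_info['token'] = token
            connect_info['access_url'] = access_url
            return connect_info
        except exception.InstanceNotFound:
            # The hypervisor may not know about a guest that is still being
            # built; report "not ready" rather than "not found" so the
            # caller can retry.
            if instance['vm_state'] != vm_states.BUILDING:
                raise
            raise exception.InstanceNotReady(instance_id=instance['uuid'])
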
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 914c45471..62c1ed9a0 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -163,6 +163,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.25 - Add attach_interface() and detach_interface()
2.26 - Add validate_console_token to ensure the service connects to
vnc on the correct port
+ 2.27 - Adds 'reservations' to terminate_instance() and
+ soft_delete_instance()
'''
#
@@ -588,13 +590,14 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def terminate_instance(self, ctxt, instance, bdms):
+ def terminate_instance(self, ctxt, instance, bdms, reservations=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('terminate_instance',
- instance=instance_p, bdms=bdms_p),
+ instance=instance_p, bdms=bdms_p,
+ reservations=reservations),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.4')
+ version='2.27')
def unpause_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
@@ -615,11 +618,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
- def soft_delete_instance(self, ctxt, instance):
+ def soft_delete_instance(self, ctxt, instance, reservations=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('soft_delete_instance',
- instance=instance_p),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, reservations=reservations),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='2.27')
def restore_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
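
terminate_instance() and soft_delete_instance() now carry an optional reservations list and pin the message to RPC version 2.27, so older compute services are never sent a kwarg they do not understand. A hypothetical caller-side sketch of how such reservations could be produced and handed off (QUOTAS, the delta values, and this helper are assumptions for illustration, not this commit's actual nova/compute/api.py change):

    from nova import quota

    QUOTAS = quota.QUOTAS

    def reserve_and_terminate(compute_rpcapi, context, instance, bdms):
        # Hypothetical sketch: reserve the quota deltas up front and let the
        # reservation ids ride along with the cast, to be committed or rolled
        # back on the compute node once the terminate succeeds or fails.
        reservations = QUOTAS.reserve(context,
                                      project_id=instance['project_id'],
                                      instances=-1,
                                      cores=-instance['vcpus'],
                                      ram=-instance['memory_mb'])
        compute_rpcapi.terminate_instance(context, instance, bdms,
                                          reservations=reservations)
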
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 0b46c3d2f..a8a6e9f53 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -323,11 +323,23 @@ class LocalAPI(object):
instance,
migration)
- def quota_commit(self, context, reservations):
- return self._manager.quota_commit(context, reservations)
-
- def quota_rollback(self, context, reservations):
- return self._manager.quota_rollback(context, reservations)
+ def quota_commit(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota commit call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self._manager.quota_commit(context,
+ reservations=reservations)
+
+ def quota_rollback(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota rollback call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self._manager.quota_rollback(context,
+ reservations=reservations)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
@@ -656,11 +668,21 @@ class API(object):
instance,
migration)
- def quota_commit(self, context, reservations):
- return self.conductor_rpcapi.quota_commit(context, reservations)
-
- def quota_rollback(self, context, reservations):
- return self.conductor_rpcapi.quota_rollback(context, reservations)
+ def quota_commit(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota commit call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self.conductor_rpcapi.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota rollback call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self.conductor_rpcapi.quota_rollback(context, reservations)
def get_ec2_ids(self, context, instance):
return self.conductor_rpcapi.get_ec2_ids(context, instance)
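
Both conductor APIs work around bug 1153795 by temporarily swapping context.project_id before calling into the manager, so the quota commit or rollback is applied against the instance's project even when an admin from another tenant triggers it. utils.temporary_mutation is an existing Nova helper; a rough, self-contained sketch of the idea (not the exact nova/utils.py code):

    import contextlib

    @contextlib.contextmanager
    def temporary_mutation(obj, **kwargs):
        """Temporarily set attributes on obj, restoring them on exit."""
        old_values = {}
        try:
            for attr, new_value in kwargs.items():
                old_values[attr] = getattr(obj, attr)
                setattr(obj, attr, new_value)
            yield
        finally:
            for attr, old_value in old_values.items():
                setattr(obj, attr, old_value)
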
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 2a0853491..5acd7b678 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -46,14 +46,13 @@ allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
-class ConductorManager(manager.SchedulerDependentManager):
+class ConductorManager(manager.Manager):
"""Mission: TBD."""
RPC_API_VERSION = '1.44'
def __init__(self, *args, **kwargs):
- super(ConductorManager, self).__init__(service_name='conductor',
- *args, **kwargs)
+ super(ConductorManager, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
@@ -306,7 +305,8 @@ class ConductorManager(manager.SchedulerDependentManager):
wr_bytes, instance['uuid'], last_refreshed,
update_totals)
- @rpc_common.client_exceptions(exception.HostBinaryNotFound)
+ @rpc_common.client_exceptions(exception.ComputeHostNotFound,
+ exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
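
Adding ComputeHostNotFound to the client_exceptions list means a lookup for an unknown host is returned to the RPC caller as an expected error instead of being logged as an unhandled remote exception. Conceptually the decorator behaves something like the sketch below; the real implementation lives in nova/openstack/common/rpc/common.py and wraps the original exception so the client side can re-raise it (the ClientException marker class here is only illustrative):

    import functools

    class ClientException(Exception):
        """Marker meaning: the failure is the caller's problem, not a server bug."""

    def client_exceptions(*expected):
        def outer(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except expected as exc:
                    # The real decorator preserves the original exception so
                    # the RPC layer can re-raise it on the calling side.
                    raise ClientException(str(exc))
            return inner
        return outer
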
diff --git a/nova/config.py b/nova/config.py
index ff6681b44..636045ecd 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -22,6 +22,7 @@ from oslo.config import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import rpc
from nova import paths
+from nova import version
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
@@ -32,4 +33,5 @@ def parse_args(argv, default_config_files=None):
rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
project='nova',
+ version=version.version_string(),
default_config_files=default_config_files)
diff --git a/nova/db/api.py b/nova/db/api.py
index eac31bee5..ae7b913cf 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -510,6 +510,12 @@ def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
+
+def fixed_ip_count_by_project(context, project_id, session=None):
+ """Count fixed ips used by project."""
+ return IMPL.fixed_ip_count_by_project(context, project_id,
+ session=session)
+
####################
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 747015af5..800f11071 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -15,3 +15,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+from sqlalchemy import BigInteger
+from sqlalchemy.ext.compiler import compiles
+
+
+@compiles(BigInteger, 'sqlite')
+def compile_big_int_sqlite(type_, compiler, **kw):
+ return 'INTEGER'
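
A quick, self-contained illustration of what the new compiler hook buys: with the hook registered, a BigInteger column is emitted as INTEGER when the schema is created on SQLite (the dialect the unit tests run against), while other dialects keep BIGINT. The snippet repeats the hook so it runs on its own:

    from sqlalchemy import BigInteger, Column, MetaData, Table, create_engine
    from sqlalchemy.ext.compiler import compiles

    @compiles(BigInteger, 'sqlite')
    def compile_big_int_sqlite(type_, compiler, **kw):
        return 'INTEGER'

    engine = create_engine('sqlite://')
    meta = MetaData()
    Table('example', meta, Column('id', BigInteger, primary_key=True))
    meta.create_all(engine)   # the DDL renders the id column as INTEGER
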
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0bd9cfce7..bc88e19a1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -121,7 +121,7 @@ def require_instance_exists_using_uuid(f):
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
- db.instance_get_by_uuid(context, instance_uuid)
+ instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
@@ -136,7 +136,7 @@ def require_aggregate_exists(f):
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
- db.aggregate_get(context, aggregate_id)
+ aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
@@ -530,6 +530,11 @@ def compute_node_update(context, compute_id, values, prune_stats=False):
with session.begin():
_update_stats(context, stats, compute_id, session, prune_stats)
compute_ref = _compute_node_get(context, compute_id, session=session)
+ # Always update this, even if there are no other changes in
+ # the data. This ensures that we invalidate the
+ # scheduler cache of compute node data in case of races.
+ if 'updated_at' not in values:
+ values['updated_at'] = timeutils.utcnow()
convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')
compute_ref.update(values)
return compute_ref
@@ -1164,14 +1169,15 @@ def fixed_ip_get_by_address_detailed(context, address, session=None):
if not session:
session = get_session()
- result = session.query(models.FixedIp, models.Network, models.Instance).\
- filter_by(address=address).\
- outerjoin((models.Network,
- models.Network.id ==
- models.FixedIp.network_id)).\
- outerjoin((models.Instance,
- models.Instance.uuid ==
- models.FixedIp.instance_uuid)).\
+ result = model_query(context, models.FixedIp, models.Network,
+ models.Instance, session=session).\
+ filter_by(address=address).\
+ outerjoin((models.Network,
+ models.Network.id ==
+ models.FixedIp.network_id)).\
+ outerjoin((models.Instance,
+ models.Instance.uuid ==
+ models.FixedIp.instance_uuid)).\
first()
if not result:
@@ -1239,6 +1245,18 @@ def fixed_ip_update(context, address, values):
fixed_ip_ref.save(session=session)
+@require_context
+def fixed_ip_count_by_project(context, project_id, session=None):
+ nova.context.authorize_project_context(context, project_id)
+ return model_query(context, models.FixedIp.id,
+ base_model=models.FixedIp, read_deleted="no",
+ session=session).\
+ join((models.Instance,
+ models.Instance.uuid == models.FixedIp.instance_uuid)).\
+ filter(models.Instance.project_id == project_id).\
+ count()
+
+
###################
@@ -1429,12 +1447,9 @@ def instance_create(context, values):
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
instance_ref.save(session=session)
- # NOTE(comstud): This forces instance_type to be loaded so it
- # exists in the ref when we return. Fixes lazy loading issues.
- instance_ref.instance_type
# create the instance uuid to ec2_id mapping entry for instance
- db.ec2_instance_create(context, instance_ref['uuid'])
+ ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
@@ -1473,10 +1488,12 @@ def instance_destroy(context, instance_uuid, constraint=None):
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
-
session.query(models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
return instance_ref
@@ -1521,7 +1538,6 @@ def _build_instance_get(context, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
options(joinedload('system_metadata'))
@@ -1529,7 +1545,7 @@ def _build_instance_get(context, session=None):
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups', 'metadata',
- 'instance_type', 'system_metadata']
+ 'system_metadata']
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
@@ -1559,7 +1575,6 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
options(joinedload('security_groups')).\
options(joinedload('system_metadata')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))
# Make a copy of the filters dictionary to use going forward, as we'll
@@ -1658,7 +1673,6 @@ def instance_get_active_by_window_joined(context, begin, end=None,
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
options(joinedload('system_metadata')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
@@ -1678,7 +1692,6 @@ def _instance_get_all_query(context, project_only=False):
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
options(joinedload('system_metadata'))
@@ -1847,13 +1860,6 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
instance_ref.update(values)
instance_ref.save(session=session)
- if 'instance_type_id' in values:
- # NOTE(comstud): It appears that sqlalchemy doesn't refresh
- # the instance_type model after you update the ID. You end
- # up with an instance_type model that only has 'id' updated,
- # but the rest of the model has the data from the old
- # instance_type.
- session.refresh(instance_ref['instance_type'])
return (old_instance_ref, instance_ref)
@@ -2830,19 +2836,14 @@ def _block_device_mapping_get_query(context, session=None):
def block_device_mapping_create(context, values):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
-
- session = get_session()
- with session.begin():
- bdm_ref.save(session=session)
+ bdm_ref.save()
@require_context
def block_device_mapping_update(context, bdm_id, values):
- session = get_session()
- with session.begin():
- _block_device_mapping_get_query(context, session=session).\
- filter_by(id=bdm_id).\
- update(values)
+ _block_device_mapping_get_query(context).\
+ filter_by(id=bdm_id).\
+ update(values)
@require_context
@@ -2865,7 +2866,8 @@ def block_device_mapping_update_or_create(context, values):
virtual_name = values['virtual_name']
if (virtual_name is not None and
block_device.is_swap_or_ephemeral(virtual_name)):
- session.query(models.BlockDeviceMapping).\
+
+ _block_device_mapping_get_query(context, session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(virtual_name=virtual_name).\
filter(models.BlockDeviceMapping.device_name !=
@@ -2882,19 +2884,15 @@ def block_device_mapping_get_all_by_instance(context, instance_uuid):
@require_context
def block_device_mapping_destroy(context, bdm_id):
- session = get_session()
- with session.begin():
- session.query(models.BlockDeviceMapping).\
- filter_by(id=bdm_id).\
- soft_delete()
+ _block_device_mapping_get_query(context).\
+ filter_by(id=bdm_id).\
+ soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
- session = get_session()
- with session.begin():
- _block_device_mapping_get_query(context, session=session).\
+ _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@@ -2903,9 +2901,7 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
- session = get_session()
- with session.begin():
- _block_device_mapping_get_query(context, session=session).\
+ _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
@@ -3342,7 +3338,7 @@ def migration_get_in_progress_by_host_and_node(context, host, node,
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted'])).\
- options(joinedload('instance')).\
+ options(joinedload_all('instance.system_metadata')).\
all()
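
fixed_ip_count_by_project() gives the quota layer a way to count a project's in-use fixed IPs. A hypothetical snippet showing the kind of check it enables (the quotas dict, the requested count, and this helper are placeholders; the real enforcement is wired through nova/quota.py, which this commit also touches):

    from nova import db, exception

    def check_fixed_ip_quota(context, quotas, requested=1):
        # Hypothetical check: compare current usage plus the request against
        # a per-project limit such as quotas == {'fixed_ips': 10}.
        used = db.fixed_ip_count_by_project(context, context.project_id)
        if used + requested > quotas['fixed_ips']:
            raise exception.FixedIpLimitExceeded()
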
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
index 20e75a6eb..36545b435 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
@@ -38,8 +38,9 @@ def upgrade(migrate_engine):
i = sys_meta.insert()
for values in q.execute():
for index in range(0, len(instance_type_props)):
+ value = values[index + 1]
i.execute({"key": "instance_type_%s" % instance_type_props[index],
- "value": str(values[index + 1]),
+ "value": None if value is None else str(value),
"instance_uuid": values[0]})
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py b/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py
new file mode 100644
index 000000000..bd8f22a97
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py
@@ -0,0 +1,43 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+from nova.openstack.common import timeutils
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ sys_meta = Table('instance_system_metadata', meta, autoload=True)
+
+ sys_meta.update().\
+ values(value=None).\
+ where(sys_meta.c.key != 'instance_type_name').\
+ where(sys_meta.c.key != 'instance_type_flavorid').\
+ where(sys_meta.c.key.like('instance_type_%')).\
+ where(sys_meta.c.value == 'None').\
+ execute()
+
+ now = timeutils.utcnow()
+ sys_meta.update().\
+ values(created_at=now).\
+ where(sys_meta.c.created_at == None).\
+ where(sys_meta.c.key.like('instance_type_%')).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ # This migration only touches data, and only metadata at that. No need
+ # to go through and delete old metadata items.
+ pass
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index a675357df..ce5f84578 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -154,7 +154,7 @@ class Instance(BASE, NovaBase):
return base_name
def _extra_keys(self):
- return ['name', 'system_metadata']
+ return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
@@ -273,13 +273,6 @@ class InstanceTypes(BASE, NovaBase):
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
- instances = relationship(Instance,
- backref=backref('instance_type', uselist=False),
- foreign_keys=id,
- primaryjoin='and_('
- 'Instance.instance_type_id == '
- 'InstanceTypes.id)')
-
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
diff --git a/nova/exception.py b/nova/exception.py
index 046df24c9..cfc237120 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -388,6 +388,7 @@ class InvalidDevicePath(Invalid):
class DevicePathInUse(Invalid):
message = _("The supplied device path (%(path)s) is in use.")
+ code = 409
class DeviceIsBusy(Invalid):
@@ -1008,6 +1009,10 @@ class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
+class FixedIpLimitExceeded(QuotaError):
+ message = _("Maximum number of fixed ips exceeded")
+
+
class MetadataLimitExceeded(QuotaError):
message = _("Maximum number of metadata items exceeds %(allowed)d")
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 31384b75a..0221e288a 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova jenkins.nova.propose.translation.update.183\n"
+"Project-Id-Version: nova jenkins.nova.propose.translation.update.187\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-03-11 00:03+0000\n"
+"POT-Creation-Date: 2013-03-15 00:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,7 +17,7 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 0.9.6\n"
-#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:98
+#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:91
#, python-format
msgid "block_device_list %s"
msgstr ""
@@ -175,7 +175,7 @@ msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
#: nova/exception.py:242 nova/api/ec2/cloud.py:463
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2728
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2733
msgid "Keypair data is invalid"
msgstr ""
@@ -191,7 +191,7 @@ msgstr ""
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:258 nova/api/openstack/compute/servers.py:1327
+#: nova/exception.py:258 nova/api/openstack/compute/servers.py:1328
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
@@ -346,28 +346,28 @@ msgstr ""
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:394
+#: nova/exception.py:395
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:398
+#: nova/exception.py:399
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:402
+#: nova/exception.py:403
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:406
+#: nova/exception.py:407
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:412
+#: nova/exception.py:413
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -375,90 +375,90 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:419
+#: nova/exception.py:420
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:423
+#: nova/exception.py:424
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:427
+#: nova/exception.py:428
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:431
+#: nova/exception.py:432
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:435
+#: nova/exception.py:436
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:439
+#: nova/exception.py:440
#, python-format
msgid "Invalid ID received %(id)s."
msgstr ""
-#: nova/exception.py:443
+#: nova/exception.py:444
#, python-format
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr ""
-#: nova/exception.py:447
+#: nova/exception.py:448
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:452
+#: nova/exception.py:453
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:457
+#: nova/exception.py:458
#, python-format
msgid "No agent-build associated with id %(id)s."
msgstr ""
-#: nova/exception.py:461
+#: nova/exception.py:462
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:465
+#: nova/exception.py:466
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:469
+#: nova/exception.py:470
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:473
+#: nova/exception.py:474
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:477
+#: nova/exception.py:478
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:481
+#: nova/exception.py:482
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:485
+#: nova/exception.py:486
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:489
+#: nova/exception.py:490
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -466,812 +466,812 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:496
+#: nova/exception.py:497
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:500
+#: nova/exception.py:501
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:504
+#: nova/exception.py:505
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr ""
-#: nova/exception.py:508
+#: nova/exception.py:509
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:512
+#: nova/exception.py:513
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:516
+#: nova/exception.py:517
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:520
+#: nova/exception.py:521
#, python-format
msgid "Port id %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:524
+#: nova/exception.py:525
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:528
+#: nova/exception.py:529
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:532
+#: nova/exception.py:533
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:536
+#: nova/exception.py:537
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:540
+#: nova/exception.py:541
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:544
+#: nova/exception.py:545
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:549
+#: nova/exception.py:550
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:553
+#: nova/exception.py:554
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:557
+#: nova/exception.py:558
#, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr ""
-#: nova/exception.py:561
+#: nova/exception.py:562
#, python-format
msgid "No free port available for instance %(instance)s."
msgstr ""
-#: nova/exception.py:565
+#: nova/exception.py:566
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:569
+#: nova/exception.py:570
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:573
+#: nova/exception.py:574
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:577
+#: nova/exception.py:578
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:582
+#: nova/exception.py:583
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:586
+#: nova/exception.py:587
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:591
+#: nova/exception.py:592
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:596
+#: nova/exception.py:597
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:601
+#: nova/exception.py:602
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:605
+#: nova/exception.py:606
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:609
+#: nova/exception.py:610
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:618
+#: nova/exception.py:619
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:622
+#: nova/exception.py:623
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:626
+#: nova/exception.py:627
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:630
+#: nova/exception.py:631
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:634
+#: nova/exception.py:635
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:638
+#: nova/exception.py:639
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:642
+#: nova/exception.py:643
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:647
+#: nova/exception.py:648
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:653
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:657
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:660
+#: nova/exception.py:661
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:664
+#: nova/exception.py:665
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:668
+#: nova/exception.py:669
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:672
+#: nova/exception.py:673
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:676
+#: nova/exception.py:677
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:680
+#: nova/exception.py:681
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:684
+#: nova/exception.py:685
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:688
+#: nova/exception.py:689
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:692
+#: nova/exception.py:693
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:696
+#: nova/exception.py:697
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:700
+#: nova/exception.py:701
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:705
+#: nova/exception.py:706
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:709
+#: nova/exception.py:710
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:713
+#: nova/exception.py:714
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:717
+#: nova/exception.py:718
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:721
+#: nova/exception.py:722
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:725
+#: nova/exception.py:726
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:729
+#: nova/exception.py:730
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:733
+#: nova/exception.py:734
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:737
+#: nova/exception.py:738
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:742
+#: nova/exception.py:743
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:746
+#: nova/exception.py:747
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:751
+#: nova/exception.py:752
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:756
+#: nova/exception.py:757
#, python-format
msgid "Security group default rule (%rule_id)s not found."
msgstr ""
-#: nova/exception.py:760
+#: nova/exception.py:761
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
msgstr ""
-#: nova/exception.py:765
+#: nova/exception.py:766
msgid "No Unique Match Found."
msgstr ""
-#: nova/exception.py:770
+#: nova/exception.py:771
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:774
+#: nova/exception.py:775
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:779
+#: nova/exception.py:780
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:783
+#: nova/exception.py:784
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:789
+#: nova/exception.py:790
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:793
+#: nova/exception.py:794
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:797
+#: nova/exception.py:798
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:802
+#: nova/exception.py:803
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:807
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:811
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:815
+#: nova/exception.py:816
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:820
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:824
+#: nova/exception.py:825
#, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:829
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:832
+#: nova/exception.py:833
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:836
+#: nova/exception.py:837
msgid "Timeout waiting for response from cell"
msgstr ""
-#: nova/exception.py:840
+#: nova/exception.py:841
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:844
+#: nova/exception.py:845
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:848
+#: nova/exception.py:849
#, python-format
msgid "Exception received during cell processing: %(exc_name)s."
msgstr ""
-#: nova/exception.py:852
+#: nova/exception.py:853
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:856
+#: nova/exception.py:857
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:860
+#: nova/exception.py:861
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:865
+#: nova/exception.py:866
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:869
+#: nova/exception.py:870
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:874
+#: nova/exception.py:875
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:879
+#: nova/exception.py:880
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:884
+#: nova/exception.py:885
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:888
+#: nova/exception.py:889
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:892
+#: nova/exception.py:893
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:897
+#: nova/exception.py:898
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:902
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:906
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:909
+#: nova/exception.py:910
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:913
+#: nova/exception.py:914
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:917
+#: nova/exception.py:918
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:921
+#: nova/exception.py:922
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:925
+#: nova/exception.py:926
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:929
+#: nova/exception.py:930
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:933
+#: nova/exception.py:934
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:938
+#: nova/exception.py:939
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:942
+#: nova/exception.py:943
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:946
+#: nova/exception.py:947
msgid "Migration error"
msgstr ""
-#: nova/exception.py:950
+#: nova/exception.py:951
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:957
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:960
+#: nova/exception.py:961
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:964
+#: nova/exception.py:965
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:968
+#: nova/exception.py:969
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:972
+#: nova/exception.py:973
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:976
+#: nova/exception.py:977
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:980
+#: nova/exception.py:981
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:984
+#: nova/exception.py:985
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:988
+#: nova/exception.py:989
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:992
+#: nova/exception.py:993
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:996
+#: nova/exception.py:997
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:1003
+#: nova/exception.py:1004
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:1008
+#: nova/exception.py:1009
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:1012
+#: nova/exception.py:1013
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:1016
+#: nova/exception.py:1017
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1020
+#: nova/exception.py:1021
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1024
+#: nova/exception.py:1025
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1028
+#: nova/exception.py:1029
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1032
+#: nova/exception.py:1033
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1036
+#: nova/exception.py:1037
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1041
+#: nova/exception.py:1042
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1045
+#: nova/exception.py:1046
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1049
+#: nova/exception.py:1050
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1053
+#: nova/exception.py:1054
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1058
+#: nova/exception.py:1059
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1062
+#: nova/exception.py:1063
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1066
+#: nova/exception.py:1067
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1072
+#: nova/exception.py:1073
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1076
+#: nova/exception.py:1077
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1080
+#: nova/exception.py:1081
#, python-format
msgid "Info cache for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1085
+#: nova/exception.py:1086
#, python-format
msgid "Node %(node_id)s could not be found."
msgstr ""
-#: nova/exception.py:1089
+#: nova/exception.py:1090
#, python-format
msgid "Node with UUID %(node_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1093
+#: nova/exception.py:1094
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1097
+#: nova/exception.py:1098
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1101
+#: nova/exception.py:1102
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1105
+#: nova/exception.py:1106
#, python-format
msgid "Could not upload image %(image_id)s"
msgstr ""
-#: nova/exception.py:1109
+#: nova/exception.py:1110
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1113
+#: nova/exception.py:1114
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1117
+#: nova/exception.py:1118
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1121
+#: nova/exception.py:1122
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1126
+#: nova/exception.py:1127
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1131
+#: nova/exception.py:1132
#, python-format
msgid "Failed to attach network adapter device to %(instance)s"
msgstr ""
-#: nova/exception.py:1135
+#: nova/exception.py:1136
#, python-format
msgid "Failed to detach network adapter device from %(instance)s"
msgstr ""
-#: nova/exception.py:1139
+#: nova/exception.py:1140
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1145
+#: nova/exception.py:1146
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1149
+#: nova/exception.py:1150
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1154
+#: nova/exception.py:1155
#, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
"found"
msgstr ""
-#: nova/exception.py:1159
+#: nova/exception.py:1160
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
-#: nova/exception.py:1163
+#: nova/exception.py:1164
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1167
+#: nova/exception.py:1168
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1171
+#: nova/exception.py:1172
msgid "Instance recreate is not implemented by this virt driver."
msgstr ""
-#: nova/exception.py:1175
+#: nova/exception.py:1176
#, python-format
msgid "The service from servicegroup driver %(driver) is temporarily unavailable."
msgstr ""
-#: nova/exception.py:1180
+#: nova/exception.py:1181
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
-#: nova/exception.py:1185
+#: nova/exception.py:1186
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
-#: nova/exception.py:1190
+#: nova/exception.py:1191
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
@@ -1328,27 +1328,27 @@ msgstr ""
msgid "Rule checked when requested rule is not found"
msgstr ""
-#: nova/quota.py:944
+#: nova/quota.py:962
#, python-format
msgid "Created reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:966
+#: nova/quota.py:984
#, python-format
msgid "Failed to commit reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:968
+#: nova/quota.py:986
#, python-format
msgid "Committed reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:988
+#: nova/quota.py:1006
#, python-format
msgid "Failed to roll back reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:990
+#: nova/quota.py:1008
#, python-format
msgid "Rolled back reservations %(reservations)s"
msgstr ""
@@ -1442,114 +1442,114 @@ msgstr ""
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:201 nova/openstack/common/processutils.py:90
+#: nova/utils.py:197 nova/openstack/common/processutils.py:90
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:212 nova/openstack/common/processutils.py:99
+#: nova/utils.py:208 nova/openstack/common/processutils.py:99
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:236 nova/utils.py:314
+#: nova/utils.py:232 nova/utils.py:310
#: nova/openstack/common/processutils.py:114 nova/virt/powervm/common.py:88
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:249 nova/openstack/common/processutils.py:128
+#: nova/utils.py:245 nova/openstack/common/processutils.py:128
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:289
+#: nova/utils.py:285
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:291
+#: nova/utils.py:287
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:295
+#: nova/utils.py:291
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:330
+#: nova/utils.py:326
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:492
+#: nova/utils.py:488
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:495
+#: nova/utils.py:491
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:534
+#: nova/utils.py:530
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:606
+#: nova/utils.py:602
msgid "in fixed duration looping call"
msgstr ""
-#: nova/utils.py:641
+#: nova/utils.py:637
#, python-format
msgid "Periodic task processor sleeping for %.02f seconds"
msgstr ""
-#: nova/utils.py:648
+#: nova/utils.py:644
msgid "in dynamic looping call"
msgstr ""
-#: nova/utils.py:756
+#: nova/utils.py:698
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:885
+#: nova/utils.py:827
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:914
+#: nova/utils.py:856
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:1070
+#: nova/utils.py:1012
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:1136
+#: nova/utils.py:1078
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1268 nova/virt/configdrive.py:177
+#: nova/utils.py:1210 nova/virt/configdrive.py:177
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/utils.py:1420
+#: nova/utils.py:1362
#, python-format
msgid "%s is not a string or unicode"
msgstr ""
-#: nova/utils.py:1424
+#: nova/utils.py:1366
#, python-format
msgid "%(name)s has less than %(min_length)s characters."
msgstr ""
-#: nova/utils.py:1429
+#: nova/utils.py:1371
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr ""
@@ -2070,70 +2070,70 @@ msgstr ""
msgid "Extension %(ext_name)s extending resource: %(collection)s"
msgstr ""
-#: nova/api/openstack/common.py:114
+#: nova/api/openstack/common.py:113
#, python-format
msgid ""
"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
"Bad upgrade or db corrupted?"
msgstr ""
-#: nova/api/openstack/common.py:153 nova/api/openstack/common.py:187
+#: nova/api/openstack/common.py:152 nova/api/openstack/common.py:186
msgid "limit param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:156 nova/api/openstack/common.py:191
+#: nova/api/openstack/common.py:155 nova/api/openstack/common.py:190
msgid "limit param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:181
+#: nova/api/openstack/common.py:180
msgid "offset param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:195
+#: nova/api/openstack/common.py:194
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:545
+#: nova/api/openstack/common.py:229 nova/api/openstack/compute/servers.py:545
#, python-format
msgid "marker [%s] not found"
msgstr ""
-#: nova/api/openstack/common.py:270
+#: nova/api/openstack/common.py:269
#, python-format
msgid "href %s does not contain version"
msgstr ""
-#: nova/api/openstack/common.py:285
+#: nova/api/openstack/common.py:284
msgid "Image metadata limit exceeded"
msgstr ""
-#: nova/api/openstack/common.py:293
+#: nova/api/openstack/common.py:292
msgid "Image metadata key cannot be blank"
msgstr ""
-#: nova/api/openstack/common.py:296
+#: nova/api/openstack/common.py:295
msgid "Image metadata key too long"
msgstr ""
-#: nova/api/openstack/common.py:299
+#: nova/api/openstack/common.py:298
msgid "Invalid image metadata"
msgstr ""
-#: nova/api/openstack/common.py:350
+#: nova/api/openstack/common.py:349
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr ""
-#: nova/api/openstack/common.py:353
+#: nova/api/openstack/common.py:352
#, python-format
msgid "Instance is in an invalid state for '%(action)s'"
msgstr ""
-#: nova/api/openstack/common.py:433
+#: nova/api/openstack/common.py:432
msgid "Rejecting snapshot request, snapshots currently disabled"
msgstr ""
-#: nova/api/openstack/common.py:435
+#: nova/api/openstack/common.py:434
msgid "Instance snapshots are not permitted at this time."
msgstr ""
@@ -2197,57 +2197,47 @@ msgstr ""
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:200 nova/api/openstack/wsgi.py:622
+#: nova/api/openstack/wsgi.py:199 nova/api/openstack/wsgi.py:617
msgid "cannot understand JSON"
msgstr ""
-#: nova/api/openstack/wsgi.py:224
-#: nova/api/openstack/compute/contrib/cells.py:104
-#: nova/api/openstack/compute/contrib/hosts.py:77
-msgid "cannot understand XML"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:627
+#: nova/api/openstack/wsgi.py:622
msgid "too many body keys"
msgstr ""
-#: nova/api/openstack/wsgi.py:670
+#: nova/api/openstack/wsgi.py:666
#, python-format
msgid "Exception handling resource: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:674
+#: nova/api/openstack/wsgi.py:670
#, python-format
msgid "Fault thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:677
+#: nova/api/openstack/wsgi.py:673
#, python-format
msgid "HTTP exception thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:791
+#: nova/api/openstack/wsgi.py:787
msgid "Unrecognized Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:795
+#: nova/api/openstack/wsgi.py:791
msgid "No Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:799
+#: nova/api/openstack/wsgi.py:795
msgid "Empty body provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:897
-msgid "Invalid XML in request body"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:916
+#: nova/api/openstack/wsgi.py:903
#, python-format
msgid "There is no such action: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:919 nova/api/openstack/wsgi.py:936
+#: nova/api/openstack/wsgi.py:906 nova/api/openstack/wsgi.py:923
#: nova/api/openstack/compute/server_metadata.py:58
#: nova/api/openstack/compute/server_metadata.py:76
#: nova/api/openstack/compute/server_metadata.py:101
@@ -2257,45 +2247,43 @@ msgstr ""
msgid "Malformed request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:923
+#: nova/api/openstack/wsgi.py:910
#, python-format
msgid "Action: '%(action)s', body: %(body)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:924
+#: nova/api/openstack/wsgi.py:911
#, python-format
msgid "Calling method %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:933
+#: nova/api/openstack/wsgi.py:920
msgid "Unsupported Content-Type"
msgstr ""
-#: nova/api/openstack/wsgi.py:945
+#: nova/api/openstack/wsgi.py:932
msgid "Malformed request url"
msgstr ""
-#: nova/api/openstack/wsgi.py:1177
+#: nova/api/openstack/wsgi.py:1162
#, python-format
-msgid ""
-"API request failed, fault raised to the top of the stack. Detailed "
-"stacktrace %s"
+msgid "Returning %(code)s to user: %(explanation)s"
msgstr ""
-#: nova/api/openstack/xmlutil.py:265
+#: nova/api/openstack/xmlutil.py:270
msgid "element is not a child"
msgstr ""
-#: nova/api/openstack/xmlutil.py:414
+#: nova/api/openstack/xmlutil.py:419
msgid "root element selecting a list"
msgstr ""
-#: nova/api/openstack/xmlutil.py:739
+#: nova/api/openstack/xmlutil.py:744
#, python-format
msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
msgstr ""
-#: nova/api/openstack/xmlutil.py:858
+#: nova/api/openstack/xmlutil.py:863
msgid "subclasses must implement construct()!"
msgstr ""
@@ -2344,6 +2332,10 @@ msgstr ""
msgid "Invalid metadata key"
msgstr ""
+#: nova/api/openstack/compute/images.py:165
+msgid "You are not allowed to delete the image."
+msgstr ""
+
#: nova/api/openstack/compute/ips.py:71
msgid "Instance does not exist"
msgstr ""
@@ -2375,7 +2367,7 @@ msgid "Metadata item was not found"
msgstr ""
#: nova/api/openstack/compute/servers.py:510
-#: nova/api/openstack/compute/contrib/cells.py:278
+#: nova/api/openstack/compute/contrib/cells.py:272
msgid "Invalid changes-since value"
msgstr ""
@@ -2390,9 +2382,9 @@ msgstr ""
#: nova/api/openstack/compute/servers.py:565
#: nova/api/openstack/compute/servers.py:732
-#: nova/api/openstack/compute/servers.py:999
-#: nova/api/openstack/compute/servers.py:1105
-#: nova/api/openstack/compute/servers.py:1278
+#: nova/api/openstack/compute/servers.py:1000
+#: nova/api/openstack/compute/servers.py:1106
+#: nova/api/openstack/compute/servers.py:1279
msgid "Instance could not be found"
msgstr ""
@@ -2463,144 +2455,144 @@ msgstr ""
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:796
-#: nova/api/openstack/compute/servers.py:910
+#: nova/api/openstack/compute/servers.py:797
+#: nova/api/openstack/compute/servers.py:911
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:837
+#: nova/api/openstack/compute/servers.py:838
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:840
+#: nova/api/openstack/compute/servers.py:841
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:846
+#: nova/api/openstack/compute/servers.py:847
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:849
+#: nova/api/openstack/compute/servers.py:850
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:853
+#: nova/api/openstack/compute/servers.py:854
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:905
+#: nova/api/openstack/compute/servers.py:906
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:913
+#: nova/api/openstack/compute/servers.py:914
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:987
+#: nova/api/openstack/compute/servers.py:988
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:991
+#: nova/api/openstack/compute/servers.py:992
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1017
-#: nova/api/openstack/compute/servers.py:1037
+#: nova/api/openstack/compute/servers.py:1018
+#: nova/api/openstack/compute/servers.py:1038
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1023
+#: nova/api/openstack/compute/servers.py:1024
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1040
+#: nova/api/openstack/compute/servers.py:1041
msgid "Flavor used by the instance could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1046
+#: nova/api/openstack/compute/servers.py:1047
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1059
+#: nova/api/openstack/compute/servers.py:1060
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1063
+#: nova/api/openstack/compute/servers.py:1064
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1076
+#: nova/api/openstack/compute/servers.py:1077
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1088
+#: nova/api/openstack/compute/servers.py:1089
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1091
+#: nova/api/openstack/compute/servers.py:1092
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1115
+#: nova/api/openstack/compute/servers.py:1116
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1124
+#: nova/api/openstack/compute/servers.py:1125
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1151
+#: nova/api/openstack/compute/servers.py:1152
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1164
+#: nova/api/openstack/compute/servers.py:1165
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1168
-#: nova/api/openstack/compute/servers.py:1375
+#: nova/api/openstack/compute/servers.py:1169
+#: nova/api/openstack/compute/servers.py:1376
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1174
+#: nova/api/openstack/compute/servers.py:1175
msgid "Unable to set password on instance"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1183
+#: nova/api/openstack/compute/servers.py:1184
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1196
+#: nova/api/openstack/compute/servers.py:1197
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1199
+#: nova/api/openstack/compute/servers.py:1200
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1217
+#: nova/api/openstack/compute/servers.py:1218
#: nova/api/openstack/compute/contrib/aggregates.py:143
#: nova/api/openstack/compute/contrib/coverage_ext.py:277
#: nova/api/openstack/compute/contrib/keypairs.py:78
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1223
+#: nova/api/openstack/compute/servers.py:1224
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1285
+#: nova/api/openstack/compute/servers.py:1286
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1318
+#: nova/api/openstack/compute/servers.py:1319
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1402
+#: nova/api/openstack/compute/servers.py:1403
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
@@ -2733,13 +2725,13 @@ msgstr ""
msgid "Cannot add host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:171
-#: nova/api/openstack/compute/contrib/aggregates.py:175
+#: nova/api/openstack/compute/contrib/aggregates.py:172
+#: nova/api/openstack/compute/contrib/aggregates.py:176
#, python-format
msgid "Cannot remove host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:195
+#: nova/api/openstack/compute/contrib/aggregates.py:196
#, python-format
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
@@ -2771,28 +2763,28 @@ msgstr ""
msgid "Must specify id or address"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:199
+#: nova/api/openstack/compute/contrib/cells.py:193
msgid "Cell name cannot be empty"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:203
+#: nova/api/openstack/compute/contrib/cells.py:197
msgid "Cell name cannot contain '!' or '.'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:210
+#: nova/api/openstack/compute/contrib/cells.py:204
msgid "Cell type must be 'parent' or 'child'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:230
-#: nova/api/openstack/compute/contrib/cells.py:250
+#: nova/api/openstack/compute/contrib/cells.py:224
+#: nova/api/openstack/compute/contrib/cells.py:244
msgid "No cell information in request"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:235
+#: nova/api/openstack/compute/contrib/cells.py:229
msgid "No cell name in request"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:272
+#: nova/api/openstack/compute/contrib/cells.py:266
msgid "Only 'updated_since' and 'project_id' are understood."
msgstr ""
@@ -2823,6 +2815,10 @@ msgstr ""
msgid "Unable to get console"
msgstr ""
+#: nova/api/openstack/compute/contrib/consoles.py:53
+msgid "Instance not yet ready"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/coverage_ext.py:114
#, python-format
msgid "Can't connect to service: %s, no portspecified\n"
@@ -2941,7 +2937,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/floating_ips.py:215
#: nova/api/openstack/compute/contrib/floating_ips.py:271
-#: nova/api/openstack/compute/contrib/security_groups.py:417
+#: nova/api/openstack/compute/contrib/security_groups.py:416
msgid "Missing parameter dict"
msgstr ""
@@ -2989,53 +2985,53 @@ msgstr ""
msgid "fping utility is not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:173
+#: nova/api/openstack/compute/contrib/hosts.py:167
#, python-format
msgid "Invalid update setting: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:176
+#: nova/api/openstack/compute/contrib/hosts.py:170
#, python-format
msgid "Invalid status: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:178
+#: nova/api/openstack/compute/contrib/hosts.py:172
#, python-format
msgid "Invalid mode: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:180
+#: nova/api/openstack/compute/contrib/hosts.py:174
msgid "'status' or 'maintenance_mode' needed for host update"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:195
+#: nova/api/openstack/compute/contrib/hosts.py:189
#, python-format
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:200
+#: nova/api/openstack/compute/contrib/hosts.py:194
msgid "Virt driver does not implement host maintenance mode."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:213
+#: nova/api/openstack/compute/contrib/hosts.py:207
#, python-format
msgid "Enabling host %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:215
+#: nova/api/openstack/compute/contrib/hosts.py:209
#, python-format
msgid "Disabling host %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:220
+#: nova/api/openstack/compute/contrib/hosts.py:214
msgid "Virt driver does not implement host disabled status."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:236
+#: nova/api/openstack/compute/contrib/hosts.py:230
msgid "Virt driver does not implement host power management."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:322
+#: nova/api/openstack/compute/contrib/hosts.py:316
msgid "Describe-resource is admin only functionality"
msgstr ""
@@ -3175,7 +3171,12 @@ msgstr ""
msgid "Quota limit must be -1 or greater."
msgstr ""
-#: nova/api/openstack/compute/contrib/quotas.py:96
+#: nova/api/openstack/compute/contrib/quotas.py:100
+#, python-format
+msgid "Bad key(s) %s in quota_set"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/quotas.py:107
#, python-format
msgid "Quota for %s should be integer."
msgstr ""
@@ -3185,7 +3186,7 @@ msgid "Malformed scheduler_hints attribute"
msgstr ""
#: nova/api/openstack/compute/contrib/security_group_default_rules.py:129
-#: nova/api/openstack/compute/contrib/security_groups.py:328
+#: nova/api/openstack/compute/contrib/security_groups.py:327
msgid "Not enough parameters to build a valid rule."
msgstr ""
@@ -3202,16 +3203,16 @@ msgstr ""
msgid "security group default rule not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:336
+#: nova/api/openstack/compute/contrib/security_groups.py:335
#, python-format
msgid "Bad prefix for network in cidr %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:420
+#: nova/api/openstack/compute/contrib/security_groups.py:419
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:424
+#: nova/api/openstack/compute/contrib/security_groups.py:423
msgid "Security group name cannot be empty"
msgstr ""
@@ -3223,7 +3224,7 @@ msgstr ""
msgid "stop instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:210
+#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:199
msgid "Invalid start time. The start time cannot occur after the end time."
msgstr ""
@@ -3381,16 +3382,16 @@ msgstr ""
msgid "Error scheduling instances %(instance_uuids)s"
msgstr ""
-#: nova/cells/state.py:264
+#: nova/cells/state.py:270
msgid "Updating cell cache from db."
msgstr ""
-#: nova/cells/state.py:309
+#: nova/cells/state.py:315
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr ""
-#: nova/cells/state.py:324
+#: nova/cells/state.py:330
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
@@ -3478,176 +3479,176 @@ msgstr ""
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:1150
+#: nova/compute/api.py:1149
#, python-format
msgid "instance type %(old_inst_type_id)d not found"
msgstr ""
-#: nova/compute/api.py:1156
+#: nova/compute/api.py:1155
msgid "going to delete a resizing instance"
msgstr ""
-#: nova/compute/api.py:1166
+#: nova/compute/api.py:1165
#, python-format
msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1210
+#: nova/compute/api.py:1209
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:1232
+#: nova/compute/api.py:1233
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1282
+#: nova/compute/api.py:1283
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1299
+#: nova/compute/api.py:1300
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1372
+#: nova/compute/api.py:1373
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1609
+#: nova/compute/api.py:1610
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1964
+#: nova/compute/api.py:1966
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1973
+#: nova/compute/api.py:1975
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:2015
+#: nova/compute/api.py:2017
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:2149
+#: nova/compute/api.py:2151
msgid "Cannot rescue a volume-backed instance"
msgstr ""
-#: nova/compute/api.py:2256
+#: nova/compute/api.py:2258
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:2264
+#: nova/compute/api.py:2266
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2332
+#: nova/compute/api.py:2334
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2442
+#: nova/compute/api.py:2444
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2461
+#: nova/compute/api.py:2463
msgid "vm evacuation scheduled"
msgstr ""
-#: nova/compute/api.py:2465
+#: nova/compute/api.py:2467
#, python-format
msgid ""
"Instance compute service state on %(inst_host)s expected to be down, but "
"it was up."
msgstr ""
-#: nova/compute/api.py:2701
+#: nova/compute/api.py:2706
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2705
+#: nova/compute/api.py:2710
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2810
+#: nova/compute/api.py:2815
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2813
+#: nova/compute/api.py:2818
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2821
+#: nova/compute/api.py:2826
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2827
+#: nova/compute/api.py:2832
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2847
+#: nova/compute/api.py:2852
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2850
+#: nova/compute/api.py:2855
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2857
+#: nova/compute/api.py:2862
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2922
+#: nova/compute/api.py:2927
#, python-format
msgid "Unable to delete system group '%s'"
msgstr ""
-#: nova/compute/api.py:2927
+#: nova/compute/api.py:2932
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2935
+#: nova/compute/api.py:2940
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2938
+#: nova/compute/api.py:2943
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:3032 nova/compute/api.py:3109
+#: nova/compute/api.py:3037 nova/compute/api.py:3114
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:3048
+#: nova/compute/api.py:3053
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:3051
+#: nova/compute/api.py:3056
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:3062
+#: nova/compute/api.py:3067
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
-#: nova/compute/api.py:3116
+#: nova/compute/api.py:3121
msgid "Security group id should be integer"
msgstr ""
@@ -3737,598 +3738,611 @@ msgstr ""
msgid "Instance type %s not found for deletion"
msgstr ""
-#: nova/compute/manager.py:202
+#: nova/compute/manager.py:199
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:357
+#: nova/compute/manager.py:353
#, python-format
msgid "%(nodename)s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:386
+#: nova/compute/manager.py:382
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:400
+#: nova/compute/manager.py:396
#, python-format
msgid "Instance %(uuid)s found in the hypervisor, but not in the database"
msgstr ""
-#: nova/compute/manager.py:416
+#: nova/compute/manager.py:412
#, python-format
msgid ""
"Instance %(driver_instance)s found in the hypervisor, but not in the "
"database"
msgstr ""
-#: nova/compute/manager.py:437
+#: nova/compute/manager.py:433
#, python-format
msgid ""
"Deleting instance as its host (%(instance_host)s) is not equal to our "
"host (%(our_host)s)."
msgstr ""
-#: nova/compute/manager.py:474
+#: nova/compute/manager.py:470
msgid "Failed to revert crashed migration"
msgstr ""
-#: nova/compute/manager.py:477
+#: nova/compute/manager.py:473
msgid "Instance found in migrating state during startup. Resetting task_state"
msgstr ""
-#: nova/compute/manager.py:488
+#: nova/compute/manager.py:484
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:493
+#: nova/compute/manager.py:489
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:507
+#: nova/compute/manager.py:503
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:512
+#: nova/compute/manager.py:508
msgid "Failed to resume instance"
msgstr ""
-#: nova/compute/manager.py:522
+#: nova/compute/manager.py:518
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:526
+#: nova/compute/manager.py:522
#, python-format
msgid "Lifecycle event %(state)d on VM %(uuid)s"
msgstr ""
-#: nova/compute/manager.py:542
+#: nova/compute/manager.py:538
#, python-format
msgid "Unexpected power state %d"
msgstr ""
-#: nova/compute/manager.py:554
+#: nova/compute/manager.py:550
#, python-format
msgid "Ignoring event %s"
msgstr ""
-#: nova/compute/manager.py:592
+#: nova/compute/manager.py:588
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:672
+#: nova/compute/manager.py:668
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:754 nova/compute/manager.py:2141
+#: nova/compute/manager.py:752
+msgid "Instance disappeared before we could start it"
+msgstr ""
+
+#: nova/compute/manager.py:761 nova/compute/manager.py:2228
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:803
+#: nova/compute/manager.py:808
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:809
+#: nova/compute/manager.py:814
msgid "Instance was deleted during spawn."
msgstr ""
-#: nova/compute/manager.py:830
+#: nova/compute/manager.py:835
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:865 nova/compute/manager.py:2195
+#: nova/compute/manager.py:852
+msgid "Clean up resource before rescheduling."
+msgstr ""
+
+#: nova/compute/manager.py:877 nova/compute/manager.py:2282
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:883
+#: nova/compute/manager.py:895
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:888
+#: nova/compute/manager.py:900
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:894
+#: nova/compute/manager.py:906
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:922
+#: nova/compute/manager.py:934
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:972
+#: nova/compute/manager.py:984
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:978
+#: nova/compute/manager.py:990
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:988
+#: nova/compute/manager.py:1000
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:1014
+#: nova/compute/manager.py:1026
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:1017
+#: nova/compute/manager.py:1029
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:1028
+#: nova/compute/manager.py:1040
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:1046
+#: nova/compute/manager.py:1058
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:1097
+#: nova/compute/manager.py:1109
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:1173
+#: nova/compute/manager.py:1185
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1189
+#: nova/compute/manager.py:1201
msgid "Failed to deallocate network for instance."
msgstr ""
-#: nova/compute/manager.py:1210
+#: nova/compute/manager.py:1222
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1213
+#: nova/compute/manager.py:1225
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1220
+#: nova/compute/manager.py:1232
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1245
+#: nova/compute/manager.py:1280
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1288 nova/compute/manager.py:2370
-#: nova/compute/manager.py:3818
+#: nova/compute/manager.py:1329 nova/compute/manager.py:2457
+#: nova/compute/manager.py:3925
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1428
+#: nova/compute/manager.py:1481
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1441
+#: nova/compute/manager.py:1494
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:1445
+#: nova/compute/manager.py:1498
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:1449
+#: nova/compute/manager.py:1502
#, python-format
msgid "disk not on shared storagerebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:1533
+#: nova/compute/manager.py:1586
#, python-format
msgid "bringing vm to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:1557
+#: nova/compute/manager.py:1618
+#, python-format
+msgid "Detaching from volume api: %s"
+msgstr ""
+
+#: nova/compute/manager.py:1636
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1576
+#: nova/compute/manager.py:1655
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1586
+#: nova/compute/manager.py:1671
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1599
+#: nova/compute/manager.py:1684
msgid "Instance disappeared during reboot"
msgstr ""
-#: nova/compute/manager.py:1626
+#: nova/compute/manager.py:1711
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1632
+#: nova/compute/manager.py:1717
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1693
+#: nova/compute/manager.py:1778
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1700
+#: nova/compute/manager.py:1785
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1705
+#: nova/compute/manager.py:1790
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1733
+#: nova/compute/manager.py:1818
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1740
+#: nova/compute/manager.py:1825
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1747
+#: nova/compute/manager.py:1832
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:1762
+#: nova/compute/manager.py:1847
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1769
+#: nova/compute/manager.py:1854
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1782
+#: nova/compute/manager.py:1867
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1786
+#: nova/compute/manager.py:1871
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1806
+#: nova/compute/manager.py:1891
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1820
+#: nova/compute/manager.py:1905
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1855
+#: nova/compute/manager.py:1940
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1876
+#: nova/compute/manager.py:1961
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:2099
+#: nova/compute/manager.py:2186
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:2105
+#: nova/compute/manager.py:2192
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:2122
+#: nova/compute/manager.py:2209
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2367
+#: nova/compute/manager.py:2454
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2430
+#: nova/compute/manager.py:2517
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2448
+#: nova/compute/manager.py:2535
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2486
+#: nova/compute/manager.py:2573
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2517
+#: nova/compute/manager.py:2604
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2537
+#: nova/compute/manager.py:2624
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2542
+#: nova/compute/manager.py:2629
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2545
+#: nova/compute/manager.py:2632
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2562
+#: nova/compute/manager.py:2649
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2587
+#: nova/compute/manager.py:2676
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2615
+#: nova/compute/manager.py:2711
msgid "Getting spice console"
msgstr ""
-#: nova/compute/manager.py:2655
+#: nova/compute/manager.py:2758
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2706
+#: nova/compute/manager.py:2809
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2715
+#: nova/compute/manager.py:2818
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2730
+#: nova/compute/manager.py:2833
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2760
+#: nova/compute/manager.py:2863
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2770
+#: nova/compute/manager.py:2873
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2777
+#: nova/compute/manager.py:2880
#, python-format
msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2801
+#: nova/compute/manager.py:2904
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2839
+#: nova/compute/manager.py:2942
#, python-format
msgid "allocate_port_for_instance returned %(port)s ports"
msgstr ""
-#: nova/compute/manager.py:2859
+#: nova/compute/manager.py:2962
#, python-format
msgid "Port %(port_id)s is not attached"
msgstr ""
-#: nova/compute/manager.py:2873
+#: nova/compute/manager.py:2976
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:3002
+#: nova/compute/manager.py:3105
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:3030
+#: nova/compute/manager.py:3133
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:3085
+#: nova/compute/manager.py:3188
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:3087
+#: nova/compute/manager.py:3190
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:3101
+#: nova/compute/manager.py:3204
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:3241
+#: nova/compute/manager.py:3344
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:3286
+#: nova/compute/manager.py:3389
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:3292
+#: nova/compute/manager.py:3395
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:3301
+#: nova/compute/manager.py:3404
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:3308
+#: nova/compute/manager.py:3411
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:3312
+#: nova/compute/manager.py:3415
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:3319
+#: nova/compute/manager.py:3422
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:3327
+#: nova/compute/manager.py:3431
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:3344
+#: nova/compute/manager.py:3448
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:3363
+#: nova/compute/manager.py:3467
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:3387
+#: nova/compute/manager.py:3491
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3505
+#: nova/compute/manager.py:3609
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3523
+#: nova/compute/manager.py:3627
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3549
+#: nova/compute/manager.py:3653
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3554 nova/compute/manager.py:3603
+#: nova/compute/manager.py:3658 nova/compute/manager.py:3707
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3590
+#: nova/compute/manager.py:3694
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3628
+#: nova/compute/manager.py:3732
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3640 nova/compute/manager.py:3649
-#: nova/compute/manager.py:3679
+#: nova/compute/manager.py:3744 nova/compute/manager.py:3753
+#: nova/compute/manager.py:3783
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3644
+#: nova/compute/manager.py:3748
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3660
+#: nova/compute/manager.py:3764
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:3666
+#: nova/compute/manager.py:3770
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:3672
+#: nova/compute/manager.py:3776
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3688
+#: nova/compute/manager.py:3792
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3696
+#: nova/compute/manager.py:3800
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3711
+#: nova/compute/manager.py:3815
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3773
+#: nova/compute/manager.py:3880
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3780
+#: nova/compute/manager.py:3887
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3787
+#: nova/compute/manager.py:3894
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
@@ -4452,18 +4466,18 @@ msgstr ""
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/api.py:370
+#: nova/conductor/api.py:382
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service "
"start before nova-conductor?"
msgstr ""
-#: nova/conductor/manager.py:89
+#: nova/conductor/manager.py:88
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:254
+#: nova/conductor/manager.py:253
msgid "Invalid block_device_mapping_destroy invocation"
msgstr ""
@@ -4537,23 +4551,23 @@ msgstr ""
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/api.py:585
+#: nova/db/api.py:581
msgid "Failed to notify cells of instance destroy"
msgstr ""
-#: nova/db/api.py:673 nova/db/api.py:694
+#: nova/db/api.py:669 nova/db/api.py:690
msgid "Failed to notify cells of instance update"
msgstr ""
-#: nova/db/api.py:734
+#: nova/db/api.py:730
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/db/api.py:1428
+#: nova/db/api.py:1424
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/api.py:1582
+#: nova/db/api.py:1578
msgid "Failed to notify cells of instance fault"
msgstr ""
@@ -4571,19 +4585,19 @@ msgstr ""
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1387
+#: nova/db/sqlalchemy/api.py:1384
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:1521
+#: nova/db/sqlalchemy/api.py:1517
#, python-format
msgid "Invalid instance id %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2654
+#: nova/db/sqlalchemy/api.py:2639
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
@@ -4844,129 +4858,129 @@ msgstr ""
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:677
+#: nova/network/linux_net.py:676
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:727
+#: nova/network/linux_net.py:726
#, python-format
msgid "Removed %(num)d duplicate rules for floating ip %(float)s"
msgstr ""
-#: nova/network/linux_net.py:963
+#: nova/network/linux_net.py:962
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:1008
+#: nova/network/linux_net.py:1007
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:1010
+#: nova/network/linux_net.py:1009
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:1085
+#: nova/network/linux_net.py:1084
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:1087
+#: nova/network/linux_net.py:1086
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1219
+#: nova/network/linux_net.py:1218
#, python-format
msgid "Error clearing stale veth %s"
msgstr ""
-#: nova/network/linux_net.py:1378
+#: nova/network/linux_net.py:1377
#, python-format
msgid "Starting VLAN interface %s"
msgstr ""
-#: nova/network/linux_net.py:1410
+#: nova/network/linux_net.py:1409
#, python-format
msgid "Failed unplugging VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1412
+#: nova/network/linux_net.py:1411
#, python-format
msgid "Unplugged VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1434
+#: nova/network/linux_net.py:1433
#, python-format
msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1446
+#: nova/network/linux_net.py:1445
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/linux_net.py:1482
+#: nova/network/linux_net.py:1481
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1519
+#: nova/network/linux_net.py:1518
#, python-format
msgid "Failed unplugging bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1522
+#: nova/network/linux_net.py:1521
#, python-format
msgid "Unplugged bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1691
+#: nova/network/linux_net.py:1690
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1699
+#: nova/network/linux_net.py:1698
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1719
+#: nova/network/linux_net.py:1718
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1721
+#: nova/network/linux_net.py:1720
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:355
+#: nova/network/manager.py:357
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:359
+#: nova/network/manager.py:361
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:486
+#: nova/network/manager.py:488
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:493
+#: nova/network/manager.py:495
#, python-format
msgid "networks retrieved for instance: |%(networks_list)s|"
msgstr ""
-#: nova/network/manager.py:541
+#: nova/network/manager.py:543
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:804
+#: nova/network/manager.py:806
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4974,89 +4988,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:896
+#: nova/network/manager.py:898
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:915
+#: nova/network/manager.py:917
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:919
+#: nova/network/manager.py:921
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:928
+#: nova/network/manager.py:930
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:933
+#: nova/network/manager.py:935
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:937
+#: nova/network/manager.py:939
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:941
+#: nova/network/manager.py:943
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:960
+#: nova/network/manager.py:962
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:984
+#: nova/network/manager.py:986
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1004
+#: nova/network/manager.py:1006
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1085
+#: nova/network/manager.py:1087
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1088
+#: nova/network/manager.py:1090
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1099
+#: nova/network/manager.py:1101
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1156
+#: nova/network/manager.py:1158
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1175
+#: nova/network/manager.py:1177
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:1748
+#: nova/network/manager.py:1762
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:1755
+#: nova/network/manager.py:1769
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -5360,77 +5374,82 @@ msgid ""
"following: [%(stack)s]."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:72
+#: nova/openstack/common/rpc/amqp.py:75
msgid "Pool creating new connection"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:197
+#: nova/openstack/common/rpc/amqp.py:200
#, python-format
msgid "no calling threads waiting for msg_id : %s, message : %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:205
+#: nova/openstack/common/rpc/amqp.py:208
#, python-format
msgid ""
"Number of call waiters is greater than warning threshhold: %d. There "
"could be a MulticallProxyWaiter leak."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:287
+#: nova/openstack/common/rpc/amqp.py:291
#, python-format
msgid "unpacked context: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:370
+#: nova/openstack/common/rpc/amqp.py:337
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:406
#, python-format
msgid "received %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:376
+#: nova/openstack/common/rpc/amqp.py:413
#, python-format
msgid "no method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:377
+#: nova/openstack/common/rpc/amqp.py:414
#, python-format
msgid "No method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:403
-#: nova/openstack/common/rpc/impl_zmq.py:283
+#: nova/openstack/common/rpc/amqp.py:440
+#: nova/openstack/common/rpc/impl_zmq.py:284
#, python-format
msgid "Expected exception during message handling (%s)"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:409
-#: nova/openstack/common/rpc/impl_zmq.py:289
+#: nova/openstack/common/rpc/amqp.py:448
+#: nova/openstack/common/rpc/impl_zmq.py:290
msgid "Exception during message handling"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:457
+#: nova/openstack/common/rpc/amqp.py:498
msgid "Timed out waiting for RPC response."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:541
+#: nova/openstack/common/rpc/amqp.py:584
#, python-format
msgid "Making synchronous call on %s ..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:544
+#: nova/openstack/common/rpc/amqp.py:587
#, python-format
msgid "MSG_ID is %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:577
+#: nova/openstack/common/rpc/amqp.py:621
#, python-format
msgid "Making asynchronous cast on %s..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:585
+#: nova/openstack/common/rpc/amqp.py:630
msgid "Making asynchronous fanout cast..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:610
+#: nova/openstack/common/rpc/amqp.py:658
#, python-format
msgid "Sending %(event_type)s on %(topic)s"
msgstr ""
@@ -5451,27 +5470,32 @@ msgid "Timeout while waiting on RPC response."
msgstr ""
#: nova/openstack/common/rpc/common.py:129
-msgid "Invalid reuse of an RPC connection."
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
msgstr ""
#: nova/openstack/common/rpc/common.py:133
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:137
#, python-format
msgid "Specified RPC version, %(version)s, not supported by this endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:138
+#: nova/openstack/common/rpc/common.py:142
#, python-format
msgid ""
"Specified RPC envelope version, %(version)s, not supported by this "
"endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:262
+#: nova/openstack/common/rpc/common.py:266
#, python-format
msgid "Failed to sanitize %(item)s. Key error %(err)s"
msgstr ""
-#: nova/openstack/common/rpc/common.py:284
+#: nova/openstack/common/rpc/common.py:288
#, python-format
msgid "Returning exception %s to caller"
msgstr ""
@@ -5547,190 +5571,207 @@ msgstr ""
msgid "Error processing message. Skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:94
+#: nova/openstack/common/rpc/impl_zmq.py:96
msgid "JSON serialization failed."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:102
+#: nova/openstack/common/rpc/impl_zmq.py:103
#, python-format
msgid "Deserializing: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:138
+#: nova/openstack/common/rpc/impl_zmq.py:139
#, python-format
msgid "Connecting to %(addr)s with %(type)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:139
+#: nova/openstack/common/rpc/impl_zmq.py:140
#, python-format
msgid "-> Subscribed to %(subscribe)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:140
+#: nova/openstack/common/rpc/impl_zmq.py:141
#, python-format
msgid "-> bind: %(bind)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:148
+#: nova/openstack/common/rpc/impl_zmq.py:149
msgid "Could not open socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:160
+#: nova/openstack/common/rpc/impl_zmq.py:161
#, python-format
msgid "Subscribing to %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:202
+#: nova/openstack/common/rpc/impl_zmq.py:203
msgid "You cannot recv on this socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:207
+#: nova/openstack/common/rpc/impl_zmq.py:208
msgid "You cannot send on this socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:271
+#: nova/openstack/common/rpc/impl_zmq.py:272
#, python-format
msgid "Running func with context: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:303
+#: nova/openstack/common/rpc/impl_zmq.py:304
msgid "Sending reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:337
+#: nova/openstack/common/rpc/impl_zmq.py:338
msgid "RPC message did not include method."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:372
+#: nova/openstack/common/rpc/impl_zmq.py:373
msgid "Registering reactor"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:384
+#: nova/openstack/common/rpc/impl_zmq.py:385
msgid "In reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:399
+#: nova/openstack/common/rpc/impl_zmq.py:400
msgid "Out reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:403
+#: nova/openstack/common/rpc/impl_zmq.py:404
msgid "Consuming socket"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:443
+#: nova/openstack/common/rpc/impl_zmq.py:444
#, python-format
msgid "CONSUMER GOT %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:455
+#: nova/openstack/common/rpc/impl_zmq.py:456
#, python-format
msgid "Creating proxy for topic: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:479
+#: nova/openstack/common/rpc/impl_zmq.py:480
#, python-format
msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:488
+#: nova/openstack/common/rpc/impl_zmq.py:489
msgid "Topic socket file creation failed."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:493
+#: nova/openstack/common/rpc/impl_zmq.py:494
#, python-format
msgid "ROUTER RELAY-OUT QUEUED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:496
+#: nova/openstack/common/rpc/impl_zmq.py:497
#, python-format
msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:514
+#: nova/openstack/common/rpc/impl_zmq.py:516
#, python-format
msgid "Could not create IPC directory %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:524
+#: nova/openstack/common/rpc/impl_zmq.py:526
msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:559
+#: nova/openstack/common/rpc/impl_zmq.py:560
#, python-format
msgid "CONSUMER RECEIVED DATA: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:561
+#: nova/openstack/common/rpc/impl_zmq.py:562
#, python-format
msgid "ROUTER RELAY-OUT %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:583
+#: nova/openstack/common/rpc/impl_zmq.py:584
msgid "ZMQ Envelope version unsupported or unknown."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:608
+#: nova/openstack/common/rpc/impl_zmq.py:612
msgid "Skipping topic registration. Already registered."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:615
+#: nova/openstack/common/rpc/impl_zmq.py:619
#, python-format
msgid "Consumer is a zmq.%s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:662
+#: nova/openstack/common/rpc/impl_zmq.py:671
msgid "Creating payload"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:675
+#: nova/openstack/common/rpc/impl_zmq.py:684
msgid "Creating queue socket for reply waiter"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:688
+#: nova/openstack/common/rpc/impl_zmq.py:697
msgid "Sending cast"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:691
+#: nova/openstack/common/rpc/impl_zmq.py:700
msgid "Cast sent; Waiting reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:694
+#: nova/openstack/common/rpc/impl_zmq.py:703
#, python-format
msgid "Received message: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:695
+#: nova/openstack/common/rpc/impl_zmq.py:704
msgid "Unpacking response"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:704
+#: nova/openstack/common/rpc/impl_zmq.py:713
msgid "Unsupported or unknown ZMQ envelope returned."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:711
+#: nova/openstack/common/rpc/impl_zmq.py:720
msgid "RPC Message Invalid."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:735
+#: nova/openstack/common/rpc/impl_zmq.py:744
#, python-format
msgid "%(msg)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:738
+#: nova/openstack/common/rpc/impl_zmq.py:747
#, python-format
msgid "Sending message(s) to: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:742
+#: nova/openstack/common/rpc/impl_zmq.py:751
msgid "No matchmaker results. Not casting."
msgstr ""
-#: nova/openstack/common/rpc/matchmaker.py:46
+#: nova/openstack/common/rpc/impl_zmq.py:754
+msgid "No match from matchmaker."
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:53
msgid "Match not found by MatchMaker."
msgstr ""
-#: nova/openstack/common/rpc/matchmaker.py:178
-#: nova/openstack/common/rpc/matchmaker.py:196
+#: nova/openstack/common/rpc/matchmaker.py:89
+msgid "Matchmaker does not implement registration or heartbeat."
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:239
+#, python-format
+msgid "Matchmaker unregistered: %s, %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:250
+msgid "Register before starting heartbeat."
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:343
+#: nova/openstack/common/rpc/matchmaker.py:361
#, python-format
msgid "No key defining hosts for topic '%s', see ringfile"
msgstr ""
@@ -5747,7 +5788,7 @@ msgstr ""
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:62 nova/scheduler/manager.py:205
+#: nova/scheduler/driver.py:62 nova/scheduler/manager.py:204
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
@@ -5845,7 +5886,7 @@ msgstr ""
msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
msgstr ""
-#: nova/scheduler/manager.py:191
+#: nova/scheduler/manager.py:190
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
@@ -6212,86 +6253,87 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3258
+#: nova/tests/api/openstack/compute/test_servers.py:3275
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3263
+#: nova/tests/api/openstack/compute/test_servers.py:3280
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3268
+#: nova/tests/api/openstack/compute/test_servers.py:3285
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
-#: nova/tests/compute/test_compute.py:729
-#: nova/tests/compute/test_compute.py:747
-#: nova/tests/compute/test_compute.py:798
-#: nova/tests/compute/test_compute.py:825
+#: nova/tests/compute/test_compute.py:802
+#: nova/tests/compute/test_compute.py:820
#: nova/tests/compute/test_compute.py:871
-#: nova/tests/compute/test_compute.py:3062
+#: nova/tests/compute/test_compute.py:898
+#: nova/tests/compute/test_compute.py:944
+#: nova/tests/compute/test_compute.py:3260
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:735
-#: nova/tests/compute/test_compute.py:770
-#: nova/tests/compute/test_compute.py:813
+#: nova/tests/compute/test_compute.py:808
#: nova/tests/compute/test_compute.py:843
+#: nova/tests/compute/test_compute.py:886
+#: nova/tests/compute/test_compute.py:916
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1304
+#: nova/tests/compute/test_compute.py:1382
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:3073
+#: nova/tests/compute/test_compute.py:3271
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3563
+#: nova/tests/compute/test_compute.py:3763
msgid "wrong host/node"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:175
+#: nova/tests/integrated/test_api_samples.py:174
#, python-format
-msgid "Result: %(result)s is not a dict."
+msgid "%(result_str)s: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:187
+#: nova/tests/integrated/test_api_samples.py:188
#, python-format
msgid ""
"Dictionary key mismatch:\n"
"Extra key(s) in template:\n"
"%(ex_delta)s\n"
-"Extra key(s) in response:\n"
+"Extra key(s) in %(result_str)s:\n"
"%(res_delta)s\n"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:197
+#: nova/tests/integrated/test_api_samples.py:199
#, python-format
-msgid "Result: %(result)s is not a list."
+msgid "%(result_str)s: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:215
+#: nova/tests/integrated/test_api_samples.py:218
msgid "Extra list items in template:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:219
-msgid "Extra list items in response:"
+#: nova/tests/integrated/test_api_samples.py:222
+#, python-format
+msgid "Extra list items in %(result_str)s:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:238
-#: nova/tests/integrated/test_api_samples.py:252
+#: nova/tests/integrated/test_api_samples.py:243
+#: nova/tests/integrated/test_api_samples.py:258
#, python-format
msgid ""
"Values do not match:\n"
"Template: %(expected)s\n"
-"Response: %(result)s"
+"%(result_str)s: %(result)s"
msgstr ""
#: nova/tests/integrated/test_login.py:31
@@ -6353,25 +6395,25 @@ msgstr ""
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/driver.py:892
+#: nova/virt/driver.py:895
msgid "Event must be an instance of nova.virt.event.Event"
msgstr ""
-#: nova/virt/driver.py:898
+#: nova/virt/driver.py:901
#, python-format
msgid "Exception dispatching event %(event)s: %(ex)s"
msgstr ""
-#: nova/virt/driver.py:920
+#: nova/virt/driver.py:923
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:923
+#: nova/virt/driver.py:926
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/driver.py:930
+#: nova/virt/driver.py:933
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
@@ -6850,12 +6892,12 @@ msgstr ""
msgid "Failed to mount filesystem: %s"
msgstr ""
-#: nova/virt/disk/mount/api.py:202
+#: nova/virt/disk/mount/api.py:203
#, python-format
msgid "Umount %s"
msgstr ""
-#: nova/virt/disk/mount/api.py:213
+#: nova/virt/disk/mount/api.py:214
msgid "Fail to mount, tearing back down"
msgstr ""
@@ -7079,7 +7121,7 @@ msgstr ""
msgid "Set permissions path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:75
+#: nova/virt/hyperv/basevolumeutils.py:71
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
@@ -7112,7 +7154,7 @@ msgstr ""
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:135 nova/virt/libvirt/driver.py:3667
+#: nova/virt/hyperv/hostops.py:135 nova/virt/libvirt/driver.py:3684
#: nova/virt/xenapi/host.py:148
msgid "Updating host stats"
msgstr ""
@@ -7435,12 +7477,12 @@ msgstr ""
msgid "Using config drive for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:227 nova/virt/libvirt/driver.py:1850
+#: nova/virt/hyperv/vmops.py:227 nova/virt/libvirt/driver.py:1861
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:235 nova/virt/libvirt/driver.py:1857
+#: nova/virt/hyperv/vmops.py:235 nova/virt/libvirt/driver.py:1868
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
@@ -7579,7 +7621,7 @@ msgstr ""
msgid "Detaching physical disk from instance: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:161 nova/virt/libvirt/driver.py:893
+#: nova/virt/hyperv/volumeops.py:161 nova/virt/libvirt/driver.py:894
msgid "Could not determine iscsi initiator name"
msgstr ""
@@ -7593,7 +7635,7 @@ msgstr ""
msgid "Device number: %(device_number)s, target lun: %(target_lun)s"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:52
+#: nova/virt/hyperv/volumeutils.py:53
#, python-format
msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
@@ -7633,215 +7675,215 @@ msgstr ""
msgid "URI %s does not support events"
msgstr ""
-#: nova/virt/libvirt/driver.py:585
+#: nova/virt/libvirt/driver.py:586
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:607 nova/virt/libvirt/driver.py:610
+#: nova/virt/libvirt/driver.py:608 nova/virt/libvirt/driver.py:611
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:628
+#: nova/virt/libvirt/driver.py:629
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:719
+#: nova/virt/libvirt/driver.py:720
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:734
+#: nova/virt/libvirt/driver.py:735
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:739
+#: nova/virt/libvirt/driver.py:740
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:750
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:758
+#: nova/virt/libvirt/driver.py:759
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:777
+#: nova/virt/libvirt/driver.py:778
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:793
+#: nova/virt/libvirt/driver.py:794
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:813
+#: nova/virt/libvirt/driver.py:814
msgid "Instance may be still running, destroy it again."
msgstr ""
-#: nova/virt/libvirt/driver.py:819
+#: nova/virt/libvirt/driver.py:820
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:847
+#: nova/virt/libvirt/driver.py:848
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:856
+#: nova/virt/libvirt/driver.py:857
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:899
+#: nova/virt/libvirt/driver.py:900
msgid "Could not determine fibre channel world wide node names"
msgstr ""
-#: nova/virt/libvirt/driver.py:906
+#: nova/virt/libvirt/driver.py:907
msgid "Could not determine fibre channel world wide port names"
msgstr ""
-#: nova/virt/libvirt/driver.py:911
+#: nova/virt/libvirt/driver.py:912
msgid "No Volume Connector found."
msgstr ""
-#: nova/virt/libvirt/driver.py:1033
+#: nova/virt/libvirt/driver.py:1034
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1057
+#: nova/virt/libvirt/driver.py:1058
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1077
+#: nova/virt/libvirt/driver.py:1078
msgid "During detach_interface, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1081
+#: nova/virt/libvirt/driver.py:1082
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1170
+#: nova/virt/libvirt/driver.py:1171
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1173
+#: nova/virt/libvirt/driver.py:1174
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1202
+#: nova/virt/libvirt/driver.py:1203
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1214
+#: nova/virt/libvirt/driver.py:1215
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:1287
+#: nova/virt/libvirt/driver.py:1288
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1291
+#: nova/virt/libvirt/driver.py:1292
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:1326
+#: nova/virt/libvirt/driver.py:1327
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1334
+#: nova/virt/libvirt/driver.py:1335
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:1376
+#: nova/virt/libvirt/driver.py:1377
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1519
+#: nova/virt/libvirt/driver.py:1521
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1526 nova/virt/powervm/operator.py:219
+#: nova/virt/libvirt/driver.py:1528 nova/virt/powervm/operator.py:219
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1542
+#: nova/virt/libvirt/driver.py:1544
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1579 nova/virt/libvirt/driver.py:1605
+#: nova/virt/libvirt/driver.py:1581 nova/virt/libvirt/driver.py:1607
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1594
+#: nova/virt/libvirt/driver.py:1596
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1665
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1667
+#: nova/virt/libvirt/driver.py:1669
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1672 nova/virt/libvirt/driver.py:1677
+#: nova/virt/libvirt/driver.py:1674 nova/virt/libvirt/driver.py:1679
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1743
+#: nova/virt/libvirt/driver.py:1750
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1841
+#: nova/virt/libvirt/driver.py:1852
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1890
+#: nova/virt/libvirt/driver.py:1901
#, python-format
msgid "Injecting %(inj)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1900
+#: nova/virt/libvirt/driver.py:1911
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1957
+#: nova/virt/libvirt/driver.py:1968
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1963
+#: nova/virt/libvirt/driver.py:1974
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1967
+#: nova/virt/libvirt/driver.py:1978
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1971
+#: nova/virt/libvirt/driver.py:1982
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1987
+#: nova/virt/libvirt/driver.py:1998
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:2306
+#: nova/virt/libvirt/driver.py:2319
#, python-format
msgid ""
"Start to_xml instance=%(instance)s network_info=%(network_info)s "
@@ -7849,80 +7891,85 @@ msgid ""
"rescue=%(rescue)sblock_device_info=%(block_device_info)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2321
+#: nova/virt/libvirt/driver.py:2334
#, python-format
msgid "End to_xml instance=%(instance)s xml=%(xml)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2338
+#: nova/virt/libvirt/driver.py:2351
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2506
+#: nova/virt/libvirt/driver.py:2519
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2559
+#: nova/virt/libvirt/driver.py:2570
+#, python-format
+msgid "couldn't obtain the vpu count from domain id: %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2576
#, python-format
msgid "List of domains returned by libVirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2561
+#: nova/virt/libvirt/driver.py:2578
#, python-format
msgid "libVirt can't find a domain with id: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2625
+#: nova/virt/libvirt/driver.py:2642
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2708
+#: nova/virt/libvirt/driver.py:2725
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2732
+#: nova/virt/libvirt/driver.py:2749
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2736
+#: nova/virt/libvirt/driver.py:2753
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2870
+#: nova/virt/libvirt/driver.py:2887
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2878
+#: nova/virt/libvirt/driver.py:2895
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2915
+#: nova/virt/libvirt/driver.py:2932
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2940
+#: nova/virt/libvirt/driver.py:2957
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2952
+#: nova/virt/libvirt/driver.py:2969
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -7932,62 +7979,62 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2969
+#: nova/virt/libvirt/driver.py:2986
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:3017
+#: nova/virt/libvirt/driver.py:3034
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:3089
+#: nova/virt/libvirt/driver.py:3106
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3182
+#: nova/virt/libvirt/driver.py:3199
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:3292
+#: nova/virt/libvirt/driver.py:3309
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3309
+#: nova/virt/libvirt/driver.py:3326
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:3347
+#: nova/virt/libvirt/driver.py:3364
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3393
+#: nova/virt/libvirt/driver.py:3410
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:3452
+#: nova/virt/libvirt/driver.py:3469
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:3458
+#: nova/virt/libvirt/driver.py:3475
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3519
+#: nova/virt/libvirt/driver.py:3536
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3640
+#: nova/virt/libvirt/driver.py:3657
#, python-format
msgid "Checking instance files accessability%(instance_path)s"
msgstr ""
@@ -8020,11 +8067,11 @@ msgstr ""
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:281
+#: nova/virt/libvirt/imagebackend.py:283
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:366
+#: nova/virt/libvirt/imagebackend.py:368
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
@@ -8203,7 +8250,7 @@ msgstr ""
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:555
+#: nova/virt/libvirt/utils.py:547
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
@@ -8422,26 +8469,26 @@ msgstr ""
msgid "File transfer from PowerVM manager failed"
msgstr ""
-#: nova/virt/powervm/driver.py:121
+#: nova/virt/powervm/driver.py:124
msgid "In get_host_ip_addr"
msgstr ""
-#: nova/virt/powervm/driver.py:124
+#: nova/virt/powervm/driver.py:127
#, python-format
msgid "Attempting to resolve %s"
msgstr ""
-#: nova/virt/powervm/driver.py:126
+#: nova/virt/powervm/driver.py:129
#, python-format
msgid "%(hostname)s was successfully resolved to %(ip_addr)s"
msgstr ""
-#: nova/virt/powervm/driver.py:171
+#: nova/virt/powervm/driver.py:174
#, python-format
msgid "%(inst_name)s captured in %(snapshot_time)s seconds"
msgstr ""
-#: nova/virt/powervm/driver.py:292
+#: nova/virt/powervm/driver.py:295
#, python-format
msgid "Unrecognized root disk information: %s"
msgstr ""
@@ -9036,7 +9083,7 @@ msgstr ""
msgid "Migrated VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1050 nova/virt/xenapi/vmops.py:1275
+#: nova/virt/vmwareapi/vmops.py:1050 nova/virt/xenapi/vmops.py:1300
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
@@ -9194,19 +9241,19 @@ msgstr ""
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1518
+#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1534
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:92 nova/virt/xenapi/vmops.py:1522
+#: nova/virt/xenapi/agent.py:92 nova/virt/xenapi/vmops.py:1538
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:97 nova/virt/xenapi/vmops.py:1527
+#: nova/virt/xenapi/agent.py:97 nova/virt/xenapi/vmops.py:1543
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -9306,24 +9353,24 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:347
+#: nova/virt/xenapi/driver.py:348
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:560
+#: nova/virt/xenapi/driver.py:561
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:620
+#: nova/virt/xenapi/driver.py:621
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:660
+#: nova/virt/xenapi/driver.py:661
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:744 nova/virt/xenapi/driver.py:758
+#: nova/virt/xenapi/driver.py:745 nova/virt/xenapi/driver.py:759
#, python-format
msgid "Got exception: %s"
msgstr ""
@@ -9888,201 +9935,203 @@ msgstr ""
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:134 nova/virt/xenapi/vmops.py:752
+#: nova/virt/xenapi/vmops.py:133 nova/virt/xenapi/vmops.py:768
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:169
+#: nova/virt/xenapi/vmops.py:168
#, python-format
msgid "Importing image upload handler: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:182
+#: nova/virt/xenapi/vmops.py:181
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:278
+#: nova/virt/xenapi/vmops.py:277
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:347
+#: nova/virt/xenapi/vmops.py:363
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:420
+#: nova/virt/xenapi/vmops.py:436
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:451
+#: nova/virt/xenapi/vmops.py:467
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:530
+#: nova/virt/xenapi/vmops.py:546
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:547
+#: nova/virt/xenapi/vmops.py:563
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:593
+#: nova/virt/xenapi/vmops.py:609
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:599
+#: nova/virt/xenapi/vmops.py:615
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:613
+#: nova/virt/xenapi/vmops.py:629
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:616
+#: nova/virt/xenapi/vmops.py:632
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:627
+#: nova/virt/xenapi/vmops.py:643
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:655
+#: nova/virt/xenapi/vmops.py:671
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:663
+#: nova/virt/xenapi/vmops.py:679
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:721
+#: nova/virt/xenapi/vmops.py:737
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:725
+#: nova/virt/xenapi/vmops.py:741
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:733
+#: nova/virt/xenapi/vmops.py:749
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:770
+#: nova/virt/xenapi/vmops.py:786
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:776 nova/virt/xenapi/vmops.py:826
+#: nova/virt/xenapi/vmops.py:792 nova/virt/xenapi/vmops.py:842
msgid "Clean shutdown did not complete successfully, trying hard shutdown."
msgstr ""
-#: nova/virt/xenapi/vmops.py:855
+#: nova/virt/xenapi/vmops.py:871
msgid "Resize down not allowed without auto_disk_config"
msgstr ""
-#: nova/virt/xenapi/vmops.py:900
+#: nova/virt/xenapi/vmops.py:916
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:905
+#: nova/virt/xenapi/vmops.py:921
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:949
+#: nova/virt/xenapi/vmops.py:965
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1040
+#: nova/virt/xenapi/vmops.py:971
+msgid ""
+"Reboot failed due to bad volumes, detaching bad volumes and starting "
+"halted instance"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1065
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1044
+#: nova/virt/xenapi/vmops.py:1069
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1071
+#: nova/virt/xenapi/vmops.py:1096
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1078
+#: nova/virt/xenapi/vmops.py:1103
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1085
+#: nova/virt/xenapi/vmops.py:1110
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1112
+#: nova/virt/xenapi/vmops.py:1137
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1141
+#: nova/virt/xenapi/vmops.py:1166
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1192
+#: nova/virt/xenapi/vmops.py:1217
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1226
+#: nova/virt/xenapi/vmops.py:1251
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1279
+#: nova/virt/xenapi/vmops.py:1304
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1339
-msgid "Fetching VM ref while BUILDING failed"
-msgstr ""
-
-#: nova/virt/xenapi/vmops.py:1422
+#: nova/virt/xenapi/vmops.py:1438
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1441
+#: nova/virt/xenapi/vmops.py:1457
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1450
+#: nova/virt/xenapi/vmops.py:1466
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1453
+#: nova/virt/xenapi/vmops.py:1469
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1481
+#: nova/virt/xenapi/vmops.py:1497
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1577
+#: nova/virt/xenapi/vmops.py:1593
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1609
+#: nova/virt/xenapi/vmops.py:1625
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1658
+#: nova/virt/xenapi/vmops.py:1674
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1694
+#: nova/virt/xenapi/vmops.py:1710
msgid "Migrate Send failed"
msgstr ""
diff --git a/nova/network/l3.py b/nova/network/l3.py
index 9ca6b6a43..7511f7ba4 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -79,7 +79,13 @@ class LinuxNetL3(L3Driver):
if self.initialized:
return
LOG.debug("Initializing linux_net L3 driver")
- linux_net.init_host()
+ fixed_range = kwargs.get('fixed_range', False)
+ networks = kwargs.get('networks', None)
+ if not fixed_range and networks is not None:
+ for network in networks:
+ self.initialize_network(network['cidr'])
+ else:
+ linux_net.init_host()
linux_net.ensure_metadata_ip()
linux_net.metadata_forward()
self.initialized = True
@@ -88,7 +94,7 @@ class LinuxNetL3(L3Driver):
return self.initialized
def initialize_network(self, cidr):
- linux_net.add_snat_rule(cidr)
+ linux_net.init_host(cidr)
def initialize_gateway(self, network_ref):
mac_address = utils.generate_mac_address()
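
For context on the l3.py hunk above: initialize() now inspects the keyword arguments supplied by the network managers instead of always calling linux_net.init_host(). A small usage sketch, not part of the patch, showing the call shape only (the network dicts are illustrative, and these calls drive real iptables setup in practice):

    from nova.network.l3 import LinuxNetL3

    # Legacy mode: a global fixed_range is still configured, so the whole
    # block is handed to linux_net.init_host() as before.
    legacy = LinuxNetL3()
    legacy.initialize(fixed_range='10.0.0.0/8')

    # New mode: fixed_range is unset, so SNAT is set up per network CIDR via
    # initialize_network(), which now wraps linux_net.init_host(cidr).
    per_net = LinuxNetL3()
    per_net.initialize(fixed_range=False,
                       networks=[{'cidr': '192.168.0.0/24'},
                                 {'cidr': '192.168.1.0/24'}])
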
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 482744a8b..678bfdba8 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -69,11 +69,14 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
+from nova import quota
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
+QUOTAS = quota.QUOTAS
+
network_opts = [
cfg.StrOpt('flat_network_bridge',
@@ -106,9 +109,12 @@ network_opts = [
cfg.IntOpt('network_size',
default=256,
help='Number of addresses in each private subnet'),
+ # TODO(mathrock): Deprecate in Grizzly, remove in Havana
cfg.StrOpt('fixed_range',
default='10.0.0.0/8',
- help='Fixed IP address block'),
+               help='DEPRECATED - Fixed IP address block. '
+ 'If set to an empty string, the subnet range(s) will be '
+ 'automatically determined and configured.'),
cfg.StrOpt('fixed_range_v6',
default='fd00::/48',
help='Fixed IPv6 address block'),
@@ -249,7 +255,7 @@ class RPCAllocateFixedIP(object):
self.network_rpcapi.deallocate_fixed_ip(context, address, host)
-class NetworkManager(manager.SchedulerDependentManager):
+class NetworkManager(manager.Manager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
@@ -298,8 +304,7 @@ class NetworkManager(manager.SchedulerDependentManager):
l3_lib = kwargs.get("l3_lib", CONF.l3_lib)
self.l3driver = importutils.import_object(l3_lib)
- super(NetworkManager, self).__init__(service_name='network',
- *args, **kwargs)
+ super(NetworkManager, self).__init__(*args, **kwargs)
def _import_ipam_lib(self, ipam_lib):
self.ipam = importutils.import_module(ipam_lib).get_ipam_lib(self)
@@ -821,47 +826,69 @@ class NetworkManager(manager.SchedulerDependentManager):
# network_get_by_compute_host
address = None
- if network['cidr']:
- address = kwargs.get('address', None)
- if address:
- address = self.db.fixed_ip_associate(context,
- address,
- instance_id,
- network['id'])
- else:
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network['id'],
- instance_id)
- self._do_trigger_security_group_members_refresh_for_instance(
- instance_id)
- self._do_trigger_security_group_handler(
- 'instance_add_security_group', instance_id)
- get_vif = self.db.virtual_interface_get_by_instance_and_network
- vif = get_vif(context, instance_id, network['id'])
- values = {'allocated': True,
- 'virtual_interface_id': vif['id']}
- self.db.fixed_ip_update(context, address, values)
+        # Check the quota here rather than in the API, because this code
+        # is also called from places other than the API.
+ try:
+ reservations = QUOTAS.reserve(context, fixed_ips=1)
+ except exception.OverQuota:
+ pid = context.project_id
+ LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
+ "fixed IP") % locals())
+ raise exception.FixedIpLimitExceeded()
- # NOTE(vish) This db query could be removed if we pass az and name
- # (or the whole instance object).
- instance = self.db.instance_get_by_uuid(context, instance_id)
- name = instance['display_name']
+ try:
+ if network['cidr']:
+ address = kwargs.get('address', None)
+ if address:
+ address = self.db.fixed_ip_associate(context,
+ address,
+ instance_id,
+ network['id'])
+ else:
+ address = self.db.fixed_ip_associate_pool(
+ context.elevated(), network['id'], instance_id)
+ self._do_trigger_security_group_members_refresh_for_instance(
+ instance_id)
+ self._do_trigger_security_group_handler(
+ 'instance_add_security_group', instance_id)
+ get_vif = self.db.virtual_interface_get_by_instance_and_network
+ vif = get_vif(context, instance_id, network['id'])
+ values = {'allocated': True,
+ 'virtual_interface_id': vif['id']}
+ self.db.fixed_ip_update(context, address, values)
+
+ # NOTE(vish) This db query could be removed if we pass az and name
+ # (or the whole instance object).
+ instance = self.db.instance_get_by_uuid(context, instance_id)
+ name = instance['display_name']
- if self._validate_instance_zone_for_dns_domain(context, instance):
- self.instance_dns_manager.create_entry(name, address,
- "A",
- self.instance_dns_domain)
- self.instance_dns_manager.create_entry(instance_id, address,
- "A",
- self.instance_dns_domain)
- self._setup_network_on_host(context, network)
- return address
+ if self._validate_instance_zone_for_dns_domain(context, instance):
+ self.instance_dns_manager.create_entry(
+ name, address, "A", self.instance_dns_domain)
+ self.instance_dns_manager.create_entry(
+ instance_id, address, "A", self.instance_dns_domain)
+ self._setup_network_on_host(context, network)
+
+ QUOTAS.commit(context, reservations)
+ return address
+
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ QUOTAS.rollback(context, reservations)
def deallocate_fixed_ip(self, context, address, host=None, teardown=True):
"""Returns a fixed ip to the pool."""
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_uuid = fixed_ip_ref['instance_uuid']
vif_id = fixed_ip_ref['virtual_interface_id']
+
+ try:
+ reservations = QUOTAS.reserve(context, fixed_ips=-1)
+ except Exception:
+ reservations = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "fixed IP"))
+
self._do_trigger_security_group_members_refresh_for_instance(
instance_uuid)
self._do_trigger_security_group_handler(
@@ -910,6 +937,10 @@ class NetworkManager(manager.SchedulerDependentManager):
self._teardown_network_on_host(context, network)
+ # Commit the reservations
+ if reservations:
+ QUOTAS.commit(context, reservations)
+
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
@@ -1387,6 +1418,9 @@ class NetworkManager(manager.SchedulerDependentManager):
dev = self.driver.get_dev(network)
self.driver.update_dns(context, dev, network)
+ def add_network_to_project(self, ctxt, project_id, network_uuid):
+ raise NotImplementedError()
+
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
@@ -1559,7 +1593,12 @@ class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP,
"""Do any initialization that needs to be run if this is a
standalone service.
"""
- self.l3driver.initialize()
+ if not CONF.fixed_range:
+ ctxt = context.get_admin_context()
+ networks = self.db.network_get_all_by_host(ctxt, self.host)
+ self.l3driver.initialize(fixed_range=False, networks=networks)
+ else:
+ self.l3driver.initialize(fixed_range=CONF.fixed_range)
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
@@ -1567,6 +1606,8 @@ class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP,
"""Sets up network on this host."""
network['dhcp_server'] = self._get_dhcp_ip(context, network)
+ if not CONF.fixed_range:
+ self.l3driver.initialize_network(network.get('cidr'))
self.l3driver.initialize_gateway(network)
if not CONF.fake_network:
@@ -1630,7 +1671,12 @@ class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager):
standalone service.
"""
- self.l3driver.initialize()
+ if not CONF.fixed_range:
+ ctxt = context.get_admin_context()
+ networks = self.db.network_get_all_by_host(ctxt, self.host)
+ self.l3driver.initialize(fixed_range=False, networks=networks)
+ else:
+ self.l3driver.initialize(fixed_range=CONF.fixed_range)
NetworkManager.init_host(self)
self.init_host_floating_ips()
@@ -1773,6 +1819,8 @@ class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager):
address = network['vpn_public_address']
network['dhcp_server'] = self._get_dhcp_ip(context, network)
+ if not CONF.fixed_range:
+ self.l3driver.initialize_network(network.get('cidr'))
self.l3driver.initialize_gateway(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
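
The allocate_fixed_ip()/deallocate_fixed_ip() changes above follow the usual nova.quota reserve-commit-rollback pattern around the new fixed_ips resource (registered in nova/quota.py further down). A condensed sketch of that pattern, with do_allocate standing in for the actual association work:

    from nova import exception
    from nova import quota
    from nova.openstack.common import excutils

    QUOTAS = quota.QUOTAS

    def allocate_with_quota(context, do_allocate):
        # Reserve one fixed IP up front; OverQuota aborts before any DB work.
        try:
            reservations = QUOTAS.reserve(context, fixed_ips=1)
        except exception.OverQuota:
            raise exception.FixedIpLimitExceeded()
        try:
            address = do_allocate()
            QUOTAS.commit(context, reservations)    # usage becomes permanent
            return address
        except Exception:
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)  # release reservation
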
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index 1c9f19a4e..1b0a7c794 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -155,6 +155,13 @@ class BaseRequestHandler(object):
def set_status(self, status_code):
self.response.status = status_code
+ def set_404(self):
+ self.render_xml({"Error": {
+ "Code": "NoSuchKey",
+ "Message": "The resource you requested does not exist"
+ }})
+ self.set_status(404)
+
def finish(self, body=''):
self.response.body = utils.utf8(body)
@@ -233,7 +240,7 @@ class BucketHandler(BaseRequestHandler):
terse = int(self.get_argument("terse", 0))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
- self.set_status(404)
+ self.set_404()
return
object_names = []
for root, dirs, files in os.walk(path):
@@ -294,7 +301,7 @@ class BucketHandler(BaseRequestHandler):
self.application.directory, bucket_name))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
- self.set_status(404)
+ self.set_404()
return
if len(os.listdir(path)) > 0:
self.set_status(403)
@@ -310,7 +317,7 @@ class ObjectHandler(BaseRequestHandler):
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
- self.set_status(404)
+ self.set_404()
return
info = os.stat(path)
self.set_header("Content-Type", "application/unknown")
@@ -328,7 +335,7 @@ class ObjectHandler(BaseRequestHandler):
self.application.directory, bucket))
if (not bucket_dir.startswith(self.application.directory) or
not os.path.isdir(bucket_dir)):
- self.set_status(404)
+ self.set_404()
return
path = self._object_path(bucket, object_name)
if not path.startswith(bucket_dir) or os.path.isdir(path):
@@ -348,7 +355,7 @@ class ObjectHandler(BaseRequestHandler):
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
- self.set_status(404)
+ self.set_404()
return
os.unlink(path)
self.set_status(204)
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index 3f25eed67..c23e36ec8 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -25,25 +25,27 @@ Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
+import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
-from eventlet import semaphore
from eventlet import queue
-
+from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
+
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
+
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue',
@@ -54,6 +56,7 @@ amqp_opts = [
cfg.CONF.register_opts(amqp_opts)
+UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
@@ -236,6 +239,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
'failure': failure}
if ending:
msg['ending'] = True
+ _add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibilty.
@@ -302,6 +306,37 @@ def pack_context(msg, context):
msg.update(context_d)
+class _MsgIdCache(object):
+    """Cache of recently seen message ids, used to detect duplicates."""
+
+    # NOTE: This value could be made a configuration item, but it rarely
+    #       needs to change in practice, so it is kept static for now.
+ DUP_MSG_CHECK_SIZE = 16
+
+ def __init__(self, **kwargs):
+ self.prev_msgids = collections.deque([],
+ maxlen=self.DUP_MSG_CHECK_SIZE)
+
+ def check_duplicate_message(self, message_data):
+        """AMQP consumers may read the same message twice when an exception
+        occurs before the ack is returned. This method prevents that.
+ """
+ if UNIQUE_ID in message_data:
+ msg_id = message_data[UNIQUE_ID]
+ if msg_id not in self.prev_msgids:
+ self.prev_msgids.append(msg_id)
+ else:
+ raise rpc_common.DuplicateMessageError(msg_id=msg_id)
+
+
+def _add_unique_id(msg):
+ """Add unique_id for checking duplicate messages."""
+ unique_id = uuid.uuid4().hex
+ msg.update({UNIQUE_ID: unique_id})
+ LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+
+
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager used by
the Connection class to start up green threads
@@ -349,6 +384,7 @@ class ProxyCallback(_ThreadPoolWithWait):
connection_pool=connection_pool,
)
self.proxy = proxy
+ self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
@@ -368,6 +404,7 @@ class ProxyCallback(_ThreadPoolWithWait):
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+ self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
@@ -406,9 +443,11 @@ class ProxyCallback(_ThreadPoolWithWait):
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
- LOG.exception(_('Exception during message handling'))
- ctxt.reply(None, sys.exc_info(),
- connection_pool=self.connection_pool)
+ # sys.exc_info() is deleted by LOG.exception().
+ exc_info = sys.exc_info()
+ LOG.error(_('Exception during message handling'),
+ exc_info=exc_info)
+ ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
@@ -422,6 +461,7 @@ class MulticallProxyWaiter(object):
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
+ self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
@@ -435,6 +475,7 @@ class MulticallProxyWaiter(object):
def _process_data(self, data):
result = None
+ self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
@@ -479,6 +520,7 @@ class MulticallWaiter(object):
self._done = False
self._got_ending = False
self._conf = conf
+ self.msg_id_cache = _MsgIdCache()
def done(self):
if self._done:
@@ -490,6 +532,7 @@ class MulticallWaiter(object):
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
+ self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
@@ -542,6 +585,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
+ _add_unique_id(msg)
pack_context(msg, context)
# TODO(pekowski): Remove this flag and the code under the if clause
@@ -575,6 +619,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
+ _add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
@@ -583,6 +628,7 @@ def cast(conf, context, topic, msg, connection_pool):
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
+ _add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
@@ -590,6 +636,7 @@ def fanout_cast(conf, context, topic, msg, connection_pool):
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
+ _add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
@@ -599,6 +646,7 @@ def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
+ _add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
@@ -610,6 +658,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
+ _add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
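
With the amqp.py changes above, every outgoing message is stamped with a _unique_id and consumers keep a small _MsgIdCache of recently seen ids. A minimal sketch of the round trip (module-private names used directly, for illustration only):

    from nova.openstack.common.rpc import amqp
    from nova.openstack.common.rpc import common as rpc_common

    msg = {'method': 'ping', 'args': {}}
    amqp._add_unique_id(msg)        # sender side: msg now carries '_unique_id'

    cache = amqp._MsgIdCache()
    cache.check_duplicate_message(msg)      # first delivery: accepted
    try:
        cache.check_duplicate_message(msg)  # redelivery before ack: rejected
    except rpc_common.DuplicateMessageError:
        pass   # the duplicate is dropped instead of being dispatched twice
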
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index 55fc5b044..5fca30717 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -125,6 +125,10 @@ class Timeout(RPCException):
message = _("Timeout while waiting on RPC response.")
+class DuplicateMessageError(RPCException):
+ message = _("Found duplicate message(%(msg_id)s). Skipping it.")
+
+
class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.")
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 0d83253f1..81afc2a8b 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -624,8 +624,8 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, socket.timeout):
- LOG.exception(_('Timed out waiting for RPC response: %s') %
- str(exc))
+ LOG.debug(_('Timed out waiting for RPC response: %s') %
+ str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 542256d0c..fd4b25e7f 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -415,8 +415,8 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
- LOG.exception(_('Timed out waiting for RPC response: %s') %
- str(exc))
+ LOG.debug(_('Timed out waiting for RPC response: %s') %
+ str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index c1cca34e8..4102146fb 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -25,6 +25,7 @@ import eventlet
import greenlet
from oslo.config import cfg
+from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -91,8 +92,8 @@ def _serialize(data):
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
- LOG.error(_("JSON serialization failed."))
- raise
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("JSON serialization failed."))
def _deserialize(data):
@@ -511,9 +512,9 @@ class ZmqProxy(ZmqBaseReactor):
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
- LOG.error(_("Could not create IPC directory %s") %
- (ipc_dir, ))
- raise
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Could not create IPC directory %s") %
+ (ipc_dir, ))
try:
self.register(consumption_proxy,
@@ -521,9 +522,9 @@ class ZmqProxy(ZmqBaseReactor):
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
- LOG.error(_("Could not create ZeroMQ receiver daemon. "
- "Socket may already be in use."))
- raise
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Could not create ZeroMQ receiver daemon. "
+ "Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
@@ -594,6 +595,9 @@ class Connection(rpc_common.Connection):
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
+ # Register with matchmaker.
+ _get_matchmaker().register(topic, CONF.rpc_zmq_host)
+
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
@@ -620,6 +624,10 @@ class Connection(rpc_common.Connection):
self.topics.append(topic)
def close(self):
+ _get_matchmaker().stop_heartbeat()
+ for topic in self.topics:
+ _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
+
self.reactor.close()
self.topics = []
@@ -627,6 +635,7 @@ class Connection(rpc_common.Connection):
self.reactor.wait()
def consume_in_thread(self):
+ _get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
@@ -742,7 +751,7 @@ def _multi_send(method, context, topic, msg, timeout=None,
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
- raise rpc_common.Timeout, "No match from matchmaker."
+ raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
@@ -785,7 +794,7 @@ def fanout_cast(conf, context, topic, msg, **kwargs):
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
-def notify(conf, context, topic, msg, **kwargs):
+def notify(conf, context, topic, msg, envelope):
"""
Send notification event.
Notifications are sent to topic-priority.
@@ -793,9 +802,8 @@ def notify(conf, context, topic, msg, **kwargs):
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
- topic.replace('.', '-')
- kwargs['envelope'] = kwargs.get('envelope', True)
- cast(conf, context, topic, msg, **kwargs)
+ topic = topic.replace('.', '-')
+ cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
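
Taken together, the impl_zmq.py changes tie a Connection's lifecycle to the matchmaker: consumers register their topics, consume_in_thread() starts the heartbeat, and close() stops it and unregisters. A schematic sketch (conf and proxy are placeholders for a real config object and RPC proxy):

    # Assumes: from nova.openstack.common.rpc.impl_zmq import Connection
    def run_zmq_service(conf, proxy, topic):
        conn = Connection(conf)
        conn.create_consumer(topic, proxy)  # register(topic, rpc_zmq_host)
        conn.consume_in_thread()            # also calls start_heartbeat()
        try:
            conn.wait()
        finally:
            conn.close()                    # stop_heartbeat() + unregister()
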
diff --git a/nova/openstack/common/rpc/matchmaker.py b/nova/openstack/common/rpc/matchmaker.py
index 57cc0b34c..e4862396a 100644
--- a/nova/openstack/common/rpc/matchmaker.py
+++ b/nova/openstack/common/rpc/matchmaker.py
@@ -22,6 +22,7 @@ import contextlib
import itertools
import json
+import eventlet
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
@@ -33,6 +34,12 @@ matchmaker_opts = [
cfg.StrOpt('matchmaker_ringfile',
default='/etc/nova/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
+ cfg.IntOpt('matchmaker_heartbeat_freq',
+               default=300,
+ help='Heartbeat frequency'),
+ cfg.IntOpt('matchmaker_heartbeat_ttl',
+               default=600,
+ help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
@@ -70,12 +77,73 @@ class Binding(object):
class MatchMakerBase(object):
- """Match Maker Base Class."""
-
+ """
+ Match Maker Base Class.
+ Build off HeartbeatMatchMakerBase if building a
+ heartbeat-capable MatchMaker.
+ """
def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true
self.bindings = []
+ self.no_heartbeat_msg = _('Matchmaker does not implement '
+ 'registration or heartbeat.')
+
+ def register(self, key, host):
+ """
+ Register a host on a backend.
+        Heartbeats, if applicable, may keep the registration alive.
+ """
+ pass
+
+ def ack_alive(self, key, host):
+ """
+ Acknowledge that a key.host is alive.
+ Used internally for updating heartbeats,
+ but may also be used publically to acknowledge
+        but may also be used publicly to acknowledge
+        that a system is alive (e.g. an rpc message was
+        successfully sent to the host).
+ pass
+
+ def is_alive(self, topic, host):
+ """
+ Checks if a host is alive.
+ """
+ pass
+
+ def expire(self, topic, host):
+ """
+ Explicitly expire a host's registration.
+ """
+ pass
+
+ def send_heartbeats(self):
+ """
+ Send all heartbeats.
+ Use start_heartbeat to spawn a heartbeat greenthread,
+ which loops this method.
+ """
+ pass
+
+ def unregister(self, key, host):
+ """
+ Unregister a topic.
+ """
+ pass
+
+ def start_heartbeat(self):
+ """
+ Spawn heartbeat greenthread.
+ """
+ pass
+
+ def stop_heartbeat(self):
+ """
+ Destroys the heartbeat greenthread.
+ """
+ pass
+
def add_binding(self, binding, rule, last=True):
self.bindings.append((binding, rule, False, last))
@@ -99,6 +167,103 @@ class MatchMakerBase(object):
return workers
+class HeartbeatMatchMakerBase(MatchMakerBase):
+ """
+ Base for a heart-beat capable MatchMaker.
+ Provides common methods for registering,
+ unregistering, and maintaining heartbeats.
+ """
+ def __init__(self):
+ self.hosts = set()
+ self._heart = None
+ self.host_topic = {}
+
+ super(HeartbeatMatchMakerBase, self).__init__()
+
+ def send_heartbeats(self):
+ """
+ Send all heartbeats.
+ Use start_heartbeat to spawn a heartbeat greenthread,
+ which loops this method.
+ """
+ for key, host in self.host_topic:
+ self.ack_alive(key, host)
+
+ def ack_alive(self, key, host):
+ """
+        Acknowledge that a key.host is alive.
+        Used internally for updating heartbeats,
+        but may also be used publicly to acknowledge
+        that a system is alive (e.g. an rpc message was
+        successfully sent to the host).
+ """
+ raise NotImplementedError("Must implement ack_alive")
+
+ def backend_register(self, key, host):
+ """
+ Implements registration logic.
+        Called by register(self, key, host).
+ """
+ raise NotImplementedError("Must implement backend_register")
+
+ def backend_unregister(self, key, key_host):
+ """
+ Implements de-registration logic.
+        Called by unregister(self, key, host).
+ """
+ raise NotImplementedError("Must implement backend_unregister")
+
+ def register(self, key, host):
+ """
+ Register a host on a backend.
+        Heartbeats, if applicable, may keep the registration alive.
+ """
+ self.hosts.add(host)
+ self.host_topic[(key, host)] = host
+ key_host = '.'.join((key, host))
+
+ self.backend_register(key, key_host)
+
+ self.ack_alive(key, host)
+
+ def unregister(self, key, host):
+ """
+ Unregister a topic.
+ """
+ if (key, host) in self.host_topic:
+ del self.host_topic[(key, host)]
+
+ self.hosts.discard(host)
+ self.backend_unregister(key, '.'.join((key, host)))
+
+        LOG.info(_("Matchmaker unregistered: %s, %s") % (key, host))
+
+ def start_heartbeat(self):
+ """
+ Implementation of MatchMakerBase.start_heartbeat
+ Launches greenthread looping send_heartbeats(),
+ yielding for CONF.matchmaker_heartbeat_freq seconds
+ between iterations.
+ """
+ if len(self.hosts) == 0:
+ raise MatchMakerException(
+ _("Register before starting heartbeat."))
+
+ def do_heartbeat():
+ while True:
+ self.send_heartbeats()
+ eventlet.sleep(CONF.matchmaker_heartbeat_freq)
+
+ self._heart = eventlet.spawn(do_heartbeat)
+
+ def stop_heartbeat(self):
+ """
+ Destroys the heartbeat greenthread.
+ """
+ if self._heart:
+ self._heart.kill()
+
+
class DirectBinding(Binding):
"""
Specifies a host in the key via a '.' character
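
HeartbeatMatchMakerBase above deliberately leaves ack_alive(), backend_register(), and backend_unregister() to concrete backends. A toy in-memory subclass, purely illustrative of what a backend is expected to fill in (assumed to live in the matchmaker module, so CONF and HeartbeatMatchMakerBase are in scope; real backends would use an external store such as Redis):

    import time

    class InMemoryHeartbeatMatchMaker(HeartbeatMatchMakerBase):
        """Toy backend: tracks key.host registrations with expiry times."""

        def __init__(self):
            super(InMemoryHeartbeatMatchMaker, self).__init__()
            self._expires = {}

        def ack_alive(self, key, host):
            # Refresh the TTL for this key.host pair.
            self._expires['.'.join((key, host))] = (
                time.time() + CONF.matchmaker_heartbeat_ttl)

        def backend_register(self, key, key_host):
            self._expires.setdefault(key_host, 0)

        def backend_unregister(self, key, key_host):
            self._expires.pop(key_host, None)

        def is_alive(self, topic, host):
            return self._expires.get('.'.join((topic, host)), 0) > time.time()
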
diff --git a/nova/openstack/common/timeutils.py b/nova/openstack/common/timeutils.py
index e2c274057..2691dc456 100644
--- a/nova/openstack/common/timeutils.py
+++ b/nova/openstack/common/timeutils.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/quota.py b/nova/quota.py
index 3361154dd..3903a6add 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -43,6 +43,10 @@ quota_opts = [
cfg.IntOpt('quota_floating_ips',
default=10,
help='number of floating ips allowed per project'),
+ cfg.IntOpt('quota_fixed_ips',
+ default=10,
+ help=('number of fixed ips allowed per project (this should be '
+ 'at least the number of instances allowed)')),
cfg.IntOpt('quota_metadata_items',
default=128,
help='number of metadata items allowed per instance'),
@@ -508,7 +512,7 @@ class NoopQuotaDriver(object):
quotas[resource.name] = -1
return quotas
- def limit_check(self, context, resources, values):
+ def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -528,10 +532,14 @@ class NoopQuotaDriver(object):
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another project's (tenant's) quota.
"""
pass
- def reserve(self, context, resources, deltas, expire=None):
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -561,24 +569,33 @@ class NoopQuotaDriver(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another project's (tenant's) quota.
"""
return []
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another project's (tenant's) quota.
"""
pass
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+        :param project_id: Specify the project_id if the current context
+                           is admin and the admin wants to operate on
+                           another project's (tenant's) quota.
"""
pass
@@ -776,15 +793,20 @@ class QuotaEngine(object):
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
-
- if not quota_driver_class:
- quota_driver_class = CONF.quota_driver
-
- if isinstance(quota_driver_class, basestring):
- quota_driver_class = importutils.import_object(quota_driver_class)
-
self._resources = {}
- self._driver = quota_driver_class
+ self._driver_cls = quota_driver_class
+ self.__driver = None
+
+ @property
+ def _driver(self):
+ if self.__driver:
+ return self.__driver
+ if not self._driver_cls:
+ self._driver_cls = CONF.quota_driver
+ if isinstance(self._driver_cls, basestring):
+ self._driver_cls = importutils.import_object(self._driver_cls)
+ self.__driver = self._driver_cls
+ return self.__driver
def __contains__(self, resource):
return resource in self._resources
@@ -1044,6 +1066,11 @@ def _sync_floating_ips(context, project_id, session):
context, project_id, session=session))
+def _sync_fixed_ips(context, project_id, session):
+ return dict(fixed_ips=db.fixed_ip_count_by_project(
+ context, project_id, session=session))
+
+
def _sync_security_groups(context, project_id, session):
return dict(security_groups=db.security_group_count_by_project(
context, project_id, session=session))
@@ -1058,6 +1085,7 @@ resources = [
ReservableResource('ram', _sync_instances, 'quota_ram'),
ReservableResource('floating_ips', _sync_floating_ips,
'quota_floating_ips'),
+ ReservableResource('fixed_ips', _sync_fixed_ips, 'quota_fixed_ips'),
AbsoluteResource('metadata_items', 'quota_metadata_items'),
AbsoluteResource('injected_files', 'quota_injected_files'),
AbsoluteResource('injected_file_content_bytes',
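With the driver now resolved lazily, a QuotaEngine no longer imports its driver at construction time: it can be handed a driver instance directly, or it falls back to CONF.quota_driver on first use. A usage sketch, mirroring how the cells tests further down swap in the no-op driver:

    from nova import quota

    # Passing an already-constructed driver works because _driver only
    # calls import_object() when it is given a string path.
    noop_engine = quota.QuotaEngine(quota_driver_class=quota.NoopQuotaDriver())
    assert isinstance(noop_engine._driver, quota.NoopQuotaDriver)

    # With no argument, the driver named by CONF.quota_driver is loaded on
    # first access rather than at module import time.
    default_engine = quota.QuotaEngine()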
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index c3022fdea..87b0a39c6 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -31,7 +31,6 @@ from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
import nova.context
-from nova import db
from nova import exception
from nova import manager
from nova import notifications
@@ -209,7 +208,7 @@ class SchedulerManager(manager.Manager):
locals(), instance_uuid=instance_uuid)
# update instance state and notify on the transition
- (old_ref, new_ref) = db.instance_update_and_get_original(
+ (old_ref, new_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
@@ -245,9 +244,9 @@ class SchedulerManager(manager.Manager):
"""
# Getting compute node info and related instances info
- service_ref = db.service_get_by_compute_host(context, host)
- instance_refs = db.instance_get_all_by_host(context,
- service_ref['host'])
+ service_ref = self.db.service_get_by_compute_host(context, host)
+ instance_refs = self.db.instance_get_all_by_host(context,
+ service_ref['host'])
# Getting total available/used resource
compute_ref = service_ref['compute_node'][0]
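Routing these lookups through self.db (the DB proxy set up by the Manager base class) rather than the nova.db module makes the scheduler easier to isolate in tests; roughly, assuming the manager exposes a writable db attribute:

    class FakeSchedulerDB(object):
        # Hypothetical stand-in covering only the calls used above.
        def service_get_by_compute_host(self, context, host):
            return {'host': host,
                    'compute_node': [{'vcpus': 4, 'memory_mb': 4096}]}

        def instance_get_all_by_host(self, context, host):
            return []

    # In a test, the instance attribute can be swapped wholesale instead of
    # monkey-patching module-level nova.db functions:
    #     scheduler_manager.db = FakeSchedulerDB()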
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index c57d6a91b..bb513bf7d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -202,7 +202,7 @@ class AggregateTestCase(test.TestCase):
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
- def test_update_with_bad_host_aggregate(self):
+ def test_update_with_bad_aggregate(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
@@ -236,7 +236,7 @@ class AggregateTestCase(test.TestCase):
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.action,
- self.req, "duplicate_aggregate",
+ self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_aggregate(self):
@@ -256,12 +256,12 @@ class AggregateTestCase(test.TestCase):
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
- self.req, "bogus_aggregate",
- body={"add_host": {"host": "host1"}})
+ self.req, "1",
+ body={"add_host": {"host": "bogus_host"}})
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
- self.req, "1", body={"asdf": "asdf"})
+ self.req, "1", body={"add_host": {"asdf": "asdf"}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
@@ -288,7 +288,7 @@ class AggregateTestCase(test.TestCase):
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
- def test_remove_host_with_bad_host(self):
+ def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
@@ -297,16 +297,27 @@ class AggregateTestCase(test.TestCase):
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
- self.req, "bogus_aggregate",
+ self.req, "1",
body={"remove_host": {"host": "host1"}})
+ def test_remove_host_with_bad_host(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.ComputeHostNotFound(host=host)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.action,
+ self.req, "1", body={"remove_host": {"host": "bogushost"}})
+
def test_remove_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
- self.req, "1", body={"asdf": "asdf", "host": "asdf"})
+ self.req, "1", body={"remove_host": {"asdf": "asdf",
+ "host": "asdf"}})
def test_set_metadata(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
@@ -325,7 +336,7 @@ class AggregateTestCase(test.TestCase):
self.assertEqual(AGGREGATE, result["aggregate"])
- def test_set_metadata_with_bad_host_aggregate(self):
+ def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
@@ -340,12 +351,12 @@ class AggregateTestCase(test.TestCase):
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
- self.req, "bad_aggregate", body=body)
+ self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
- self.req, "bad_aggregate", body=body)
+ self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py
index 89ce4cd5a..bf6bff27c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cells.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cells.py
@@ -27,6 +27,7 @@ from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import utils
FAKE_CELLS = [
@@ -394,3 +395,10 @@ class TestCellsXMLDeserializer(test.TestCase):
deserializer = cells_ext.CellDeserializer()
result = deserializer.deserialize(intext)
self.assertEqual(dict(body=expected), result)
+
+ def test_with_corrupt_xml(self):
+ deserializer = cells_ext.CellDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
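utils.killer_xml_body() itself is not part of this diff; it is presumably an entity-expansion ("billion laughs") payload along these lines, which a safe deserializer must reject with MalformedRequestBody instead of expanding:

    def killer_xml_body():
        # Illustrative payload only -- nested entity definitions that expand
        # exponentially if a parser resolves them.
        return ("""<!DOCTYPE x [
            <!ENTITY a "0123456789">
            <!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;">
            <!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;">
            ]>
            <foo>&c;&c;&c;&c;&c;&c;&c;&c;</foo>""")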
diff --git a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
index 818bc3dff..67417e60e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
@@ -30,7 +30,8 @@ fake_fixed_ips = [{'id': 1,
'allocated': False,
'leased': False,
'reserved': False,
- 'host': None},
+ 'host': None,
+ 'deleted': False},
{'id': 2,
'address': '192.168.1.2',
'network_id': 1,
@@ -39,13 +40,24 @@ fake_fixed_ips = [{'id': 1,
'allocated': False,
'leased': False,
'reserved': False,
- 'host': None},
+ 'host': None,
+ 'deleted': False},
+ {'id': 3,
+ 'address': '10.0.0.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 3,
+ 'instance_uuid': '3',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'deleted': True},
]
def fake_fixed_ip_get_by_address(context, address):
for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
+ if fixed_ip['address'] == address and not fixed_ip['deleted']:
return fixed_ip
raise exception.FixedIpNotFoundForAddress(address=address)
@@ -54,7 +66,7 @@ def fake_fixed_ip_get_by_address_detailed(context, address):
network = {'id': 1,
'cidr': "192.168.1.0/24"}
for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
+ if fixed_ip['address'] == address and not fixed_ip['deleted']:
return (fixed_ip, FakeModel(network), None)
raise exception.FixedIpNotFoundForAddress(address=address)
@@ -115,14 +127,18 @@ class FixedIpTest(test.TestCase):
'address': '192.168.1.1'}}
self.assertEqual(response, res_dict)
- def test_fixed_ips_get_fail(self):
+ def test_fixed_ips_get_bad_ip_fail(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
'10.0.0.1')
+ def test_fixed_ips_get_deleted_ip_fail(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.2')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
+ '10.0.0.2')
+
def test_fixed_ip_reserve(self):
fake_fixed_ips[0]['reserved'] = False
- ip_addr = '192.168.1.1'
body = {'reserve': None}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-fixed-ips/192.168.1.1/action')
@@ -132,16 +148,21 @@ class FixedIpTest(test.TestCase):
self.assertEqual(fake_fixed_ips[0]['reserved'], True)
def test_fixed_ip_reserve_bad_ip(self):
- ip_addr = '10.0.0.1'
body = {'reserve': None}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-fixed-ips/10.0.0.1/action')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
'10.0.0.1', body)
+ def test_fixed_ip_reserve_deleted_ip(self):
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.2/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.2', body)
+
def test_fixed_ip_unreserve(self):
fake_fixed_ips[0]['reserved'] = True
- ip_addr = '192.168.1.1'
body = {'unreserve': None}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-fixed-ips/192.168.1.1/action')
@@ -151,9 +172,15 @@ class FixedIpTest(test.TestCase):
self.assertEqual(fake_fixed_ips[0]['reserved'], False)
def test_fixed_ip_unreserve_bad_ip(self):
- ip_addr = '10.0.0.1'
body = {'unreserve': None}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-fixed-ips/10.0.0.1/action')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
'10.0.0.1', body)
+
+ def test_fixed_ip_unreserve_deleted_ip(self):
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.2/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.2', body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 85f93a9d5..5678933dc 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -21,9 +21,11 @@ from nova.compute import power_state
from nova.compute import vm_states
from nova import context as context_maker
from nova import db
+from nova import exception
from nova.openstack.common import log as logging
from nova import test
from nova.tests import fake_hosts
+from nova.tests import utils
LOG = logging.getLogger(__name__)
@@ -390,3 +392,9 @@ class HostSerializerTest(test.TestCase):
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
+
+ def test_corrupt_xml(self):
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index 8238f9248..7bf871690 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -94,10 +94,14 @@ NEW_NETWORK = {
class FakeNetworkAPI(object):
_sentinel = object()
+ _vlan_is_disabled = False
def __init__(self):
self.networks = copy.deepcopy(FAKE_NETWORKS)
+ def disable_vlan(self):
+ self._vlan_is_disabled = True
+
def delete(self, context, network_id):
for i, network in enumerate(self.networks):
if network['id'] == network_id:
@@ -125,6 +129,8 @@ class FakeNetworkAPI(object):
def add_network_to_project(self, context,
project_id, network_uuid=None):
+ if self._vlan_is_disabled:
+ raise NotImplementedError()
if network_uuid:
for network in self.networks:
if network.get('project_id', None) is None:
@@ -274,6 +280,13 @@ class NetworksTest(test.TestCase):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, 100)
+ def test_network_add_vlan_disabled(self):
+ self.fake_network_api.disable_vlan()
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ self.controller.add, req, {'id': uuid})
+
def test_network_add(self):
uuid = FAKE_NETWORKS[1]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
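The new VLAN-disabled test relies on the controller translating NotImplementedError from add_network_to_project() into a 501; the corresponding handler change in contrib/os_networks.py is not shown here, but it presumably amounts to something like this hypothetical excerpt:

    import webob.exc

    class NetworkController(object):
        # Hypothetical excerpt; only the error translation matters here.
        def __init__(self, network_api):
            self.network_api = network_api

        def add(self, req, body):
            context = req.environ['nova.context']
            network_id = body.get('id')
            try:
                self.network_api.add_network_to_project(
                    context, context.project_id, network_id)
            except NotImplementedError:
                raise webob.exc.HTTPNotImplemented(
                    explanation="VLAN networking is not enabled")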
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
index 7b0b62180..8286661a0 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
@@ -24,11 +24,13 @@ from nova.tests.api.openstack import fakes
def quota_set(class_name):
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
- 'ram': 51200, 'floating_ips': 10, 'instances': 10,
- 'injected_files': 5, 'cores': 20,
- 'injected_file_content_bytes': 10240, 'security_groups': 10,
- 'security_group_rules': 20, 'key_pairs': 100,
- 'injected_file_path_bytes': 255}}
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': 10, 'instances': 10,
+ 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20, 'key_pairs': 100,
+ 'injected_file_path_bytes': 255}}
class QuotaClassSetsTest(test.TestCase):
@@ -43,6 +45,7 @@ class QuotaClassSetsTest(test.TestCase):
'cores': 20,
'ram': 51200,
'floating_ips': 10,
+ 'fixed_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
@@ -61,6 +64,7 @@ class QuotaClassSetsTest(test.TestCase):
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['fixed_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
@@ -86,7 +90,8 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
@@ -103,7 +108,8 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
@@ -130,6 +136,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
injected_file_content_bytes=20,
ram=50,
floating_ips=60,
+ fixed_ips=10,
instances=70,
injected_files=80,
security_groups=10,
@@ -154,6 +161,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
injected_file_content_bytes='20',
ram='50',
floating_ips='60',
+ fixed_ips='10',
instances='70',
injected_files='80',
security_groups='10',
@@ -167,6 +175,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
'</injected_file_content_bytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
+ '<fixed_ips>10</fixed_ips>'
'<instances>70</instances>'
'<injected_files>80</injected_files>'
'<cores>90</cores>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index 0616c4628..1ff7e60ab 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -26,7 +26,7 @@ from nova.tests.api.openstack import fakes
def quota_set(id):
return {'quota_set': {'id': id, 'metadata_items': 128,
- 'ram': 51200, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10, 'fixed_ips': 10,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
@@ -45,6 +45,7 @@ class QuotaSetsTest(test.TestCase):
'cores': 20,
'ram': 51200,
'floating_ips': 10,
+ 'fixed_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
@@ -61,6 +62,7 @@ class QuotaSetsTest(test.TestCase):
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['fixed_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
@@ -81,6 +83,7 @@ class QuotaSetsTest(test.TestCase):
'cores': 20,
'ram': 51200,
'floating_ips': 10,
+ 'fixed_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
@@ -106,12 +109,13 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100}}
+ 'key_pairs': 100, 'fixed_ips': 10}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
@@ -122,7 +126,8 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
@@ -132,9 +137,20 @@ class QuotaSetsTest(test.TestCase):
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 'update_me', body)
+ def test_quotas_update_invalid_key(self):
+ body = {'quota_set': {'instances2': -2, 'cores': -2,
+ 'ram': -2, 'floating_ips': -2,
+ 'metadata_items': -2, 'injected_files': -2,
+ 'injected_file_content_bytes': -2}}
+
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'update_me', body)
+
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
- 'ram': -2, 'floating_ips': -2,
+ 'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
@@ -147,7 +163,8 @@ class QuotaSetsTest(test.TestCase):
expected_resp = {'quota_set': {
'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
@@ -157,7 +174,8 @@ class QuotaSetsTest(test.TestCase):
# when PUT JSON format with empty string for quota
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': '', 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
@@ -171,7 +189,8 @@ class QuotaSetsTest(test.TestCase):
# when PUT XML format with empty string for quota
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': {}, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
+ 'fixed_ips': 10, 'metadata_items': 128,
+ 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
@@ -197,6 +216,7 @@ class QuotaXMLSerializerTest(test.TestCase):
injected_file_content_bytes=20,
ram=50,
floating_ips=60,
+ fixed_ips=10,
instances=70,
injected_files=80,
security_groups=10,
@@ -220,6 +240,7 @@ class QuotaXMLSerializerTest(test.TestCase):
injected_file_content_bytes='20',
ram='50',
floating_ips='60',
+ fixed_ips='10',
instances='70',
injected_files='80',
security_groups='10',
@@ -233,6 +254,7 @@ class QuotaXMLSerializerTest(test.TestCase):
'</injected_file_content_bytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
+ '<fixed_ips>10</fixed_ips>'
'<instances>70</instances>'
'<injected_files>80</injected_files>'
'<security_groups>10</security_groups>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 4919d461f..02aa96956 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -30,6 +30,7 @@ from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import utils
CONF = cfg.CONF
FAKE_UUID = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
@@ -727,13 +728,6 @@ class TestSecurityGroupRules(test.TestCase):
self.assertEquals(security_group_rule['to_port'], 81)
def test_create_by_invalid_cidr_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "parent_group_id": self.sg2['id'],
- "cidr": "10.2.3.124/2433"}}
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
@@ -1146,6 +1140,13 @@ class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
}
self.assertEquals(request['body'], expected)
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
class TestSecurityGroupXMLDeserializer(test.TestCase):
@@ -1192,6 +1193,13 @@ class TestSecurityGroupXMLDeserializer(test.TestCase):
}
self.assertEquals(request['body'], expected)
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
class TestSecurityGroupXMLSerializer(test.TestCase):
def setUp(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index d4bf62d19..cb7ce67cb 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -16,6 +16,7 @@
import datetime
from nova.api.openstack.compute.contrib import services
+from nova import availability_zones
from nova import context
from nova import db
from nova import exception
@@ -76,7 +77,13 @@ class FakeRequestWithHostService(object):
GET = {"host": "host1", "service": "nova-compute"}
-def fake_service_get_all(context):
+def fake_host_api_service_get_all(context, filters=None, set_zones=False):
+ if set_zones or 'availability_zone' in filters:
+ return availability_zones.set_availability_zones(context,
+ fake_services_list)
+
+
+def fake_db_api_service_get_all(context, disabled=None):
return fake_services_list
@@ -112,15 +119,16 @@ class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
- self.stubs.Set(db, "service_get_all", fake_service_get_all)
+ self.context = context.get_admin_context()
+ self.controller = services.ServiceController()
+
+ self.stubs.Set(self.controller.host_api, "service_get_all",
+ fake_host_api_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update", fake_service_update)
- self.context = context.get_admin_context()
- self.controller = services.ServiceController()
-
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
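The host_api fake above defers to availability_zones.set_availability_zones() so zone filtering still works; for tests that do not care about real aggregate lookups, an even simpler stand-in could tag every service with a fixed zone (hypothetical helper, not part of this change):

    def fake_set_availability_zones(context, services):
        # Hypothetical simplification: annotate each service dict with a
        # static zone instead of consulting host aggregates.
        return [dict(service, availability_zone='nova') for service in services]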
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index 4c59e5aa9..ab9906135 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -22,6 +22,7 @@ import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import api
+from nova.compute import instance_types
from nova import context
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
@@ -29,6 +30,7 @@ from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
+from nova import utils
SERVERS = 5
TENANTS = 2
@@ -42,17 +44,21 @@ START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
-def fake_instance_type_get(self, context, instance_type_id):
- return {'id': 1,
- 'vcpus': VCPUS,
- 'root_gb': ROOT_GB,
- 'ephemeral_gb': EPHEMERAL_GB,
- 'memory_mb': MEMORY_MB,
- 'name':
- 'fakeflavor'}
+FAKE_INST_TYPE = {'id': 1,
+ 'vcpus': VCPUS,
+ 'root_gb': ROOT_GB,
+ 'ephemeral_gb': EPHEMERAL_GB,
+ 'memory_mb': MEMORY_MB,
+ 'name': 'fakeflavor',
+ 'flavorid': 'foo',
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ 'swap': 0}
def get_fake_db_instance(start, end, instance_id, tenant_id):
+ sys_meta = utils.dict_to_metadata(
+ instance_types.save_instance_type_info({}, FAKE_INST_TYPE))
return {'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % instance_id,
'image_ref': '1',
@@ -62,7 +68,8 @@ def get_fake_db_instance(start, end, instance_id, tenant_id):
'state_description': 'state',
'instance_type_id': 1,
'launched_at': start,
- 'terminated_at': end}
+ 'terminated_at': end,
+ 'system_metadata': sys_meta}
def fake_instance_get_active_by_window_joined(self, context, begin, end,
@@ -77,8 +84,6 @@ def fake_instance_get_active_by_window_joined(self, context, begin, end,
class SimpleTenantUsageTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageTest, self).setUp()
- self.stubs.Set(api.API, "get_instance_type",
- fake_instance_type_get)
self.stubs.Set(api.API, "get_active_by_window",
fake_instance_get_active_by_window_joined)
self.admin_context = context.RequestContext('fakeadmin_0',
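The fake instances now carry their flavor in system_metadata instead of the test stubbing compute.api.API.get_instance_type. A small sketch of the round trip, assuming save_instance_type_info() stores 'instance_type_'-prefixed keys and dict_to_metadata() turns them into key/value rows for the fake DB record:

    from nova.compute import instance_types
    from nova import utils

    fake_flavor = {'id': 1, 'name': 'fakeflavor', 'vcpus': 2, 'memory_mb': 2048,
                   'root_gb': 10, 'ephemeral_gb': 0, 'flavorid': 'foo',
                   'rxtx_factor': 1.0, 'vcpu_weight': 1, 'swap': 0}

    sys_meta = instance_types.save_instance_type_info({}, fake_flavor)
    # assumed shape: {'instance_type_vcpus': 2, 'instance_type_memory_mb': 2048, ...}

    rows = utils.dict_to_metadata(sys_meta)
    # assumed shape: [{'key': 'instance_type_vcpus', 'value': 2}, ...], matching
    # what get_fake_db_instance() stores as 'system_metadata'.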
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 62a688962..754e103d4 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -32,6 +32,7 @@ from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
from nova.tests import matchers
+from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
@@ -1146,3 +1147,10 @@ class TestServerActionXMLDeserializer(test.TestCase):
self.deserializer.deserialize,
serial_request,
'action')
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index b77814003..2dfefc541 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -51,6 +51,7 @@ from nova.tests.api.openstack import fakes
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests import matchers
+from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
@@ -3859,6 +3860,13 @@ class TestServerCreateRequestXMLDeserializer(test.TestCase):
}
self.assertEquals(request['body'], expected)
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
class TestAddressesXMLSerialization(test.TestCase):
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 227044572..a6344c09f 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -28,7 +28,7 @@ from nova.api.openstack import common
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
-from nova.tests import utils as test_utils
+from nova.tests import utils
NS = "{http://docs.openstack.org/compute/api/v1.1}"
@@ -297,7 +297,7 @@ class MiscFunctionsTest(test.TestCase):
self.fail("webob.exc.HTTPConflict was not raised")
def test_check_img_metadata_properties_quota_valid_metadata(self):
- ctxt = test_utils.get_test_admin_context()
+ ctxt = utils.get_test_admin_context()
metadata1 = {"key": "value"}
actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
self.assertEqual(actual, None)
@@ -311,7 +311,7 @@ class MiscFunctionsTest(test.TestCase):
self.assertEqual(actual, None)
def test_check_img_metadata_properties_quota_inv_metadata(self):
- ctxt = test_utils.get_test_admin_context()
+ ctxt = utils.get_test_admin_context()
metadata1 = {"a" * 260: "value"}
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata1)
@@ -512,3 +512,11 @@ class MetadataXMLSerializationTest(test.TestCase):
""".replace(" ", "").replace("\n", ""))
self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_metadata_deserializer(self):
+ """Should throw a 400 error on corrupt xml."""
+ deserializer = common.MetadataXMLDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index a18dc78d5..374aa1162 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -7,6 +7,7 @@ from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import utils
class RequestTest(test.TestCase):
@@ -272,6 +273,21 @@ class ResourceTest(test.TestCase):
'<fooAction>true</fooAction>')
self.assertEqual(controller._action_foo, method)
+ def test_get_method_action_corrupt_xml(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ resource.get_method,
+ None, 'action',
+ 'application/xml',
+ utils.killer_xml_body())
+
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
diff --git a/nova/tests/api/openstack/test_xmlutil.py b/nova/tests/api/openstack/test_xmlutil.py
index bd7f24233..3ed6a86fc 100644
--- a/nova/tests/api/openstack/test_xmlutil.py
+++ b/nova/tests/api/openstack/test_xmlutil.py
@@ -16,9 +16,12 @@
# under the License.
from lxml import etree
+from xml.dom import minidom
from nova.api.openstack import xmlutil
+from nova import exception
from nova import test
+from nova.tests import utils as tests_utils
class SelectorTest(test.TestCase):
@@ -720,3 +723,64 @@ class MiscellaneousXMLUtilTests(test.TestCase):
tmpl = xmlutil.MasterTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
+
+ def test_safe_parse_xml(self):
+
+ normal_body = ("""
+ <?xml version="1.0" ?><foo>
+ <bar>
+ <v1>hey</v1>
+ <v2>there</v2>
+ </bar>
+ </foo>""").strip()
+
+ dom = xmlutil.safe_minidom_parse_string(normal_body)
+ self.assertEqual(normal_body, str(dom.toxml()))
+
+ self.assertRaises(exception.MalformedRequestBody,
+ xmlutil.safe_minidom_parse_string,
+ tests_utils.killer_xml_body())
+
+
+class SafeParserTestCase(test.TestCase):
+ def test_external_dtd(self):
+ xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+ <html>
+ <head/>
+ <body>html with dtd</body>
+ </html>""")
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_external_file(self):
+ xml_string = """<!DOCTYPE external [
+ <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
+ ]>
+ <root>&ee;</root>"""
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_notation(self):
+ xml_string = """<?xml version="1.0" standalone="no"?>
+ <!-- comment data -->
+ <!DOCTYPE x [
+ <!NOTATION notation SYSTEM "notation.jpeg">
+ ]>
+ <root attr1="value1">
+ </root>"""
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
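ProtectedExpatParser itself is outside this hunk; parsers of this kind are usually a thin subclass of the stdlib expat SAX parser that raises on DTD and entity events, roughly along these lines (a sketch, not the exact implementation):

    from xml.sax import expatreader

    class ProtectedExpatParser(expatreader.ExpatParser):
        """Sketch: reject DTDs, entity declarations and unparsed entities."""

        def __init__(self, forbid_dtd=True, forbid_entities=True,
                     *args, **kwargs):
            expatreader.ExpatParser.__init__(self, *args, **kwargs)
            self.forbid_dtd = forbid_dtd
            self.forbid_entities = forbid_entities

        def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
            raise ValueError("Inline DTD forbidden")

        def entity_decl(self, *args):
            raise ValueError("Entity declaration forbidden")

        def unparsed_entity_decl(self, *args):
            raise ValueError("Unparsed entity forbidden")

        def reset(self):
            expatreader.ExpatParser.reset(self)
            if self.forbid_dtd:
                self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
            if self.forbid_entities:
                self._parser.EntityDeclHandler = self.entity_decl
                self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl

Driving it through minidom.parseString(xml_string, parser), as the tests above do, then raises ValueError for DTDs, external entities and notations.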
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index effe27660..3c7dd1941 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -938,7 +938,6 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
- 'instance_type': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
diff --git a/nova/tests/cells/test_cells_utils.py b/nova/tests/cells/test_cells_utils.py
index 84f60a796..871df0372 100644
--- a/nova/tests/cells/test_cells_utils.py
+++ b/nova/tests/cells/test_cells_utils.py
@@ -80,3 +80,26 @@ class CellsUtilsTestCase(test.TestCase):
{'changes-since': 'fake-updated-since',
'project_id': 'fake-project'})
self.assertEqual(call_info['shuffle'], 2)
+
+ def test_split_cell_and_item(self):
+ path = 'australia', 'queensland', 'gold_coast'
+ cell = cells_utils._PATH_CELL_SEP.join(path)
+ item = 'host_5'
+ together = cells_utils.cell_with_item(cell, item)
+ self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]),
+ together)
+
+ # Test normal usage
+ result_cell, result_item = cells_utils.split_cell_and_item(together)
+ self.assertEqual(cell, result_cell)
+ self.assertEqual(item, result_item)
+
+ # Test with no cell
+ cell = None
+ together = cells_utils.cell_with_item(cell, item)
+ self.assertEqual(item, together)
+ print together
+ result_cell, result_item = cells_utils.split_cell_and_item(together)
+ print result_cell, result_item
+ self.assertEqual(cell, result_cell)
+ self.assertEqual(item, result_item)
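For reference, the helpers being exercised compose and split "cell path" plus "item" identifiers; mirroring the assertions above:

    from nova.cells import utils as cells_utils

    cell = cells_utils._PATH_CELL_SEP.join(
        ['australia', 'queensland', 'gold_coast'])
    combined = cells_utils.cell_with_item(cell, 'host_5')

    result_cell, result_item = cells_utils.split_cell_and_item(combined)
    assert (result_cell, result_item) == (cell, 'host_5')

    # With no cell, the item passes through unchanged and splits back out alone.
    assert cells_utils.cell_with_item(None, 'host_5') == 'host_5'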
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e94d8b788..03457841d 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -56,7 +56,7 @@ from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
-import nova.policy
+from nova import policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
@@ -172,6 +172,12 @@ class BaseTestCase(test.TestCase):
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
+ def stub_out_client_exceptions(self):
+ def passthru(exceptions, func, *args, **kwargs):
+ return func(*args, **kwargs)
+
+ self.stubs.Set(rpc_common, 'catch_client_exception', passthru)
+
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
"""Create a test instance."""
if not params:
@@ -238,6 +244,60 @@ class BaseTestCase(test.TestCase):
return db.security_group_create(self.context, values)
+class ComputeVolumeTestCase(BaseTestCase):
+ def setUp(self):
+ super(ComputeVolumeTestCase, self).setUp()
+ self.volume_id = 'fake'
+ self.instance = {
+ 'id': 'fake',
+ 'uuid': 'fake',
+ 'name': 'fake',
+ 'root_device_name': '/dev/vda',
+ }
+ self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
+ {'id': self.volume_id})
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.driver, 'attach_volume',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ lambda *a, **kw: {})
+ self.stubs.Set(self.compute.volume_api, 'attach',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'check_attach',
+ lambda *a, **kw: None)
+
+ def store_cinfo(context, *args):
+ self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
+
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update',
+ store_cinfo)
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update_or_create',
+ store_cinfo)
+
+ def test_attach_volume_serial(self):
+
+ self.compute.attach_volume(self.context, self.volume_id,
+ '/dev/vdb', self.instance)
+ self.assertEqual(self.cinfo.get('serial'), self.volume_id)
+
+ def test_boot_volume_serial(self):
+ block_device_mapping = [{
+ 'id': 1,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': '/dev/vdb',
+ 'delete_on_termination': False,
+ }]
+ self.compute._setup_block_device_mapping(self.context, self.instance,
+ block_device_mapping)
+ self.assertEqual(self.cinfo.get('serial'), self.volume_id)
+
+
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
@@ -708,6 +768,19 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance,
self.context, instance=instance)
+ def test_run_instance_bails_on_missing_instance(self):
+ # Make sure that run_instance() will quickly ignore a deleted instance
+ called = {}
+ instance = self._create_instance()
+
+ def fake_instance_update(self, *a, **args):
+ called['instance_update'] = True
+ raise exception.InstanceNotFound(instance_id='foo')
+ self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
+
+ self.compute.run_instance(self.context, instance)
+ self.assertIn('instance_update', called)
+
def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
#check failed to schedule --> terminate
@@ -1150,15 +1223,20 @@ class ComputeTestCase(BaseTestCase):
# this is called with the wrong args, so we have to hack
# around it.
reboot_call_info = {}
- expected_call_info = {'args': (econtext, updated_instance1,
- expected_nw_info, reboot_type,
- fake_block_dev_info),
- 'kwargs': {}}
+ expected_call_info = {
+ 'args': (econtext, updated_instance1, expected_nw_info,
+ reboot_type),
+ 'kwargs': {'block_device_info': fake_block_dev_info}}
def fake_reboot(*args, **kwargs):
reboot_call_info['args'] = args
reboot_call_info['kwargs'] = kwargs
+ # NOTE(sirp): Since `bad_volumes_callback` is a function defined
+ # within `reboot_instance`, we don't have access to its value and
+ # can't stub it out, thus we skip that comparison.
+ kwargs.pop('bad_volumes_callback')
+
self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
# Power state should be updated again
@@ -1529,9 +1607,16 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
+ self.assertRaises(rpc_common.ClientException,
+ self.compute.get_vnc_console,
+ self.context, 'invalid', instance=instance)
+
+ self.stub_out_client_exceptions()
+
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
+
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
@@ -1542,9 +1627,16 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
+ self.assertRaises(rpc_common.ClientException,
+ self.compute.get_vnc_console,
+ self.context, None, instance=instance)
+
+ self.stub_out_client_exceptions()
+
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, None, instance=instance)
+
self.compute.terminate_instance(self.context, instance=instance)
def test_spicehtml5_spice_console(self):
@@ -1570,9 +1662,16 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
+ self.assertRaises(rpc_common.ClientException,
+ self.compute.get_spice_console,
+ self.context, 'invalid', instance=instance)
+
+ self.stub_out_client_exceptions()
+
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, 'invalid', instance=instance)
+
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_spice_console_type(self):
@@ -1583,11 +1682,56 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
+ self.assertRaises(rpc_common.ClientException,
+ self.compute.get_spice_console,
+ self.context, None, instance=instance)
+
+ self.stub_out_client_exceptions()
+
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, None, instance=instance)
+
self.compute.terminate_instance(self.context, instance=instance)
+ def test_vnc_console_instance_not_ready(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+ instance = self._create_fake_instance(
+ params={'vm_state': vm_states.BUILDING})
+ instance = jsonutils.to_primitive(instance)
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.stub_out_client_exceptions()
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_vnc_console, self.context, 'novnc',
+ instance=instance)
+
+ def test_spice_console_instance_not_ready(self):
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance(
+ params={'vm_state': vm_states.BUILDING})
+ instance = jsonutils.to_primitive(instance)
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.stub_out_client_exceptions()
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_spice_console, self.context, 'spice-html5',
+ instance=instance)
+
def test_diagnostics(self):
# Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
@@ -1828,7 +1972,8 @@ class ComputeTestCase(BaseTestCase):
"""
instance = self._create_fake_instance()
- def fake_delete_instance(context, instance, bdms):
+ def fake_delete_instance(context, instance, bdms,
+ reservations=None):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
@@ -1990,6 +2135,59 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
return reservations
+ def test_quotas_succesful_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ resvs = self._ensure_quota_reservations_committed()
+ self.compute.terminate_instance(self.context, instance,
+ bdms=None, reservations=resvs)
+
+ def test_quotas_failed_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ def fake_shutdown_instance(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_shutdown_instance',
+ fake_shutdown_instance)
+
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.assertRaises(test.TestingException,
+ self.compute.terminate_instance,
+ self.context, instance,
+ bdms=None, reservations=resvs)
+
+ def test_quotas_succesful_soft_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(task_state=task_states.SOFT_DELETING)))
+ resvs = self._ensure_quota_reservations_committed()
+ self.compute.soft_delete_instance(self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_failed_soft_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(task_state=task_states.SOFT_DELETING)))
+
+ def fake_soft_delete(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute.driver, 'soft_delete',
+ fake_soft_delete)
+
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.assertRaises(test.TestingException,
+ self.compute.soft_delete_instance,
+ self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_destroy_of_soft_deleted_instance(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(vm_state=vm_states.SOFT_DELETED)))
+ # Termination should be successful, but quota reservations
+ # rolled back because the instance was in SOFT_DELETED state.
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.compute.terminate_instance(self.context, instance,
+ bdms=None, reservations=resvs)
+
def test_finish_resize(self):
# Contrived test to ensure finish_resize doesn't raise anything.
@@ -3414,13 +3612,15 @@ class ComputeTestCase(BaseTestCase):
if migration['id'] == m['id']:
migration['status'] = status
- def fake_confirm_resize(context, instance):
+ def fake_confirm_resize(context, instance, migration_ref=None):
# raise exception for 'fake_uuid4' to check migration status
# does not get set to 'error' on confirm_resize failure.
if instance['uuid'] == 'fake_uuid4':
raise test.TestingException
+ self.assertNotEqual(migration_ref, None)
for migration in migrations:
- if migration['instance_uuid'] == instance['uuid']:
+ if (migration['instance_uuid'] ==
+ migration_ref['instance_uuid']):
migration['status'] = 'confirmed'
self.stubs.Set(db, 'instance_get_by_uuid',
@@ -4302,33 +4502,6 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
self.assertTrue(instance['deleted'])
- def test_repeated_delete_quota(self):
- in_use = {'instances': 1}
-
- def fake_reserve(context, expire=None, project_id=None, **deltas):
- return dict(deltas.iteritems())
-
- self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
-
- def fake_commit(context, deltas, project_id=None):
- for k, v in deltas.iteritems():
- in_use[k] = in_use.get(k, 0) + v
-
- self.stubs.Set(QUOTAS, 'commit', fake_commit)
-
- instance, instance_uuid = self._run_instance(params={
- 'host': CONF.host})
-
- self.compute_api.delete(self.context, instance)
- self.compute_api.delete(self.context, instance)
-
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.DELETING)
-
- self.assertEquals(in_use['instances'], 0)
-
- db.instance_destroy(self.context, instance['uuid'])
-
def test_delete_fast_if_host_not_set(self):
instance = self._create_fake_instance({'host': None})
self.compute_api.delete(self.context, instance)
@@ -4363,9 +4536,8 @@ class ComputeAPITestCase(BaseTestCase):
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
+ # Make sure this is not called on the API side.
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(),
- project_id=mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.soft_delete(self.context, instance)
@@ -4521,9 +4693,6 @@ class ComputeAPITestCase(BaseTestCase):
# Ensure quotas are committed
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
- if self.__class__.__name__ == 'CellsComputeAPITestCase':
- # Called a 2nd time (for the child cell) when testing cells
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.restore(self.context, instance)
@@ -6853,7 +7022,7 @@ class ComputePolicyTestCase(BaseTestCase):
self.compute_api = compute.API()
def test_actions_are_prefixed(self):
- self.mox.StubOutWithMock(nova.policy, 'enforce')
+ self.mox.StubOutWithMock(policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
self.mox.ReplayAll()
compute_api.check_policy(self.context, 'reboot', {})
@@ -7260,18 +7429,18 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.compute._spawn(mox.IgnoreArg(), self.instance, None, None, None,
False, None).AndRaise(test.TestingException("BuildError"))
self.compute._reschedule_or_reraise(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(), None, None, None, False, None, {})
+ mox.IgnoreArg(), None, None, None, False, None, {}, [])
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
False, None, self.instance)
- def test_deallocate_network_fail(self):
- """Test de-allocation of network failing before re-scheduling logic
- can even run.
+ def test_shutdown_instance_fail(self):
+ """Test shutdown instance failing before re-scheduling logic can even
+ run.
"""
instance_uuid = self.instance['uuid']
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
try:
raise test.TestingException("Original")
@@ -7281,8 +7450,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
- self.compute._deallocate_network(self.context,
- self.instance).AndRaise(InnerTestingException("Error"))
+ self.compute._shutdown_instance(self.context, self.instance,
+ mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
@@ -7297,11 +7466,14 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# Test handling of exception from _reschedule.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
- self.compute._deallocate_network(self.context,
- self.instance)
+ self.compute._shutdown_instance(self.context, self.instance,
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(self.context, instance_uuid,
+ mox.IgnoreArg())
self.compute._reschedule(self.context, None, instance_uuid,
{}, self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING).AndRaise(
@@ -7322,7 +7494,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
@@ -7332,8 +7505,11 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
- self.compute._deallocate_network(self.context,
- self.instance)
+
+ self.compute._shutdown_instance(self.context, self.instance,
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(self.context, instance_uuid,
+ mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance, method_args,
task_states.SCHEDULING, exc_info).AndReturn(False)
@@ -7350,7 +7526,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
@@ -7361,8 +7538,10 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
- self.compute._deallocate_network(self.context,
- self.instance)
+ self.compute._shutdown_instance(self.context, self.instance,
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(self.context, instance_uuid,
+ mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING, exc_info).AndReturn(
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index 8ba35e033..78100bcc3 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -18,10 +18,13 @@ Tests For Compute w/ Cells
"""
import functools
+from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova import db
+from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova import quota
from nova.tests.compute import test_compute
@@ -40,7 +43,16 @@ def stub_call_to_cells(context, instance, method, *args, **kwargs):
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
- return fn(context, instance, *args, **kwargs)
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ return fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
@@ -52,7 +64,17 @@ def stub_cast_to_cells(context, instance, method, *args, **kwargs):
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
- fn(context, instance, *args, **kwargs)
+
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
def deploy_stubs(stubs, api, original_instance=None):
@@ -171,6 +193,36 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def test_evacuate(self):
self.skipTest("Test is incompatible with cells.")
+ def test_delete_instance_no_cell(self):
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi,
+ 'instance_delete_everywhere')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_cast_to_cells')
+ inst = self._create_fake_instance()
+ exc = exception.InstanceUnknownCell(instance_uuid=inst['uuid'])
+ self.compute_api._cast_to_cells(self.context, inst,
+ 'delete').AndRaise(exc)
+ cells_rpcapi.instance_delete_everywhere(self.context,
+ inst, 'hard')
+ self.mox.ReplayAll()
+ self.compute_api.delete(self.context, inst)
+
+ def test_soft_delete_instance_no_cell(self):
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi,
+ 'instance_delete_everywhere')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_cast_to_cells')
+ inst = self._create_fake_instance()
+ exc = exception.InstanceUnknownCell(instance_uuid=inst['uuid'])
+ self.compute_api._cast_to_cells(self.context, inst,
+ 'soft_delete').AndRaise(exc)
+ cells_rpcapi.instance_delete_everywhere(self.context,
+ inst, 'soft')
+ self.mox.ReplayAll()
+ self.compute_api.soft_delete(self.context, inst)
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
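The child-cell stubs above swap the global QUOTAS engine for one backed by NoopQuotaDriver, which only works now that QuotaEngine resolves its driver lazily (see the nova/quota.py hunk earlier in this diff). The same save/replace/restore dance could be condensed into a context manager; a hypothetical helper, not part of the change:

    import contextlib

    from nova.compute import api as compute_api
    from nova import quota


    @contextlib.contextmanager
    def noop_quotas():
        # Hypothetical helper condensing stub_call_to_cells/stub_cast_to_cells.
        saved = quota.QUOTAS
        quota.QUOTAS = quota.QuotaEngine(
            quota_driver_class=quota.NoopQuotaDriver())
        compute_api.QUOTAS = quota.QUOTAS
        try:
            yield
        finally:
            quota.QUOTAS = saved
            compute_api.QUOTAS = saved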
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 6c40a95e2..a089e9dc6 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -221,7 +221,9 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance,
+ reservations=['uuid1', 'uuid2'],
+ version='2.27')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
@@ -368,7 +370,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance, bdms=[],
- version='2.4')
+ reservations=['uuid1', 'uuid2'],
+ version='2.27')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 72c04e427..dd779c778 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -112,6 +112,7 @@ class _BaseTestCase(object):
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db == None:
+ self.stub_out_client_exceptions()
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
@@ -601,7 +602,6 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
- self.stub_out_client_exceptions()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
@@ -673,16 +673,32 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
- db_result_listified=False):
+ db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
- getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ if db_exception:
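+ # The stubbed DB call is expected twice: once for the call that
+ # raises a wrapped ClientException, and once more after client
+ # exception wrapping is stubbed out below.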
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ else:
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
- result = self.conductor.service_get_all_by(self.context, **condargs)
- if db_result_listified:
- self.assertEqual(['fake-result'], result)
+ if db_exception:
+ self.assertRaises(rpc_common.ClientException,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
+
+ self.stub_out_client_exceptions()
+
+ self.assertRaises(db_exception.__class__,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
else:
- self.assertEqual('fake-result', result)
+ result = self.conductor.service_get_all_by(self.context,
+ **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -713,6 +729,19 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host', 'binary'),
dict(host='host', binary='binary'))
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host'),
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'),
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
@@ -786,15 +815,24 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
'fake-key', 'fake-sort')
def _test_stubbed(self, name, dbargs, condargs,
- db_result_listified=False):
+ db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
- getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ if db_exception:
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ else:
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
- result = self.conductor.service_get_all_by(self.context, **condargs)
- if db_result_listified:
- self.assertEqual(['fake-result'], result)
+ if db_exception:
+ self.assertRaises(db_exception.__class__,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
else:
- self.assertEqual('fake-result', result)
+ result = self.conductor.service_get_all_by(self.context,
+ **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -820,6 +858,24 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
dict(topic='compute', host='host'),
db_result_listified=True)
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'))
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host'),
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'),
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
@@ -913,8 +969,12 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
args = args[1:]
else:
ctxt = self.context
+ db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
- getattr(db, name)(ctxt, *args).AndReturn('fake-result')
+ if db_exception:
+ getattr(db, name)(ctxt, *args).AndRaise(db_exception)
+ else:
+ getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
# TODO(russellb) This is a hack ... SetUp() starts the conductor()
# service. There is a cleanup step that runs after this test which
@@ -922,8 +982,13 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
- result = getattr(self.conductor, name)(self.context, *args)
- self.assertEqual(
+ if db_exception:
+ self.assertRaises(db_exception.__class__,
+ getattr(self.conductor, name),
+ self.context, *args)
+ else:
+ result = getattr(self.conductor, name)(self.context, *args)
+ self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
@@ -941,6 +1006,18 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary')
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host', 'host',
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary',
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
def test_service_create(self):
self._test_stubbed('service_create', {})
@@ -990,6 +1067,40 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
+ def test_quota_commit_with_project_id(self):
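+ # The conductor API should hand the manager a context whose
+ # project_id is the one passed explicitly, not the caller's.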
+ diff_proj_id = 'diff_fake_proj_id'
+ self.assertNotEqual(self.context.project_id, diff_proj_id)
+ call_info = {}
+
+ def mgr_quota_commit(ctxt, reservations):
+ call_info['resvs'] = reservations
+ call_info['project_id'] = ctxt.project_id
+
+ self.stubs.Set(self.conductor_manager, 'quota_commit',
+ mgr_quota_commit)
+
+ self.conductor.quota_commit(self.context, 'fake_resvs',
+ project_id=diff_proj_id)
+ self.assertEqual(diff_proj_id, call_info['project_id'])
+ self.assertEqual('fake_resvs', call_info['resvs'])
+
+ def test_quota_rollback_with_project_id(self):
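+ # As with quota_commit, the explicitly passed project_id should be
+ # reflected in the context handed to the manager.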
+ diff_proj_id = 'diff_fake_proj_id'
+ self.assertNotEqual(self.context.project_id, diff_proj_id)
+ call_info = {}
+
+ def mgr_quota_rollback(ctxt, reservations):
+ call_info['resvs'] = reservations
+ call_info['project_id'] = ctxt.project_id
+
+ self.stubs.Set(self.conductor_manager, 'quota_rollback',
+ mgr_quota_rollback)
+
+ self.conductor.quota_rollback(self.context, 'fake_resvs',
+ project_id=diff_proj_id)
+ self.assertEqual(diff_proj_id, call_info['project_id'])
+ self.assertEqual('fake_resvs', call_info['resvs'])
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
@@ -998,7 +1109,6 @@ class ConductorLocalAPITestCase(ConductorAPITestCase):
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
- self.stub_out_client_exceptions()
def test_client_exceptions(self):
instance = self._create_fake_instance()
diff --git a/nova/tests/db/test_sqlite.py b/nova/tests/db/test_sqlite.py
new file mode 100644
index 000000000..0383f058b
--- /dev/null
+++ b/nova/tests/db/test_sqlite.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test cases for sqlite-specific logic"""
+
+import os
+import subprocess
+
+from sqlalchemy import BigInteger, Column, String
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+
+from nova import test
+
+
+class TestSqlite(test.TestCase):
+ """Tests for sqlite-specific logic."""
+
+ def setUp(self):
+ super(TestSqlite, self).setUp()
+ self.db_file = "test_bigint.sqlite"
+ if os.path.exists(self.db_file):
+ os.remove(self.db_file)
+
+ def test_big_int_mapping(self):
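+ # The generated sqlite schema should render BigInteger columns as
+ # INTEGER rather than BIGINT.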
+ base_class = declarative_base()
+
+ class User(base_class):
+ """Dummy class with a BigInteger column for testing."""
+ __tablename__ = "users"
+ id = Column(BigInteger, primary_key=True)
+ name = Column(String)
+
+ get_schema_cmd = "sqlite3 %s '.schema'" % self.db_file
+ engine = create_engine("sqlite:///%s" % self.db_file)
+ base_class.metadata.create_all(engine)
+ process = subprocess.Popen(get_schema_cmd, shell=True,
+ stdout=subprocess.PIPE)
+ output, _ = process.communicate()
+ self.assertFalse('BIGINT' in output, msg="column type BIGINT "
+ "not converted to INTEGER in schema")
+
+ def tearDown(self):
+ if os.path.exists(self.db_file):
+ os.remove(self.db_file)
+ super(TestSqlite, self).tearDown()
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
index 4b430ad7c..c393ab0c7 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
@@ -2,6 +2,7 @@
"quota_class_set": {
"cores": 20,
"floating_ips": 10,
+ "fixed_ips": 10,
"id": "%(set_id)s",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
index 3dffd47f0..8ab8436d7 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
@@ -2,6 +2,7 @@
<quota_class_set id="%(set_id)s">
<cores>20</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
index f074c829f..3974f65db 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
@@ -4,6 +4,7 @@
"cores": 50,
"ram": 51200,
"floating_ips": 10,
+ "fixed_ips": 10,
"metadata_items": 128,
"injected_files": 5,
"injected_file_content_bytes": 10240,
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
index d14785482..f27082ab3 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
@@ -2,6 +2,7 @@
<quota_class_set>
<cores>50</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
index 99a11f4ff..8d195b924 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
@@ -2,6 +2,7 @@
"quota_class_set": {
"cores": 50,
"floating_ips": 10,
+ "fixed_ips": 10,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
index 44c658a41..5c12a81e7 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
@@ -2,6 +2,7 @@
<quota_class_set>
<cores>50</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
index ee1f6a397..364a59f7a 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -2,6 +2,7 @@
"quota_set": {
"cores": 20,
"floating_ips": 10,
+ "fixed_ips": 10,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
index 6a39c8506..36e6da544 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
@@ -2,6 +2,7 @@
<quota_set id="fake_tenant">
<cores>20</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
index ee1f6a397..364a59f7a 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -2,6 +2,7 @@
"quota_set": {
"cores": 20,
"floating_ips": 10,
+ "fixed_ips": 10,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
index 6a39c8506..36e6da544 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
@@ -2,6 +2,7 @@
<quota_set id="fake_tenant">
<cores>20</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
index c16dc6bb5..43525cfd5 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -2,6 +2,7 @@
"quota_set": {
"cores": 20,
"floating_ips": 10,
+ "fixed_ips": 10,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
index 126c3fced..3c411e8e5 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
@@ -2,6 +2,7 @@
<quota_set>
<cores>20</cores>
<floating_ips>10</floating_ips>
+ <fixed_ips>10</fixed_ips>
<injected_file_content_bytes>10240</injected_file_content_bytes>
<injected_file_path_bytes>255</injected_file_path_bytes>
<injected_files>5</injected_files>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 26f37275a..1ca839b3f 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -1963,7 +1963,7 @@ class ServicesJsonTest(ApiSampleTestBase):
def setUp(self):
super(ServicesJsonTest, self).setUp()
self.stubs.Set(db, "service_get_all",
- test_services.fake_service_get_all)
+ test_services.fake_db_api_service_get_all)
self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
test_services.fake_service_get_by_host_binary)
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 92b8e1d91..2fe53968b 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -34,6 +34,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
+from nova import quota
from nova import test
from nova.tests import fake_ldap
from nova.tests import fake_network
@@ -286,6 +287,7 @@ class FlatNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
@@ -306,6 +308,10 @@ class FlatNetworkTestCase(test.TestCase):
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
+
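+ # Allocating a fixed IP now reserves fixed_ips quota, so expect the
+ # reserve() call.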
+ quota.QUOTAS.reserve(mox.IgnoreArg(),
+ fixed_ips=mox.IgnoreArg()).AndReturn(None)
+
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
@@ -327,6 +333,7 @@ class FlatNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
@@ -347,6 +354,10 @@ class FlatNetworkTestCase(test.TestCase):
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
+
+ quota.QUOTAS.reserve(mox.IgnoreArg(),
+ fixed_ips=mox.IgnoreArg()).AndReturn(None)
+
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
@@ -414,6 +425,7 @@ class FlatNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
@@ -432,6 +444,10 @@ class FlatNetworkTestCase(test.TestCase):
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
+
+ quota.QUOTAS.reserve(mox.IgnoreArg(),
+ fixed_ips=mox.IgnoreArg()).AndReturn(None)
+
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
@@ -531,6 +547,7 @@ class VlanNetworkTestCase(test.TestCase):
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
+
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
@@ -1601,6 +1618,207 @@ class CommonNetworkTestCase(test.TestCase):
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
+ def _test_init_host_static_fixed_range(self, net_manager):
+ self.flags(fake_network=True,
+ fixed_range='10.0.0.0/22',
+ routing_source_ip='192.168.0.1',
+ metadata_host='192.168.0.1',
+ public_interface='eth1',
+ dmz_cidr=['10.0.3.0/24'])
+ binary_name = linux_net.get_binary_name()
+
+ # Stub out calls we don't want to really run
+ self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
+ self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
+ lambda *args: None)
+
+ # Call the network manager init code to configure the fixed_range
+ net_manager.init_host()
+
+ # Get the iptables rules that got created
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ # The expected rules that should be configured based on the fixed_range
+ expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
+ % (binary_name, CONF.fixed_range,
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, CONF.fixed_range,
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, CONF.fixed_range, CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ CONF.fixed_range,
+ CONF.fixed_range)]
+
+ # Finally, compare the expected rules against the actual ones
+ for line in expected_lines:
+ self.assertTrue(line in new_lines)
+
+ def _test_init_host_dynamic_fixed_range(self, net_manager):
+ self.flags(fake_network=True,
+ fixed_range='',
+ routing_source_ip='172.16.0.1',
+ metadata_host='172.16.0.1',
+ public_interface='eth1',
+ dmz_cidr=['10.0.3.0/24'])
+ binary_name = linux_net.get_binary_name()
+
+ # Stub out calls we don't want to really run, mock the db
+ self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
+ self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
+ lambda *args: None)
+ self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
+ lambda *args: None)
+ self.mox.StubOutWithMock(db, 'network_get_all_by_host')
+ db.network_get_all_by_host(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(networks)
+ self.mox.ReplayAll()
+
+ # Call the network manager init code to configure the fixed_range
+ net_manager.init_host()
+
+ # Get the iptables rules that got created
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ # The expected rules that should be configured based on the fixed_range
+ expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
+ % (binary_name, networks[0]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[0]['cidr'],
+ networks[0]['cidr']),
+ '[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
+ % (binary_name, networks[1]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[1]['cidr'],
+ networks[1]['cidr'])]
+
+ # Compare the expected rules against the actual ones
+ for line in expected_lines:
+ self.assertTrue(line in new_lines)
+
+ # Add an additional network and ensure the rules get configured
+ new_network = {'id': 2,
+ 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
+ 'label': 'test2',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.2.0/24',
+ 'cidr_v6': '2001:dba::/64',
+ 'gateway_v6': '2001:dba::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.2.1',
+ 'broadcast': '192.168.2.255',
+ 'dns1': '192.168.2.1',
+ 'dns2': '192.168.2.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.2.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'}
+
+ # Call the network manager init code to configure the fixed_range
+ ctxt = context.get_admin_context()
+ net_manager._setup_network_on_host(ctxt, new_network)
+
+ # Get the new iptables rules that got created from adding a new network
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ # Add the new expected rules to the old ones
+ expected_lines += ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o '
+ '%s' % (binary_name, new_network['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
+ '! --ctstate DNAT -j ACCEPT' % (binary_name,
+ new_network['cidr'],
+ new_network['cidr'])]
+
+ # Compare the expected rules (with new network) against the actual ones
+ for line in expected_lines:
+ self.assertTrue(line in new_lines)
+
+ def test_flatdhcpmanager_static_fixed_range(self):
+ """Test FlatDHCPManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.FlatDHCPManager(host=HOST)
+ self.network.db = db
+
+ # Test existing behavior:
+ # CONF.fixed_range is set, NAT based on CONF.fixed_range
+ self._test_init_host_static_fixed_range(self.network)
+
+ def test_flatdhcpmanager_dynamic_fixed_range(self):
+ """Test FlatDHCPManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.FlatDHCPManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
+ def test_vlanmanager_static_fixed_range(self):
+ """Test VlanManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+
+ # Test existing behavior:
+ # CONF.fixed_range is set, NAT based on CONF.fixed_range
+ self._test_init_host_static_fixed_range(self.network)
+
+ def test_vlanmanager_dynamic_fixed_range(self):
+ """Test VlanManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 0a2d81fb1..bb6b3817b 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -327,25 +327,20 @@ class DbApiTestCase(DbTestCase):
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
- def test_instance_update_of_instance_type_id(self):
+ def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
- inst_type1 = db.instance_type_get_by_name(ctxt, 'm1.tiny')
- inst_type2 = db.instance_type_get_by_name(ctxt, 'm1.small')
-
- values = {'instance_type_id': inst_type1['id']}
+ # Create an instance with some metadata
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
+ 'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
-
- self.assertEqual(instance['instance_type']['id'], inst_type1['id'])
- self.assertEqual(instance['instance_type']['name'],
- inst_type1['name'])
-
- values = {'instance_type_id': inst_type2['id']}
- instance = db.instance_update(ctxt, instance['uuid'], values)
-
- self.assertEqual(instance['instance_type']['id'], inst_type2['id'])
- self.assertEqual(instance['instance_type']['name'],
- inst_type2['name'])
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('foo', instance_meta['host'])
+ self.assertEqual('meow', instance_meta['key1'])
+ db.instance_destroy(ctxt, instance['uuid'])
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ # Make sure instance metadata is deleted as well
+ self.assertEqual({}, instance_meta)
def test_instance_update_unique_name(self):
otherprojectcontext = context.RequestContext(self.user_id,
@@ -1517,6 +1512,12 @@ class CapacityTestCase(test.TestCase):
self.assertEqual(2, int(stats['num_proj_12345']))
self.assertEqual(1, int(stats['num_tribbles']))
+ def test_compute_node_update_always_updates_updated_at(self):
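+ # Even an empty update should bump the compute node's updated_at.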
+ item = self._create_helper('host1')
+ item_updated = db.compute_node_update(self.ctxt,
+ item['id'], {})
+ self.assertNotEqual(item['updated_at'], item_updated['updated_at'])
+
def test_compute_node_stat_prune(self):
item = self._create_helper('host1')
for stat in item['stats']:
@@ -1553,10 +1554,14 @@ class MigrationTestCase(test.TestCase):
self._create(source_compute='host3', dest_compute='host4')
def _create(self, status='migrating', source_compute='host1',
- source_node='a', dest_compute='host2', dest_node='b'):
+ source_node='a', dest_compute='host2', dest_node='b',
+ system_metadata=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
+ if system_metadata:
+ db.instance_system_metadata_update(self.ctxt, instance['uuid'],
+ system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
@@ -1568,6 +1573,14 @@ class MigrationTestCase(test.TestCase):
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
+ def test_migration_get_in_progress_joins(self):
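+ # In-progress migrations should have the instance's system_metadata
+ # joined in.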
+ self._create(source_compute='foo', system_metadata={'foo': 'bar'})
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'foo', 'a')
+ system_metadata = migrations[0]['instance']['system_metadata'][0]
+ self.assertEqual(system_metadata['key'], 'foo')
+ self.assertEqual(system_metadata['value'], 'bar')
+
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
@@ -1829,6 +1842,153 @@ class TaskLogTestCase(test.TestCase):
self.assertEqual(result['errors'], 1)
+class BlockDeviceMappingTestCase(test.TestCase):
+ def setUp(self):
+ super(BlockDeviceMappingTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.instance = db.instance_create(self.ctxt, {})
+
+ def _create_bdm(self, values):
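+ # Create a BDM with sane defaults and return it as stored in the
+ # database, looked up by device name.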
+ values.setdefault('instance_uuid', self.instance['uuid'])
+ values.setdefault('device_name', 'fake_device')
+ db.block_device_mapping_create(self.ctxt, values)
+ uuid = values['instance_uuid']
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+
+ for bdm in bdms:
+ if bdm['device_name'] == values['device_name']:
+ return bdm
+
+ def test_block_device_mapping_create(self):
+ bdm = self._create_bdm({})
+ self.assertFalse(bdm is None)
+
+ def test_block_device_mapping_update(self):
+ bdm = self._create_bdm({})
+ db.block_device_mapping_update(self.ctxt, bdm['id'],
+ {'virtual_name': 'some_virt_name'})
+ uuid = bdm['instance_uuid']
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(bdm_real[0]['virtual_name'], 'some_virt_name')
+
+ def test_block_device_mapping_update_or_create(self):
+ values = {
+ 'instance_uuid': self.instance['uuid'],
+ 'device_name': 'fake_name',
+ 'virtual_name': 'some_virt_name'
+ }
+ # check create
+ db.block_device_mapping_update_or_create(self.ctxt, values)
+ uuid = values['instance_uuid']
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
+
+ # check update
+ values['virtual_name'] = 'virtual_name'
+ db.block_device_mapping_update_or_create(self.ctxt, values)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'fake_name')
+ self.assertEqual(bdm_real['virtual_name'], 'virtual_name')
+
+ def test_block_device_mapping_update_or_create_check_remove_virt(self):
+ uuid = self.instance['uuid']
+ values = {
+ 'instance_uuid': uuid,
+ 'virtual_name': 'ephemeral12'
+ }
+
+ # Check that old bdms with the same virtual_name are deleted on create
+ val1 = dict(values)
+ val1['device_name'] = 'device1'
+ db.block_device_mapping_create(self.ctxt, val1)
+ val2 = dict(values)
+ val2['device_name'] = 'device2'
+ db.block_device_mapping_update_or_create(self.ctxt, val2)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'device2')
+ self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')
+
+ # Check that old bdms with the same virtual_name are deleted on update
+ val3 = dict(values)
+ val3['device_name'] = 'device3'
+ val3['virtual_name'] = 'some_name'
+ db.block_device_mapping_create(self.ctxt, val3)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 2)
+
+ val3['virtual_name'] = 'ephemeral12'
+ db.block_device_mapping_update_or_create(self.ctxt, val3)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'device3')
+ self.assertEqual(bdm_real['virtual_name'], 'ephemeral12')
+
+ def test_block_device_mapping_get_all_by_instance(self):
+ uuid1 = self.instance['uuid']
+ uuid2 = db.instance_create(self.ctxt, {})['uuid']
+
+ bdms_values = [{'instance_uuid': uuid1,
+ 'virtual_name': 'virtual_name',
+ 'device_name': 'first'},
+ {'instance_uuid': uuid2,
+ 'virtual_name': 'virtual_name1',
+ 'device_name': 'second'},
+ {'instance_uuid': uuid2,
+ 'virtual_name': 'virtual_name2',
+ 'device_name': 'third'}]
+
+ for bdm in bdms_values:
+ self._create_bdm(bdm)
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['virtual_name'], 'virtual_name')
+ self.assertEqual(bdms[0]['device_name'], 'first')
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
+ self.assertEqual(len(bdms), 2)
+
+ def test_block_device_mapping_destroy(self):
+ bdm = self._create_bdm({})
+ db.block_device_mapping_destroy(self.ctxt, bdm['id'])
+ bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
+ bdm['instance_uuid'])
+ self.assertEqual(len(bdm), 0)
+
+ def test_block_device_mapping_destroy_by_instance_and_volume(self):
+ vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
+ vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
+
+ self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
+ self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})
+
+ uuid = self.instance['uuid']
+ db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
+ vol_id1)
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], 'fake2')
+
+ def test_block_device_mapping_destroy_by_instance_and_device(self):
+ self._create_bdm({'device_name': 'fake1'})
+ self._create_bdm({'device_name': 'fake2'})
+
+ uuid = self.instance['uuid']
+ params = (self.ctxt, uuid, 'fake1')
+ db.block_device_mapping_destroy_by_instance_and_device(*params)
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], 'fake2')
+
+
class ArchiveTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index c6d75aea1..0ddfe080d 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -42,6 +42,7 @@ from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
from nova.virt import configdrive
+from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
@@ -51,6 +52,7 @@ from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
@@ -88,10 +90,6 @@ class HyperVAPITestCase(test.TestCase):
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.quantumv2.api.API')
- self.flags(vswitch_name='external',
- force_volumeutils_v1=True,
- group='hyperv')
-
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
@@ -118,6 +116,14 @@ class HyperVAPITestCase(test.TestCase):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
+ def fake_vmutils__init__(self, host='.'):
+ pass
+ vmutils.VMUtils.__init__ = fake_vmutils__init__
+
+ def fake_get_volume_utils(self):
+ return volumeutils.VolumeUtils()
+ volumeops.VolumeOps._get_volume_utils = fake_get_volume_utils
+
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
@@ -141,7 +147,7 @@ class HyperVAPITestCase(test.TestCase):
self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_iscsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
self._mox.StubOutWithMock(vmutils.VMUtils,
@@ -150,6 +156,8 @@ class HyperVAPITestCase(test.TestCase):
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_controller_volume_paths')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
@@ -183,6 +191,8 @@ class HyperVAPITestCase(test.TestCase):
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_from_disk_path')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
@@ -523,16 +533,21 @@ class HyperVAPITestCase(test.TestCase):
self._conn.destroy(self._instance_data, None)
self._mox.VerifyAll()
- def test_live_migration(self):
- self._test_live_migration(False)
+ def test_live_migration_without_volumes(self):
+ self._test_live_migration()
+
+ def test_live_migration_with_volumes(self):
+ self._test_live_migration(with_volumes=True)
def test_live_migration_with_target_failure(self):
- self._test_live_migration(True)
+ self._test_live_migration(test_failure=True)
- def _test_live_migration(self, test_failure):
+ def _test_live_migration(self, test_failure=False,
+ with_volumes=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
fake_post_method = self._mox.CreateMockAnything()
if not test_failure:
@@ -544,10 +559,27 @@ class HyperVAPITestCase(test.TestCase):
fake_recover_method(self._context, instance_data, dest_server,
False)
+ fake_ide_controller_path = 'fakeide'
+ fake_scsi_controller_path = 'fakescsi'
+
+ if with_volumes:
+ fake_scsi_disk_path = 'fake_scsi_disk_path'
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun = 1
+ fake_scsi_paths = {0: fake_scsi_disk_path}
+ else:
+ fake_scsi_paths = {}
+
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
instance_data['name'], dest_server)
if test_failure:
- m.AndRaise(Exception('Simulated failure'))
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ if with_volumes:
+ m.AndReturn([(fake_target_iqn, fake_target_lun)])
+ volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
+ else:
+ m.AndReturn([])
self._mox.ReplayAll()
try:
@@ -555,19 +587,22 @@ class HyperVAPITestCase(test.TestCase):
dest_server, fake_post_method,
fake_recover_method)
exception_raised = False
- except Exception:
+ except vmutils.HyperVException:
exception_raised = True
self.assertTrue(not test_failure ^ exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
- self._test_pre_live_migration(True)
+ self._test_pre_live_migration(True, False)
def test_pre_live_migration_no_cow_image(self):
- self._test_pre_live_migration(False)
+ self._test_pre_live_migration(False, False)
- def _test_pre_live_migration(self, cow):
+ def test_pre_live_migration_with_volumes(self):
+ self._test_pre_live_migration(False, True)
+
+ def _test_pre_live_migration(self, cow, with_volumes):
self.flags(use_cow_images=cow)
instance_data = self._get_instance_data()
@@ -591,9 +626,29 @@ class HyperVAPITestCase(test.TestCase):
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
+ if with_volumes:
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+ else:
+ block_device_info = None
+
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance,
- None, network_info)
+ block_device_info, network_info)
self._mox.VerifyAll()
if cow:
@@ -734,7 +789,8 @@ class HyperVAPITestCase(test.TestCase):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
- boot_from_volume=False):
+ boot_from_volume=False,
+ block_device_info=None):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool))
@@ -750,6 +806,16 @@ class HyperVAPITestCase(test.TestCase):
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
+ if boot_from_volume:
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
+ target_lun, target_portal, True)
+
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name), mox.IsA(str),
mox.IsA(str)).InAnyOrder()
@@ -787,7 +853,8 @@ class HyperVAPITestCase(test.TestCase):
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
- boot_from_volume)
+ boot_from_volume,
+ block_device_info)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
@@ -818,41 +885,57 @@ class HyperVAPITestCase(test.TestCase):
vhd_path = pathutils.PathUtils().get_vhd_path(self._test_vm_name)
self.assertEquals(vhd_path, self._instance_ide_disks[0])
- def test_attach_volume(self):
- instance_data = self._get_instance_data()
- instance_name = instance_data['name']
+ def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
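+ # Expect the target to resolve to a device number and then to a
+ # mounted disk path.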
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
- mount_point = '/dev/sdc'
+ def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
+ fake_mounted_disk, fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal)
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
+ target_portal=None, boot_from_volume=False):
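+ # Boot volumes attach to slot 0 of the IDE controller; other
+ # volumes go to the next free slot on the SCSI controller.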
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
- fake_free_slot = 1
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
- m = vmutils.VMUtils.get_vm_iscsi_controller(instance_name)
- m.AndReturn(fake_controller_path)
+ if boot_from_volume:
+ m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
+ m.AndReturn(fake_controller_path)
+ fake_free_slot = 0
+ else:
+ m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
+ m.AndReturn(fake_controller_path)
- m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
- m.AndReturn(fake_free_slot)
+ fake_free_slot = 1
+ m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
+ m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
@@ -860,15 +943,8 @@ class HyperVAPITestCase(test.TestCase):
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
- self._mox.ReplayAll()
- self._conn.attach_volume(connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
-
- self.assertEquals(len(self._instance_volume_disks), 1)
-
- def test_detach_volume(self):
+ def test_attach_volume(self):
instance_data = self._get_instance_data()
- instance_name = instance_data['name']
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
@@ -878,6 +954,18 @@ class HyperVAPITestCase(test.TestCase):
target_portal = data['target_portal']
mount_point = '/dev/sdc'
+ self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self._conn.attach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ self.assertEquals(len(self._instance_volume_disks), 1)
+
+ def _mock_detach_volume(self, target_iqn, target_lun):
+ mount_point = '/dev/sdc'
+
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_free_slot = 1
@@ -893,11 +981,10 @@ class HyperVAPITestCase(test.TestCase):
volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
- self._mox.ReplayAll()
- self._conn.detach_volume(connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
+ def test_detach_volume(self):
+ instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
- def test_boot_from_volume(self):
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
@@ -905,33 +992,17 @@ class HyperVAPITestCase(test.TestCase):
target_iqn = data['target_iqn']
target_portal = data['target_portal']
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- fake_mounted_disk = "fake_mounted_disk"
- fake_device_number = 0
- fake_controller_path = 'fake_scsi_controller_path'
-
- volumeutils.VolumeUtils.login_storage_target(target_lun,
- target_iqn,
- target_portal)
+ mount_point = '/dev/sdc'
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
+ self._mock_detach_volume(target_iqn, target_lun)
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
-
- m = vmutils.VMUtils.get_vm_ide_controller(mox.IsA(str), mox.IsA(int))
- m.AndReturn(fake_controller_path)
+ self._mox.ReplayAll()
+ self._conn.detach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
- m = vmutils.VMUtils.attach_volume_to_controller(mox.IsA(str),
- fake_controller_path,
- 0,
- fake_mounted_disk)
- m.WithSideEffects(self._add_volume_disk)
+ def test_boot_from_volume(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 7ca867f77..147ab25f7 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -365,24 +365,6 @@ class InstanceTypeTestCase(test.TestCase):
"test1", read_deleted="no")
self.assertEqual("instance_type1_redo", instance_type["name"])
- def test_will_list_deleted_type_for_active_instance(self):
- # Ensure deleted instance types with active instances can be read.
- ctxt = context.get_admin_context()
- inst_type = instance_types.create("test", 256, 1, 120, 100, "test1")
-
- instance_params = {"instance_type_id": inst_type["id"]}
- instance = db.instance_create(ctxt, instance_params)
-
- # NOTE(jk0): Delete the instance type and reload the instance from the
- # DB. The instance_type object will still be available to the active
- # instance, otherwise being None.
- instance_types.destroy(inst_type["name"])
- instance = db.instance_get_by_uuid(ctxt, instance["uuid"])
-
- self.assertRaises(exception.InstanceTypeNotFound,
- instance_types.get_instance_type, inst_type["name"])
- self.assertTrue(instance["instance_type"])
-
class InstanceTypeToolsTest(test.TestCase):
def _dict_to_metadata(self, data):
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 55b6aaec4..86f2fe914 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -2864,7 +2864,8 @@ class LibvirtConnTestCase(test.TestCase):
def test_broken_connection(self):
for (error, domain) in (
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
- (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)):
+ (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
+ (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -3532,6 +3533,35 @@ class LibvirtConnTestCase(test.TestCase):
}
self.assertEqual(actual, expect)
+ def test_failing_vcpu_count(self):
+ """Domain can fail to return the vcpu description in case it's
+ just starting up or shutting down. Make sure None is handled
+ gracefully.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self, vcpus):
+ self._vcpus = vcpus
+
+ def vcpus(self):
+ if self._vcpus is None:
+ return None
+ else:
+ return ([1] * self._vcpus, [True] * self._vcpus)
+
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn = driver._conn
+ self.mox.StubOutWithMock(driver, 'list_instance_ids')
+ self.mox.StubOutWithMock(conn, 'lookupByID')
+
+ driver.list_instance_ids().AndReturn([1, 2])
+ conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
+ conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
+
+ self.mox.ReplayAll()
+
+ self.assertEqual(5, driver.get_vcpu_used())
+
def test_get_instance_capabilities(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py
index ddaaa2552..1096be0d3 100644
--- a/nova/tests/test_migration_utils.py
+++ b/nova/tests/test_migration_utils.py
@@ -16,13 +16,16 @@
# under the License.
from migrate.changeset import UniqueConstraint
-from sqlalchemy import Integer, BigInteger, DateTime, String
+from sqlalchemy import Integer, DateTime, String
from sqlalchemy import MetaData, Table, Column
+from sqlalchemy.exc import SAWarning
from sqlalchemy.sql import select
+from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import utils
from nova import exception
from nova.tests import test_migrations
+import warnings
class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
@@ -71,6 +74,12 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
test_table.drop()
def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):
+
+ class CustomType(UserDefinedType):
+ """Dummy column type for testing unsupported types."""
+ def get_col_spec(self):
+ return "CustomType"
+
table_name = "__test_tmp_table__"
uc_name = 'uniq_foo'
values = [
@@ -86,15 +95,16 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
Column('id', Integer, primary_key=True,
nullable=False),
Column('a', Integer),
- Column('foo', BigInteger, default=0),
+ Column('foo', CustomType, default=0),
UniqueConstraint('a', name='uniq_a'),
UniqueConstraint('foo', name=uc_name))
test_table.create()
engine.execute(test_table.insert(), values)
if key == "sqlite":
+ warnings.simplefilter("ignore", SAWarning)
# NOTE(boris-42): Missing info about column `foo` that has
- # unsupported type BigInteger.
+ # unsupported type CustomType.
self.assertRaises(exception.NovaException,
utils.drop_unique_constraint,
engine, table_name, uc_name, 'foo')
@@ -106,7 +116,7 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
engine, table_name, uc_name, 'foo',
foo=Integer())
- foo = Column('foo', BigInteger, default=0)
+ foo = Column('foo', CustomType, default=0)
utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
foo=foo)
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index cf5c2f509..60975c68c 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -785,7 +785,7 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
is_public=False),
dict(id=13, name='type4', memory_mb=128, vcpus=1,
root_gb=10, ephemeral_gb=0, flavorid="4", swap=0,
- rxtx_factor=1.0, vcpu_weight=1, disabled=True,
+ rxtx_factor=1.0, vcpu_weight=None, disabled=True,
is_public=True),
dict(id=14, name='type5', memory_mb=128, vcpus=1,
root_gb=10, ephemeral_gb=0, flavorid="5", swap=0,
@@ -831,8 +831,14 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
for prop in instance_type_props:
prop_name = 'instance_type_%s' % prop
self.assertIn(prop_name, inst_sys_meta)
- self.assertEqual(str(inst_sys_meta[prop_name]),
- str(inst_type[prop]))
+ if prop == "vcpu_weight":
+ # NOTE(danms): vcpu_weight can be NULL
+ self.assertEqual(inst_sys_meta[prop_name],
+ inst_type[prop] and str(inst_type[prop])
+ or None)
+ else:
+ self.assertEqual(str(inst_sys_meta[prop_name]),
+ str(inst_type[prop]))
# migration 154, add shadow tables for deleted data
# There are 53 shadow tables but we only test one
@@ -1032,6 +1038,74 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
for key, value in data[the_id].items():
self.assertEqual(value, result[key])
+ # migration 161, fix system_metadata "None" values should be NULL
+ def _pre_upgrade_161(self, engine):
+ fake_instances = [dict(uuid='m161-uuid1')]
+ sm_base = dict(instance_uuid='m161-uuid1', value=None)
+ now = timeutils.utcnow().replace(microsecond=0)
+ fake_sys_meta = [
+ # Should be fixed
+ dict(sm_base, key='instance_type_foo', value='None'),
+ dict(sm_base, key='instance_type_bar', value='88 mph'),
+
+ # Should be unaffected
+ dict(sm_base, key='instance_type_name', value='None'),
+ dict(sm_base, key='instance_type_flavorid', value='None'),
+ dict(sm_base, key='foo', value='None'),
+ dict(sm_base, key='instance_type_bat'),
+ dict(sm_base, key='instance_type_baz', created_at=now),
+ ]
+
+ instances = get_table(engine, 'instances')
+ sys_meta = get_table(engine, 'instance_system_metadata')
+ engine.execute(instances.insert(), fake_instances)
+
+ data = {}
+ for sm in fake_sys_meta:
+ result = sys_meta.insert().values(sm).execute()
+ sm['id'] = result.inserted_primary_key[0]
+ data[sm['id']] = sm
+
+ return data
+
+ def _check_161(self, engine, data):
+ our_ids = data.keys()
+ sys_meta = get_table(engine, 'instance_system_metadata')
+ results = sys_meta.select().where(sys_meta.c.id.in_(our_ids)).\
+ execute()
+ results = list(results)
+ self.assertEqual(len(our_ids), len(results))
+ for result in results:
+ the_id = result['id']
+ key = result['key']
+ value = result['value']
+ original = data[the_id]
+
+ if key == 'instance_type_baz':
+ # Neither value nor created_at should have been altered
+ self.assertEqual(result['value'], original['value'])
+ self.assertEqual(result['created_at'], original['created_at'])
+ elif key in ['instance_type_name', 'instance_type_flavorid']:
+ # These should not have their values changed, but should
+ # have corrected created_at stamps
+ self.assertEqual(result['value'], original['value'])
+ self.assertTrue(isinstance(result['created_at'],
+ datetime.datetime))
+ elif key.startswith('instance_type'):
+ # Values like instance_type_% should be stamped and values
+ # converted from 'None' to None where appropriate
+ self.assertEqual(result['value'],
+ None if original['value'] == 'None'
+ else original['value'])
+ self.assertTrue(isinstance(result['created_at'],
+ datetime.datetime))
+ else:
+ # None of the non-instance_type values should have
+ # been touched. Since we didn't set created_at on any
+ # of them, they should all still be None.
+ self.assertEqual(result['value'], original['value'])
+ self.assertEqual(result['created_at'], None)
+
class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index b1d1958f0..49f9f3256 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -382,3 +382,17 @@ class DBCommandsTestCase(test.TestCase):
def test_archive_deleted_rows_negative(self):
self.assertRaises(SystemExit,
self.commands.archive_deleted_rows, -1)
+
+
+class ServiceCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(ServiceCommandsTestCase, self).setUp()
+ self.commands = nova_manage.ServiceCommands()
+
+ def test_service_enable_invalid_params(self):
+ self.assertRaises(SystemExit,
+ self.commands.enable, 'nohost', 'noservice')
+
+ def test_service_disable_invalid_params(self):
+ self.assertRaises(SystemExit,
+ self.commands.disable, 'nohost', 'noservice')
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index a951ba44c..c6a385bdd 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -18,6 +18,8 @@
Test suite for PowerVMDriver.
"""
+import contextlib
+
from nova import context
from nova import db
from nova import test
@@ -106,6 +108,9 @@ class FakeIVMOperator(object):
class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
def __init__(self):
+ self.connection_data = common.Connection(host='fake_compute_1',
+ username='fake_user',
+ password='fake_pass')
pass
def _create_logical_volume(self, size):
@@ -306,3 +311,85 @@ class PowerVMDriverTestCase(test.TestCase):
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
+
+ def test_migrate_volume_use_instance_name(self):
+ inst_name = 'instance-00000000'
+ lv_name = 'logical-vol-name'
+ src_host = 'compute_host_1'
+ dest = 'compute_host_1'
+ image_path = 'some/image/path'
+ fake_noop = lambda *args, **kwargs: None
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ '_copy_device_to_file', fake_noop)
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ 'run_vios_command_as_root', fake_noop)
+ blockdev_op = self.powervm_connection._powervm._disk_adapter
+ file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
+ image_path, inst_name)
+ expected_path = 'some/image/path/instance-00000000_rsz.gz'
+ self.assertEqual(file_path, expected_path)
+
+ def test_migrate_volume_use_lv_name(self):
+ lv_name = 'logical-vol-name'
+ src_host = 'compute_host_1'
+ dest = 'compute_host_1'
+ image_path = 'some/image/path'
+ fake_noop = lambda *args, **kwargs: None
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ '_copy_device_to_file', fake_noop)
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ 'run_vios_command_as_root', fake_noop)
+ blockdev_op = self.powervm_connection._powervm._disk_adapter
+ file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
+ image_path)
+ expected_path = 'some/image/path/logical-vol-name_rsz.gz'
+ self.assertEqual(file_path, expected_path)
+
+ def test_migrate_build_scp_command(self):
+ lv_name = 'logical-vol-name'
+ src_host = 'compute_host_1'
+ dest = 'compute_host_2'
+ image_path = 'some/image/path'
+ fake_noop = lambda *args, **kwargs: None
+
+ @contextlib.contextmanager
+ def fake_vios_to_vios_auth(*args, **kwargs):
+ key_name = 'some_key'
+ yield key_name
+ self.stubs.Set(common, 'vios_to_vios_auth',
+ fake_vios_to_vios_auth)
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ 'run_vios_command_as_root', fake_noop)
+
+ def fake_run_vios_command(*args, **kwargs):
+ cmd = args[0]
+ exp_cmd = ' '.join(['scp -o "StrictHostKeyChecking no" -i',
+ 'some_key',
+ 'some/image/path/logical-vol-name_rsz.gz',
+ 'fake_user@compute_host_2:some/image/path'])
+ self.assertEqual(exp_cmd, cmd)
+
+ self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
+ 'run_vios_command',
+ fake_run_vios_command)
+
+ blockdev_op = self.powervm_connection._powervm._disk_adapter
+ file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
+ image_path)
+
+ def test_get_resize_name(self):
+ inst_name = 'instance-00000001'
+ expected_name = 'rsz_instance-00000001'
+ result = self.powervm_connection._get_resize_name(inst_name)
+ self.assertEqual(expected_name, result)
+
+ def test_get_long_resize_name(self):
+ inst_name = 'some_really_long_instance_name_00000001'
+ expected_name = 'rsz__really_long_instance_name_00000001'
+ result = self.powervm_connection._get_resize_name(inst_name)
+ self.assertEqual(expected_name, result)
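The two resize-name tests above imply that _get_resize_name prefixes the instance name with 'rsz_' and, when the combined name would exceed the hypervisor's name-length limit, drops leading characters of the original name to make room. A minimal sketch consistent with the expected values is shown below; the 31-character limit is an assumption, since the implementation is not part of this hunk.

    def _get_resize_name(instance_name, max_len=31):
        # Illustrative only; mirrors the behaviour the tests assert.
        name_tag = 'rsz_'
        if len(instance_name) + len(name_tag) > max_len:
            # Drop as many leading characters as the tag adds.
            instance_name = instance_name[len(name_tag):]
        return name_tag + instance_name

    assert _get_resize_name('instance-00000001') == 'rsz_instance-00000001'
    assert (_get_resize_name('some_really_long_instance_name_00000001') ==
            'rsz__really_long_instance_name_00000001')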
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 1ea4eea21..1f422748b 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -719,6 +719,7 @@ class DbQuotaDriverTestCase(test.TestCase):
quota_cores=20,
quota_ram=50 * 1024,
quota_floating_ips=10,
+ quota_fixed_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
@@ -745,6 +746,7 @@ class DbQuotaDriverTestCase(test.TestCase):
cores=20,
ram=50 * 1024,
floating_ips=10,
+ fixed_ips=10,
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
@@ -778,6 +780,7 @@ class DbQuotaDriverTestCase(test.TestCase):
cores=20,
ram=25 * 1024,
floating_ips=10,
+ fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
@@ -830,6 +833,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self._stub_quota_class_get_all_by_name()
def test_get_project_quotas(self):
+ self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
@@ -861,6 +865,11 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=2,
reserved=0,
),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
metadata_items=dict(
limit=64,
in_use=0,
@@ -899,6 +908,7 @@ class DbQuotaDriverTestCase(test.TestCase):
))
def test_get_project_quotas_alt_context_no_class(self):
+ self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
@@ -929,6 +939,11 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=2,
reserved=0,
),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
metadata_items=dict(
limit=128,
in_use=0,
@@ -967,6 +982,7 @@ class DbQuotaDriverTestCase(test.TestCase):
))
def test_get_project_quotas_alt_context_with_class(self):
+ self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
@@ -998,6 +1014,11 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=2,
reserved=0,
),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
metadata_items=dict(
limit=64,
in_use=0,
@@ -1087,6 +1108,9 @@ class DbQuotaDriverTestCase(test.TestCase):
floating_ips=dict(
limit=10,
),
+ fixed_ips=dict(
+ limit=10,
+ ),
metadata_items=dict(
limit=64,
),
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index c601bb0af..af6a9b9aa 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -23,7 +23,6 @@ import os
import os.path
import StringIO
import tempfile
-from xml.dom import minidom
import mox
import netaddr
@@ -450,39 +449,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
- def test_safe_parse_xml(self):
-
- normal_body = ("""
- <?xml version="1.0" ?><foo>
- <bar>
- <v1>hey</v1>
- <v2>there</v2>
- </bar>
- </foo>""").strip()
-
- def killer_body():
- return (("""<!DOCTYPE x [
- <!ENTITY a "%(a)s">
- <!ENTITY b "%(b)s">
- <!ENTITY c "%(c)s">]>
- <foo>
- <bar>
- <v1>%(d)s</v1>
- </bar>
- </foo>""") % {
- 'a': 'A' * 10,
- 'b': '&a;' * 10,
- 'c': '&b;' * 10,
- 'd': '&c;' * 9999,
- }).strip()
-
- dom = utils.safe_minidom_parse_string(normal_body)
- self.assertEqual(normal_body, str(dom.toxml()))
-
- self.assertRaises(ValueError,
- utils.safe_minidom_parse_string,
- killer_body())
-
def test_xhtml_escape(self):
self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
@@ -992,47 +958,3 @@ class StringLengthTestCase(test.TestCase):
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
-
-
-class SafeParserTestCase(test.TestCase):
- def test_external_dtd(self):
- xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
- <html>
- <head/>
- <body>html with dtd</body>
- </html>""")
-
- parser = utils.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
-
- def test_external_file(self):
- xml_string = """<!DOCTYPE external [
- <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
- ]>
- <root>&ee;</root>"""
-
- parser = utils.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
-
- def test_notation(self):
- xml_string = """<?xml version="1.0" standalone="no"?>
- <!-- comment data -->
- <!DOCTYPE x [
- <!NOTATION notation SYSTEM "notation.jpeg">
- ]>
- <root attr1="value1">
- </root>"""
-
- parser = utils.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 755d49be1..e9248c7b7 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -183,3 +183,20 @@ def cleanup_dns_managers():
for manager in test_dns_managers:
manager.delete_dns_file()
test_dns_managers = []
+
+
+def killer_xml_body():
+ return (("""<!DOCTYPE x [
+ <!ENTITY a "%(a)s">
+ <!ENTITY b "%(b)s">
+ <!ENTITY c "%(c)s">]>
+ <foo>
+ <bar>
+ <v1>%(d)s</v1>
+ </bar>
+ </foo>""") % {
+ 'a': 'A' * 10,
+ 'b': '&a;' * 10,
+ 'c': '&b;' * 10,
+ 'd': '&c;' * 9999,
+ }).strip()
diff --git a/nova/utils.py b/nova/utils.py
index fe6c75df3..dbbbd1eb6 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -36,10 +36,6 @@ import struct
import sys
import tempfile
import time
-from xml.dom import minidom
-from xml.parsers import expat
-from xml import sax
-from xml.sax import expatreader
from xml.sax import saxutils
from eventlet import event
@@ -657,60 +653,6 @@ class DynamicLoopingCall(LoopingCallBase):
return self.done
-class ProtectedExpatParser(expatreader.ExpatParser):
- """An expat parser which disables DTD's and entities by default."""
-
- def __init__(self, forbid_dtd=True, forbid_entities=True,
- *args, **kwargs):
- # Python 2.x old style class
- expatreader.ExpatParser.__init__(self, *args, **kwargs)
- self.forbid_dtd = forbid_dtd
- self.forbid_entities = forbid_entities
-
- def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise ValueError("Inline DTD forbidden")
-
- def entity_decl(self, entityName, is_parameter_entity, value, base,
- systemId, publicId, notationName):
- raise ValueError("<!ENTITY> entity declaration forbidden")
-
- def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- # expat 1.2
- raise ValueError("<!ENTITY> unparsed entity forbidden")
-
- def external_entity_ref(self, context, base, systemId, publicId):
- raise ValueError("<!ENTITY> external entity forbidden")
-
- def notation_decl(self, name, base, sysid, pubid):
- raise ValueError("<!ENTITY> notation forbidden")
-
- def reset(self):
- expatreader.ExpatParser.reset(self)
- if self.forbid_dtd:
- self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
- self._parser.EndDoctypeDeclHandler = None
- if self.forbid_entities:
- self._parser.EntityDeclHandler = self.entity_decl
- self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
- self._parser.ExternalEntityRefHandler = self.external_entity_ref
- self._parser.NotationDeclHandler = self.notation_decl
- try:
- self._parser.SkippedEntityHandler = None
- except AttributeError:
- # some pyexpat versions do not support SkippedEntity
- pass
-
-
-def safe_minidom_parse_string(xml_string):
- """Parse an XML string using minidom safely.
-
- """
- try:
- return minidom.parseString(xml_string, parser=ProtectedExpatParser())
- except sax.SAXParseException as se:
- raise expat.ExpatError()
-
-
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 94d3f0a92..97d72cc74 100755
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -275,7 +275,7 @@ class BareMetalDriver(driver.ComputeDriver):
_update_state(context, node, None, baremetal_states.DELETED)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
pm = get_power_manager(node=node, instance=instance)
diff --git a/nova/virt/baremetal/virtual_power_driver_settings.py b/nova/virt/baremetal/virtual_power_driver_settings.py
index cd85bddbc..e0df12242 100644
--- a/nova/virt/baremetal/virtual_power_driver_settings.py
+++ b/nova/virt/baremetal/virtual_power_driver_settings.py
@@ -52,7 +52,7 @@ class virsh(object):
self.reboot_cmd = 'reset {_NodeName_}'
self.list_cmd = "list --all | tail -n +2 | awk -F\" \" '{print $2}'"
self.list_running_cmd = \
- "list --all|grep running|awk -F\" \" '{print \"$2\"}'"
+ "list --all|grep running|awk -v qc='\"' -F\" \" '{print qc$2qc}'"
self.get_node_macs = ("dumpxml {_NodeName_} | grep "
'"mac address" | awk -F'
'"'
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 3690f6ddf..85e1d109f 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -187,9 +187,10 @@ class Mount(object):
LOG.debug(_("Mount %(dev)s on %(dir)s") %
{'dev': self.mapped_device, 'dir': self.mount_dir})
_out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
- run_as_root=True)
+ discard_warnings=True, run_as_root=True)
if err:
self.error = _('Failed to mount filesystem: %s') % err
+ LOG.debug(self.error)
return False
self.mounted = True
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 11c65519c..71bcaef42 100755
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -248,7 +248,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
@@ -261,6 +261,9 @@ class ComputeDriver(object):
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
+ :param block_device_info: Info pertaining to attached volumes
+ :param bad_volumes_callback: Function to handle any bad volumes
+ encountered
"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 58f303efc..b2b102486 100755
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -131,7 +131,7 @@ class FakeDriver(driver.ComputeDriver):
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
pass
@staticmethod
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 32221843f..8f880652e 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -28,23 +28,19 @@ if sys.platform == 'win32':
import _winreg
import wmi
-from oslo.config import cfg
-
from nova import block_device
from nova.openstack.common import log as logging
from nova.virt import driver
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('my_ip', 'nova.netconf')
class BaseVolumeUtils(object):
- def __init__(self):
+ def __init__(self, host='.'):
if sys.platform == 'win32':
- self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
- self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ self._conn_wmi = wmi.WMI(moniker='//%s/root/wmi' % host)
+ self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
@abc.abstractmethod
def login_storage_target(self, target_lun, target_iqn, target_portal):
@@ -76,10 +72,7 @@ class BaseVolumeUtils(object):
"Choosing the default one"))
computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
- return {
- 'ip': CONF.my_ip,
- 'initiator': initiator_name,
- }
+ return initiator_name
def volume_in_mapping(self, mount_device, block_device_info):
block_device_list = [block_device.strip_dev(vol['mount_device'])
@@ -103,27 +96,48 @@ class BaseVolumeUtils(object):
start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
end_device_id = disk_path.find('"', start_device_id + 1)
device_id = disk_path[start_device_id + 1:end_device_id]
- return device_id[device_id.find("\\") + 2:]
+ drive_number = device_id[device_id.find("\\") + 2:]
+ if drive_number == 'NODRIVE':
+ return None
+ return int(drive_number)
def get_session_id_from_mounted_disk(self, physical_drive_path):
drive_number = self._get_drive_number_from_disk_path(
physical_drive_path)
+ if not drive_number:
+ return None
+
initiator_sessions = self._conn_wmi.query("SELECT * FROM "
"MSiSCSIInitiator_Session"
"Class")
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
- device_number = str(device.DeviceNumber)
+ device_number = device.DeviceNumber
if device_number == drive_number:
return initiator_session.SessionId
def get_device_number_for_target(self, target_iqn, target_lun):
- initiator_session = self._conn_wmi.query("SELECT * FROM "
- "MSiSCSIInitiator_Session"
- "Class WHERE TargetName='%s'"
- % target_iqn)[0]
- devices = initiator_session.Devices
+ initiator_sessions = self._conn_wmi.query("SELECT * FROM "
+ "MSiSCSIInitiator_Session"
+ "Class WHERE TargetName='%s'"
+ % target_iqn)
+ if not initiator_sessions:
+ return None
+
+ devices = initiator_sessions[0].Devices
for device in devices:
if device.ScsiLun == target_lun:
return device.DeviceNumber
+
+ def get_target_from_disk_path(self, disk_path):
+ initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
+ drive_number = self._get_drive_number_from_disk_path(disk_path)
+ if not drive_number:
+ return None
+
+ for initiator_session in initiator_sessions:
+ devices = initiator_session.Devices
+ for device in devices:
+ if device.DeviceNumber == drive_number:
+ return (device.TargetName, device.ScsiLun)
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 289f3c4b6..477f8fa2a 100755
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -54,7 +54,7 @@ class HyperVDriver(driver.ComputeDriver):
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, instance, network_info, block_device_info=None,
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 94f6f74d8..adca7b8f3 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -25,7 +25,6 @@ from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import pathutils
-from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
@@ -35,9 +34,7 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver')
class LiveMigrationOps(object):
def __init__(self):
-
self._pathutils = pathutils.PathUtils()
- self._vmutils = vmutils.VMUtils()
self._livemigrutils = livemigrationutils.LiveMigrationUtils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
@@ -49,7 +46,10 @@ class LiveMigrationOps(object):
instance_name = instance_ref["name"]
try:
- self._livemigrutils.live_migrate_vm(instance_name, dest)
+ iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
+ dest)
+ for (target_iqn, target_lun) in iscsi_targets:
+ self._volumeops.logout_storage_target(target_iqn)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Calling live migration recover_method "
@@ -66,11 +66,13 @@ class LiveMigrationOps(object):
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
- ebs_root = self._volumeops.ebs_root_in_block_devices(
+ boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
- if not ebs_root:
+ if not boot_from_volume:
self._imagecache.get_cached_image(context, instance)
+ self._volumeops.login_storage_targets(block_device_info)
+
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug(_("post_live_migration_at_destination called"),
diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py
index d039a5016..2563e1182 100644
--- a/nova/virt/hyperv/livemigrationutils.py
+++ b/nova/virt/hyperv/livemigrationutils.py
@@ -20,8 +20,10 @@ import sys
if sys.platform == 'win32':
import wmi
+from nova import exception
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
@@ -30,6 +32,7 @@ class LiveMigrationUtils(object):
def __init__(self):
self._vmutils = vmutils.VMUtils()
+ self._volutils = volumeutilsv2.VolumeUtilsV2()
def _get_conn_v2(self, host='localhost'):
try:
@@ -64,19 +67,107 @@ class LiveMigrationUtils(object):
vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if not n:
- raise vmutils.HyperVException(_('VM not found: %s') % vm_name)
+ raise exception.NotFound(_('VM not found: %s') % vm_name)
elif n > 1:
raise vmutils.HyperVException(_('Duplicate VM name found: %s')
% vm_name)
return vms[0]
- def live_migrate_vm(self, vm_name, dest_host):
- self.check_live_migration_config()
+ def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
+ LOG.debug(_("Destroying existing remote planned VM: %s"),
+ planned_vm.ElementName)
+ vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
+ (job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
+ self._vmutils.check_ret_val(ret_val, job_path)
- # We need a v2 namespace VM object
- conn_v2_local = self._get_conn_v2()
+ def _check_existing_planned_vm(self, conn_v2_remote, vm):
+ # Make sure that there's not yet a remote planned VM on the target
+ # host for this VM
+ planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
+ if planned_vms:
+ self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
- vm = self._get_vm(conn_v2_local, vm_name)
+ def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
+ vm, rmt_ip_addr_list, dest_host):
+ # Staged
+ vsmsd = conn_v2_local.query("select * from "
+ "Msvm_VirtualSystemMigrationSettingData "
+ "where MigrationType = 32770")[0]
+ vsmsd.DestinationIPAddressList = rmt_ip_addr_list
+ migration_setting_data = vsmsd.GetText_(1)
+
+ LOG.debug(_("Creating remote planned VM for VM: %s"),
+ vm.ElementName)
+ migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
+ (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
+ ComputerSystem=vm.path_(),
+ DestinationHost=dest_host,
+ MigrationSettingData=migration_setting_data)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
+ return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
+
+ def _get_physical_disk_paths(self, vm_name):
+ ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
+ ide_paths = self._vmutils.get_controller_volume_paths(ide_ctrl_path)
+
+ scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
+ scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
+
+ return dict(ide_paths.items() + scsi_paths.items())
+
+ def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
+ volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
+
+ disk_paths_remote = {}
+ iscsi_targets = []
+ for (rasd_rel_path, disk_path) in disk_paths.items():
+ (target_iqn,
+ target_lun) = self._volutils.get_target_from_disk_path(disk_path)
+ iscsi_targets.append((target_iqn, target_lun))
+
+ dev_num = volutils_remote.get_device_number_for_target(target_iqn,
+ target_lun)
+ disk_path_remote = vmutils_remote.get_mounted_disk_by_drive_number(
+ dev_num)
+
+ disk_paths_remote[rasd_rel_path] = disk_path_remote
+
+ return (disk_paths_remote, iscsi_targets)
+
+ def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
+ planned_vm, vm_name,
+ disk_paths_remote):
+ vm_settings = planned_vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+
+ updated_resource_setting_data = []
+ sasds = vm_settings.associators(
+ wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
+ for sasd in sasds:
+ if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
+ "Microsoft:Hyper-V:Physical Disk Drive" and
+ sasd.HostResource):
+ # Replace the local disk target with the correct remote one
+ old_disk_path = sasd.HostResource[0]
+ new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
+
+ LOG.debug(_("Replacing host resource "
+ "%(old_disk_path)s with "
+ "%(new_disk_path)s on planned VM %(vm_name)s") %
+ locals())
+ sasd.HostResource = [new_disk_path]
+ updated_resource_setting_data.append(sasd.GetText_(1))
+
+ LOG.debug(_("Updating remote planned VM disk paths for VM: %s"),
+ vm_name)
+ vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
+ (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
+ ResourceSettings=updated_resource_setting_data)
+ vmutils_remote.check_ret_val(ret_val, job_path)
+
+ def _get_vhd_setting_data(self, vm):
vm_settings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
@@ -90,26 +181,69 @@ class LiveMigrationUtils(object):
"Microsoft:Hyper-V:Virtual Hard Disk"):
#sasd.PoolId = ""
new_resource_setting_data.append(sasd.GetText_(1))
+ return new_resource_setting_data
- LOG.debug(_("Getting live migration networks for remote host: %s"),
- dest_host)
- conn_v2_remote = self._get_conn_v2(dest_host)
- migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
- rmt_ip_addr_list = migr_svc_rmt.MigrationServiceListenerIPAddressList
-
+ def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
+ new_resource_setting_data, dest_host):
# VirtualSystemAndStorage
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32771")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
+ if planned_vm:
+ vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
migration_setting_data = vsmsd.GetText_(1)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
- LOG.debug(_("Starting live migration for VM: %s"), vm_name)
+ LOG.debug(_("Starting live migration for VM: %s"), vm.ElementName)
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data,
NewResourceSettingData=new_resource_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
+
+ def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
+ LOG.debug(_("Getting live migration networks for remote host: %s"),
+ dest_host)
+ migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
+ return migr_svc_rmt.MigrationServiceListenerIPAddressList
+
+ def live_migrate_vm(self, vm_name, dest_host):
+ self.check_live_migration_config()
+
+ conn_v2_local = self._get_conn_v2()
+ conn_v2_remote = self._get_conn_v2(dest_host)
+
+ vm = self._get_vm(conn_v2_local, vm_name)
+ self._check_existing_planned_vm(conn_v2_remote, vm)
+
+ rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
+ dest_host)
+
+ iscsi_targets = []
+ planned_vm = None
+ disk_paths = self._get_physical_disk_paths(vm_name)
+ if disk_paths:
+ vmutils_remote = vmutils.VMUtils(dest_host)
+ (disk_paths_remote,
+ iscsi_targets) = self._get_remote_disk_data(vmutils_remote,
+ disk_paths,
+ dest_host)
+
+ planned_vm = self._create_remote_planned_vm(conn_v2_local,
+ conn_v2_remote,
+ vm, rmt_ip_addr_list,
+ dest_host)
+
+ self._update_planned_vm_disk_resources(vmutils_remote,
+ conn_v2_remote, planned_vm,
+ vm_name, disk_paths_remote)
+
+ new_resource_setting_data = self._get_vhd_setting_data(vm)
+ self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
+ new_resource_setting_data, dest_host)
+
+ # In case the caller wants to log off the targets after migration
+ return iscsi_targets
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 6536efe1e..45fea329d 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -44,10 +44,10 @@ class HyperVException(exception.NovaException):
class VMUtils(object):
- def __init__(self):
+ def __init__(self, host='.'):
if sys.platform == 'win32':
- self._conn = wmi.WMI(moniker='//./root/virtualization')
- self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
+ self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
@@ -92,7 +92,7 @@ class VMUtils(object):
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
- raise HyperVException(_('VM not found: %s') % vm_name)
+ raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
@@ -164,7 +164,7 @@ class VMUtils(object):
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
- def get_vm_iscsi_controller(self, vm_name):
+ def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
@@ -263,7 +263,7 @@ class VMUtils(object):
scsicontrl = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
scsicontrldflt)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- scsiresource = self._add_virt_resource(scsicontrl, vm.path_())
+ self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
@@ -316,7 +316,6 @@ class VMUtils(object):
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
-
vm = self._lookup_vm_check(vm_name)
(job_path, ret_val) = vm.RequestStateChange(req_state)
#Invalid state for current operation (32775) typically means that
@@ -470,7 +469,8 @@ class VMUtils(object):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(
disk_path)
- self._remove_virt_resource(physical_disk, vm.path_())
+ if physical_disk:
+ self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM "
@@ -488,3 +488,15 @@ class VMUtils(object):
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
+
+ def get_controller_volume_paths(self, controller_path):
+ disks = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType="
+ "'Microsoft Physical Disk Drive' AND "
+ "Parent='%s'" % controller_path)
+ disk_data = {}
+ for disk in disks:
+ if disk.HostResource:
+ disk_data[disk.path().RelPath] = disk.HostResource[0]
+ return disk_data
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 3542a6194..74953435a 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -23,6 +23,7 @@ import time
from oslo.config import cfg
+from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostutils
@@ -87,6 +88,30 @@ class VolumeOps(object):
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
+ def login_storage_targets(self, block_device_info):
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ for vol in mapping:
+ self._login_storage_target(vol['connection_info'])
+
+ def _login_storage_target(self, connection_info):
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ # Check if we already logged in
+ if self._volutils.get_device_number_for_target(target_iqn, target_lun):
+ LOG.debug(_("Already logged in on storage target. No need to "
+ "login. Portal: %(target_portal)s, "
+ "IQN: %(target_iqn)s, LUN: %(target_lun)s") % locals())
+ else:
+ LOG.debug(_("Logging in on storage target. Portal: "
+ "%(target_portal)s, IQN: %(target_iqn)s, "
+ "LUN: %(target_lun)s") % locals())
+ self._volutils.login_storage_target(target_lun, target_iqn,
+ target_portal)
+ # Wait for the target to be mounted
+ self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
+
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""
Attach a volume to the SCSI controller or to the IDE controller if
@@ -94,13 +119,13 @@ class VolumeOps(object):
"""
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
% locals())
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
- self._volutils.login_storage_target(target_lun, target_iqn,
- target_portal)
try:
+ self._login_storage_target(connection_info)
+
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
@@ -113,7 +138,7 @@ class VolumeOps(object):
slot = 0
else:
#Find the SCSI controller for the vm
- ctrller_path = self._vmutils.get_vm_iscsi_controller(
+ ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
@@ -136,13 +161,19 @@ class VolumeOps(object):
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
+ def logout_storage_target(self, target_iqn):
+ LOG.debug(_("Logging off storage target %(target_iqn)s") % locals())
+ self._volutils.logout_storage_target(target_iqn)
+
def detach_volume(self, connection_info, instance_name):
"""Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s") % locals())
+
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
+
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
@@ -151,8 +182,7 @@ class VolumeOps(object):
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
- #Sending logout
- self._volutils.logout_storage_target(target_iqn)
+ self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
@@ -165,27 +195,26 @@ class VolumeOps(object):
'initiator': self._initiator,
}
- def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
+ def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ wait_for_device=False):
device_number = self._volutils.get_device_number_for_target(target_iqn,
target_lun)
if device_number is None:
- raise vmutils.HyperVException(_('Unable to find a mounted '
- 'disk for target_iqn: %s')
- % target_iqn)
+ raise exception.NotFound(_('Unable to find a mounted disk for '
+ 'target_iqn: %s') % target_iqn)
LOG.debug(_('Device number: %(device_number)s, '
'target lun: %(target_lun)s') % locals())
#Finding Mounted disk drive
- for i in range(1, CONF.hyperv.volume_attach_retry_count):
+ for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
- if mounted_disk_path:
+ if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
- raise vmutils.HyperVException(_('Unable to find a mounted disk '
- 'for target_iqn: %s')
- % target_iqn)
+ raise exception.NotFound(_('Unable to find a mounted disk '
+ 'for target_iqn: %s') % target_iqn)
return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
@@ -194,3 +223,6 @@ class VolumeOps(object):
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
+
+ def get_target_from_disk_path(self, physical_drive_path):
+ return self._volutils.get_target_from_disk_path(physical_drive_path)
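The reworked lookup loop in _get_mounted_disk_from_lun now starts at 0, so volume_attach_retry_count attempts are actually made, and it returns after the first lookup unless wait_for_device is set. The retry pattern in isolation, with hypothetical names, looks roughly like this:

    import time

    def wait_for_mounted_disk(lookup, retry_count, retry_interval,
                              wait_for_device=False):
        # lookup() returns the mounted disk path or None; with
        # wait_for_device=False only a single attempt is made.
        mounted_disk_path = None
        for _i in range(0, retry_count):
            mounted_disk_path = lookup()
            if mounted_disk_path or not wait_for_device:
                break
            time.sleep(retry_interval)
        return mounted_disk_path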
diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py
index cdc0803ee..5fafe4c32 100644
--- a/nova/virt/hyperv/volumeutilsv2.py
+++ b/nova/virt/hyperv/volumeutilsv2.py
@@ -37,10 +37,10 @@ CONF = cfg.CONF
class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
- def __init__(self):
- super(VolumeUtilsV2, self).__init__()
+ def __init__(self, host='.'):
+ super(VolumeUtilsV2, self).__init__(host)
- storage_namespace = '//./root/microsoft/windows/storage'
+ storage_namespace = '//%s/root/microsoft/windows/storage' % host
if sys.platform == 'win32':
self._conn_storage = wmi.WMI(moniker=storage_namespace)
@@ -64,16 +64,21 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
+ targets = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)
+ if targets:
+ target = targets[0]
+ if target.IsConnected:
+ sessions = self._conn_storage.MSFT_iSCSISession(
+ TargetNodeAddress=target_iqn)
- target = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)[0]
- if target.IsConnected:
- session = self._conn_storage.MSFT_iSCSISession(
- TargetNodeAddress=target_iqn)[0]
- if session.IsPersistent:
- session.Unregister()
- target.Disconnect()
+ for session in sessions:
+ if session.IsPersistent:
+ session.Unregister()
+
+ target.Disconnect()
def execute_log_out(self, session_id):
- session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
- SessionId=session_id)[0]
- self.logout_storage_target(session.TargetName)
+ sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
+ SessionId=session_id)
+ if sessions:
+ self.logout_storage_target(sessions[0].TargetName)
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index 0625d407b..8dc579300 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -101,11 +101,16 @@ def set_vif_host_backend_802qbh_config(conf, devname, profileid,
def set_vif_bandwidth_config(conf, extra_specs):
- """Config vif inbound/outbound bandwidth limit."""
+ """Config vif inbound/outbound bandwidth limit. parameters are
+ set in instance_type_extra_specs table, key is in the format
+ quota:vif_inbound_average.
+ """
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in extra_specs.iteritems():
- if key in bandwidth_items:
- setattr(conf, key, value)
+ scope = key.split(':')
+ if len(scope) > 1 and scope[0] == 'quota':
+ if scope[1] in bandwidth_items:
+ setattr(conf, scope[1], value)
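The same 'quota:' scoping convention is applied here and in the driver.py and imagebackend.py hunks below: only keys of the form quota:<item> are honoured, and only when <item> is in the caller's whitelist. A small helper capturing the shared pattern (the function name is illustrative, not taken from this diff):

    def apply_quota_extra_specs(target, extra_specs, allowed_items):
        # e.g. extra_specs = {'quota:vif_inbound_average': '1024'}
        for key, value in extra_specs.iteritems():
            scope = key.split(':')
            if len(scope) > 1 and scope[0] == 'quota':
                if scope[1] in allowed_items:
                    setattr(target, scope[1], value)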
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 5e6b0e742..eabe75c73 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -579,7 +579,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._wrapped_conn.getLibVersion()
return True
except libvirt.libvirtError as e:
- if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
+ if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
+ libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
@@ -1279,7 +1280,7 @@ class LibvirtDriver(driver.ComputeDriver):
out_path, image_format)
def reboot(self, context, instance, network_info, reboot_type='SOFT',
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
@@ -2136,8 +2137,10 @@ class LibvirtDriver(driver.ComputeDriver):
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
- if key in quota_items:
- setattr(guest, key, value)
+ scope = key.split(':')
+ if len(scope) > 1 and scope[0] == 'quota':
+ if scope[1] in quota_items:
+ setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
@@ -2563,7 +2566,11 @@ class LibvirtDriver(driver.ComputeDriver):
try:
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
- total += len(vcpus[1])
+ if vcpus is None:
+ LOG.debug(_("couldn't obtain the vpu count from domain id:"
+ " %s") % dom_id)
+ else:
+ total += len(vcpus[1])
except libvirt.libvirtError as err:
if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug(_("List of domains returned by libVirt: %s")
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 25c6be1f3..c15896986 100755
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -120,8 +120,10 @@ class Image(object):
# throttling for qemu.
if self.source_type in ['file', 'block']:
for key, value in extra_specs.iteritems():
- if key in tune_items:
- setattr(info, key, value)
+ scope = key.split(':')
+ if len(scope) > 1 and scope[0] == 'quota':
+ if scope[1] in tune_items:
+ setattr(info, scope[1], value)
return info
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index c8d58d939..247746faa 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -273,7 +273,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
with common.vios_to_vios_auth(self.connection_data.host,
dest,
self.connection_data) as key_name:
- cmd = ''.join(['scp -o "StrictHostKeyChecking no"',
+ cmd = ' '.join(['scp -o "StrictHostKeyChecking no"',
('-i %s' % key_name),
file_path,
'%s@%s:%s' % (self.connection_data.username,
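The one-character change above matters because ''.join runs the scp arguments together into a single unusable token; test_migrate_build_scp_command earlier in this diff asserts the space-separated form. For illustration:

    parts = ['scp -o "StrictHostKeyChecking no"', '-i some_key',
             'some/image/path/logical-vol-name_rsz.gz',
             'fake_user@compute_host_2:some/image/path']
    ''.join(parts)   # options, key, source and destination fuse together
    ' '.join(parts)  # the properly spaced command the test expects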
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index c388eecfd..c193111c8 100755
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -106,13 +106,16 @@ class PowerVMDriver(driver.ComputeDriver):
self._powervm.destroy(instance['name'], destroy_disks)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
+ :param block_device_info: Info pertaining to attached volumes
+ :param bad_volumes_callback: Function to handle any bad volumes
+ encountered
"""
pass
@@ -275,7 +278,7 @@ class PowerVMDriver(driver.ComputeDriver):
defines the image from which this instance
was created
"""
- lpar_obj = self._powervm._create_lpar_instance(instance)
+ lpar_obj = self._powervm._create_lpar_instance(instance, network_info)
instance_type = instance_types.extract_instance_type(instance)
new_lv_size = instance_type['root_gb']
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index eeec4c5c2..798a2fde3 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -181,7 +181,7 @@ class VMwareESXDriver(driver.ComputeDriver):
self._vmops.snapshot(context, instance, name, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index decaed2b0..302679685 100755
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -194,9 +194,10 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None):
+ block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
- self._vmops.reboot(instance, reboot_type)
+ self._vmops.reboot(instance, reboot_type,
+ bad_volumes_callback=bad_volumes_callback)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 56dd5bd3d..cd7311678 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -33,7 +33,6 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
-from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
@@ -272,14 +271,31 @@ class VMOps(object):
step=5,
total_steps=RESIZE_TOTAL_STEPS)
- def _start(self, instance, vm_ref=None):
+ def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
+
+ # Attached volumes that have become non-responsive will prevent a VM
+ # from starting, so scan for these before attempting to start
+ #
+ # In order to make sure this detach is consistent (virt, BDM, cinder),
+ # we only detach in the virt-layer if a callback is provided.
+ if bad_volumes_callback:
+ bad_devices = self._volumeops.find_bad_volumes(vm_ref)
+ for device_name in bad_devices:
+ self._volumeops.detach_volume(
+ None, instance['name'], device_name)
+
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
+ # Allow higher layers a chance to detach bad volumes as well (in order
+ # to clean up BDM entries and detach in Cinder)
+ if bad_volumes_callback and bad_devices:
+ bad_volumes_callback(bad_devices)
+
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
@@ -930,7 +946,7 @@ class VMOps(object):
return 'VDI.resize'
- def reboot(self, instance, reboot_type):
+ def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
@@ -948,9 +964,18 @@ class VMOps(object):
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
+ self._start(instance, vm_ref=vm_ref,
+ bad_volumes_callback=bad_volumes_callback)
+ return
+ elif details[0] == 'SR_BACKEND_FAILURE_46':
+ LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
+ " volumes and starting halted instance"),
+ instance=instance)
+ self._start(instance, vm_ref=vm_ref,
+ bad_volumes_callback=bad_volumes_callback)
return
- raise
+ else:
+ raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
@@ -1325,20 +1350,11 @@ class VMOps(object):
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
- # NOTE(johannes): This can fail if the VM object hasn't been created
- # yet on the dom0. Since that step happens fairly late in the build
- # process, there's a potential for a race condition here. Until the
- # VM object is created, return back a 409 error instead of a 404
- # error.
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
- if instance['vm_state'] != vm_states.BUILDING:
- raise
-
- LOG.info(_('Fetching VM ref while BUILDING failed'),
- instance=instance)
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
+ # The compute manager expects InstanceNotFound for this case.
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d3c3046b7..add3787a3 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -151,3 +151,31 @@ class VolumeOps(object):
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs:
self._detach_vbd(vbd_ref, unplug=unplug)
+
+ def find_bad_volumes(self, vm_ref):
+ """Find any volumes with their connection severed.
+
+ Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
+ work when a VBD is present that points to a non-working volume. To work
+ around this, we scan for non-working volumes and detach them before
+ retrying a failed operation.
+ """
+ bad_devices = []
+ vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
+ for vbd_ref in vbd_refs:
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
+
+ try:
+ # TODO(sirp): bug1152401 This relies on a 120 sec timeout
+ # within XenServer, update this to fail-fast when this is fixed
+ # upstream
+ self._session.call_xenapi("SR.scan", sr_ref)
+ except self._session.XenAPI.Failure, exc:
+ if exc.details[0] == 'SR_BACKEND_FAILURE_40':
+ vbd_rec = self._session.call_xenapi(
+ "VBD.get_record", vbd_ref)
+ bad_devices.append('/dev/%s' % vbd_rec['device'])
+ else:
+ raise
+
+ return bad_devices
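Taken together with the _start() changes in vmops.py above, the callback contract is: the virt layer detaches the dead VBDs itself, then hands the affected device names back so the caller can clean up BDM entries and detach in Cinder. A hedged sketch of a caller follows; the handler name and body are illustrative and not taken from this diff.

    def _handle_bad_volumes_detached(bad_devices):
        # bad_devices is a list of device names, e.g. ['/dev/xvdb']
        for device_name in bad_devices:
            # look up the BDM for device_name, terminate the Cinder
            # connection and remove the BDM record here
            pass

    driver.reboot(context, instance, network_info, 'HARD',
                  block_device_info=block_device_info,
                  bad_volumes_callback=_handle_bad_volumes_detached)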
diff --git a/tools/pip-requires b/tools/pip-requires
index d8b836a29..092f5498c 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,4 +1,4 @@
-SQLAlchemy>=0.7.8,<=0.7.9
+SQLAlchemy>=0.7.8,<0.7.99
Cheetah>=2.4.4
amqplib>=0.6.1
anyjson>=0.2.4
@@ -22,9 +22,9 @@ iso8601>=0.1.4
httplib2
setuptools_git>=0.4
python-cinderclient>=1.0.1
-python-quantumclient>=2.1.2
+python-quantumclient>=2.2.0,<3.0.0
python-glanceclient>=0.5.0,<2
python-keystoneclient>=0.2.0
stevedore>=0.7
-websockify
-http://tarballs.openstack.org/oslo-config/oslo.config-1.1.0b1.tar.gz#egg=oslo.config
+websockify<0.4
+oslo.config>=1.1.0