summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-dhcpbridge4
-rwxr-xr-xbin/nova-manage10
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json34
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml12
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json219
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml71
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json10
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml5
-rw-r--r--etc/nova/nova.conf.sample2
-rw-r--r--etc/nova/policy.json1
-rw-r--r--nova/api/ec2/cloud.py10
-rw-r--r--nova/api/openstack/compute/contrib/baremetal_nodes.py10
-rw-r--r--nova/api/openstack/compute/contrib/image_size.py88
-rw-r--r--nova/api/openstack/compute/contrib/quota_classes.py3
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py4
-rw-r--r--nova/api/openstack/compute/images.py2
-rw-r--r--nova/api/sizelimit.py41
-rw-r--r--nova/common/memorycache.py5
-rw-r--r--nova/compute/api.py17
-rw-r--r--nova/compute/cells_api.py6
-rw-r--r--nova/compute/instance_types.py4
-rwxr-xr-xnova/compute/manager.py23
-rw-r--r--nova/compute/rpcapi.py16
-rw-r--r--nova/consoleauth/manager.py45
-rw-r--r--nova/consoleauth/rpcapi.py14
-rw-r--r--nova/context.py53
-rw-r--r--nova/db/api.py11
-rw-r--r--nova/db/sqlalchemy/api.py130
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py3
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py52
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py3
-rw-r--r--nova/locale/nova.pot3087
-rw-r--r--nova/network/linux_net.py9
-rw-r--r--nova/network/quantumv2/api.py1
-rw-r--r--nova/network/rpcapi.py3
-rw-r--r--nova/openstack/common/cfg.py17
-rw-r--r--nova/openstack/common/db/api.py101
-rw-r--r--nova/openstack/common/db/exception.py45
-rw-r--r--nova/openstack/common/db/sqlalchemy/session.py107
-rw-r--r--nova/openstack/common/eventlet_backdoor.py8
-rw-r--r--nova/openstack/common/processutils.py135
-rw-r--r--nova/openstack/common/rpc/__init__.py1
-rw-r--r--nova/openstack/common/rpc/common.py8
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py1
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py121
-rw-r--r--nova/servicegroup/drivers/zk.py21
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py30
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_image_size.py130
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_snapshots.py118
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py1
-rw-r--r--nova/tests/api/openstack/fakes.py26
-rw-r--r--nova/tests/api/test_sizelimit.py59
-rw-r--r--nova/tests/baremetal/db/test_bm_interface.py4
-rw-r--r--nova/tests/baremetal/db/test_bm_pxe_ip.py6
-rw-r--r--nova/tests/baremetal/test_driver.py2
-rw-r--r--nova/tests/baremetal/test_pxe.py4
-rw-r--r--nova/tests/compute/test_compute.py82
-rw-r--r--nova/tests/compute/test_rpcapi.py6
-rw-r--r--nova/tests/conductor/test_conductor.py2
-rw-r--r--nova/tests/consoleauth/test_consoleauth.py64
-rw-r--r--nova/tests/consoleauth/test_rpcapi.py8
-rw-r--r--nova/tests/fake_policy.py1
-rw-r--r--nova/tests/fakeguestfs.py7
-rw-r--r--nova/tests/fakelibvirt.py1
-rw-r--r--nova/tests/image/fake.py7
-rw-r--r--nova/tests/image/test_fake.py3
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl34
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl12
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl219
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl71
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl3
-rw-r--r--nova/tests/integrated/test_api_samples.py29
-rw-r--r--nova/tests/network/test_linux_net.py3
-rw-r--r--nova/tests/network/test_manager.py4
-rw-r--r--nova/tests/network/test_quantumv2.py4
-rw-r--r--nova/tests/network/test_rpcapi.py2
-rw-r--r--nova/tests/servicegroup/test_zk_driver.py2
-rw-r--r--nova/tests/test_imagecache.py27
-rw-r--r--nova/tests/test_libvirt.py217
-rw-r--r--nova/tests/test_libvirt_vif.py21
-rw-r--r--nova/tests/test_migrations.py201
-rw-r--r--nova/tests/test_powervm.py6
-rw-r--r--nova/tests/test_utils.py45
-rw-r--r--nova/tests/test_xenapi.py6
-rw-r--r--nova/tests/virt/disk/test_nbd.py8
-rw-r--r--nova/utils.py18
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/api.py15
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py9
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py3
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/session.py2
-rw-r--r--nova/virt/baremetal/pxe.py4
-rw-r--r--nova/virt/disk/vfs/guestfs.py5
-rw-r--r--nova/virt/firewall.py12
-rwxr-xr-xnova/virt/libvirt/driver.py233
-rw-r--r--nova/virt/libvirt/imagecache.py59
-rwxr-xr-xnova/virt/libvirt/utils.py38
-rw-r--r--nova/virt/libvirt/vif.py3
-rw-r--r--nova/virt/netutils.py3
-rw-r--r--nova/virt/powervm/blockdev.py6
-rw-r--r--nova/virt/vmwareapi/vmops.py1
-rw-r--r--openstack-common.conf2
-rwxr-xr-xrun_tests.sh17
-rwxr-xr-xtools/hacking.py7
-rw-r--r--tools/pip-requires4
-rwxr-xr-xtools/run_pep8.sh2
106 files changed, 4420 insertions, 2079 deletions
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index c00578821..1acaf4cd1 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -42,6 +42,7 @@ from nova import context
from nova import db
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -121,7 +122,8 @@ def main():
except KeyError:
config_file = os.environ['FLAGFILE']
- config.parse_args(sys.argv, default_config_files=[config_file])
+ config.parse_args(sys.argv,
+ default_config_files=jsonutils.loads(config_file))
logging.setup("nova")
diff --git a/bin/nova-manage b/bin/nova-manage
index c4e9841ce..0fde8ba0a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -80,7 +80,7 @@ from nova import db
from nova.db import migration
from nova import exception
from nova.openstack.common import cliutils
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -861,7 +861,7 @@ class InstanceTypeCommands(object):
except exception.InstanceTypeNotFound:
print _("Valid instance type name is required")
sys.exit(1)
- except db_session.DBError, e:
+ except db_exc.DBError, e:
print _("DB Error: %s") % e
sys.exit(2)
except Exception:
@@ -878,7 +878,7 @@ class InstanceTypeCommands(object):
inst_types = instance_types.get_all_types()
else:
inst_types = instance_types.get_instance_type_by_name(name)
- except db_session.DBError, e:
+ except db_exc.DBError, e:
_db_error(e)
if isinstance(inst_types.values()[0], dict):
for k, v in inst_types.iteritems():
@@ -909,7 +909,7 @@ class InstanceTypeCommands(object):
ext_spec)
print _("Key %(key)s set to %(value)s on instance"
" type %(name)s") % locals()
- except db_session.DBError, e:
+ except db_exc.DBError, e:
_db_error(e)
@args('--name', dest='name', metavar='<name>',
@@ -932,7 +932,7 @@ class InstanceTypeCommands(object):
key)
print _("Key %(key)s on instance type %(name)s unset") % locals()
- except db_session.DBError, e:
+ except db_exc.DBError, e:
_db_error(e)
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json
new file mode 100644
index 000000000..1548aeb59
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-EXT-IMG-SIZE:size": "74185822",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml
new file mode 100644
index 000000000..49fe2ee31
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-EXT-IMG-SIZE:size="74185822">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+</image> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json
new file mode 100644
index 000000000..bbd9dcfb1
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "OS-EXT-IMG-SIZE:size": "74185822",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "58145823",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "49163826",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "26360814",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "84035174",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "83594576",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml
new file mode 100644
index 000000000..d0b5787ca
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-EXT-IMG-SIZE:size="74185822">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="155d900f-4e14-4e4c-a73d-069cbf4541e6" OS-EXT-IMG-SIZE:size="25165824">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a2459075-d96c-40d5-893e-577ff92e721c" OS-EXT-IMG-SIZE:size="58145823">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a440c04b-79fa-479c-bed1-0b816eaec379" OS-EXT-IMG-SIZE:size="49163826">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77" OS-EXT-IMG-SIZE:size="26360814">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="cedef40a-ed67-4d10-800e-17455edce175" OS-EXT-IMG-SIZE:size="84035174">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" OS-EXT-IMG-SIZE:size="83594576">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 0ce9b9aca..0ad7a6498 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -25,6 +25,14 @@
"updated": "2013-01-30T00:00:00+00:00"
},
{
+ "alias": "OS-EXT-IMG-SIZE",
+ "description": "Adds image size to image listings.",
+ "links": [],
+ "name": "ImageSize",
+ "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
+ "updated": "2013-02-19T00:00:00+00:00"
+ },
+ {
"alias": "OS-EXT-IPS",
"description": "Adds type parameter to the ip list.",
"links": [],
@@ -322,7 +330,7 @@
},
{
"alias": "os-instance-actions",
- "description": "View a log of actions taken on an instance",
+ "description": "View a log of actions and events taken on an instance.",
"links": [],
"name": "InstanceActions",
"namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 4fcdeb285..da20e0b61 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -9,6 +9,9 @@
<extension alias="OS-EXT-AZ" updated="2013-01-30T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
<description>Extended Server Attributes support.</description>
</extension>
+ <extension alias="OS-EXT-IMG-SIZE" updated="2013-02-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/image_size/api/v1.1" name="ImageSize">
+ <description>Adds image size to image listings.</description>
+ </extension>
<extension alias="OS-EXT-IPS" updated="2013-01-06T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
<description>Adds type parameter to the ip list.</description>
</extension>
@@ -139,7 +142,7 @@
<description>Admin-only hypervisor administration.</description>
</extension>
<extension alias="os-instance-actions" updated="2013-02-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/instance-actions/api/v1.1" name="InstanceActions">
- <description>View a log of actions taken on an instance</description>
+ <description>View a log of actions and events taken on an instance.</description>
</extension>
<extension alias="os-instance_usage_audit_log" updated="2012-07-06T01:00:00+00:00" namespace="http://docs.openstack.org/ext/services/api/v1.1" name="OSInstanceUsageAuditLog">
<description>Admin-only Task Log Monitoring.</description>
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 9cbb8c1a5..a094469f7 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -970,7 +970,7 @@
# Options defined in nova.network.linux_net
#
-# location of flagfile for dhcpbridge (string value)
+# location of flagfile(s) for dhcpbridge (multi valued)
#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
# Location to keep network config files (string value)
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index fb54efc8b..5a6800f94 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -67,6 +67,7 @@
"compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:image_size": "",
"compute_extension:instance_actions": "",
"compute_extension:instance_actions:events": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index b3f9bd099..dcbde3428 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -42,6 +42,7 @@ from nova import db
from nova import exception
from nova.image import s3
from nova import network
+from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
@@ -1696,6 +1697,15 @@ class CloudSecurityGroupNovaAPI(compute_api.SecurityGroupAPI,
pass
+class CloudSecurityGroupQuantumAPI(quantum_driver.SecurityGroupAPI,
+ EC2SecurityGroupExceptions):
+ pass
+
+
def get_cloud_security_group_api():
if cfg.CONF.security_group_api.lower() == 'nova':
return CloudSecurityGroupNovaAPI()
+ elif cfg.CONF.security_group_api.lower() == 'quantum':
+ return CloudSecurityGroupQuantumAPI()
+ else:
+ raise NotImplementedError()
diff --git a/nova/api/openstack/compute/contrib/baremetal_nodes.py b/nova/api/openstack/compute/contrib/baremetal_nodes.py
index 38d66d2ae..70bb6e8a0 100644
--- a/nova/api/openstack/compute/contrib/baremetal_nodes.py
+++ b/nova/api/openstack/compute/contrib/baremetal_nodes.py
@@ -104,7 +104,7 @@ class BareMetalNodeController(wsgi.Controller):
try:
ifs = db.bm_interface_get_all_by_bm_node_id(
context, node_from_db['id'])
- except exception.InstanceNotFound:
+ except exception.NodeNotFound:
ifs = []
node = _node_dict(node_from_db)
node['interfaces'] = [_interface_dict(i) for i in ifs]
@@ -117,11 +117,11 @@ class BareMetalNodeController(wsgi.Controller):
authorize(context)
try:
node = db.bm_node_get(context, id)
- except exception.InstanceNotFound:
+ except exception.NodeNotFound:
raise webob.exc.HTTPNotFound
try:
ifs = db.bm_interface_get_all_by_bm_node_id(context, id)
- except exception.InstanceNotFound:
+ except exception.NodeNotFound:
ifs = []
node = _node_dict(node)
node['interfaces'] = [_interface_dict(i) for i in ifs]
@@ -141,14 +141,14 @@ class BareMetalNodeController(wsgi.Controller):
authorize(context)
try:
db.bm_node_destroy(context, id)
- except exception.InstanceNotFound:
+ except exception.NodeNotFound:
raise webob.exc.HTTPNotFound
return webob.Response(status_int=202)
def _check_node_exists(self, context, node_id):
try:
db.bm_node_get(context, node_id)
- except exception.InstanceNotFound:
+ except exception.NodeNotFound:
raise webob.exc.HTTPNotFound
@wsgi.serializers(xml=InterfaceTemplate)
diff --git a/nova/api/openstack/compute/contrib/image_size.py b/nova/api/openstack/compute/contrib/image_size.py
new file mode 100644
index 000000000..21998738f
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/image_size.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+
+authorize = extensions.soft_extension_authorizer('compute', 'image_size')
+
+
+def make_image(elem):
+ elem.set('{%s}size' % Image_size.namespace, '%s:size' % Image_size.alias)
+
+
+class ImagesSizeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('images')
+ elem = xmlutil.SubTemplateElement(root, 'image', selector='images')
+ make_image(elem)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Image_size.alias: Image_size.namespace})
+
+
+class ImageSizeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('image', selector='image')
+ make_image(root)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Image_size.alias: Image_size.namespace})
+
+
+class ImageSizeController(wsgi.Controller):
+
+ def _extend_image(self, image, image_cache):
+ key = "%s:size" % Image_size.alias
+ image[key] = image_cache['size']
+
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ context = req.environ["nova.context"]
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ImageSizeTemplate())
+ image_resp = resp_obj.obj['image']
+ # image guaranteed to be in the cache due to the core API adding
+ # it in its 'show' method
+ image_cached = req.get_db_item('images', image_resp['id'])
+ self._extend_image(image_resp, image_cached)
+
+ @wsgi.extends
+ def detail(self, req, resp_obj):
+ context = req.environ['nova.context']
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ImagesSizeTemplate())
+ images_resp = list(resp_obj.obj['images'])
+ # images guaranteed to be in the cache due to the core API adding
+ # it in its 'detail' method
+ for image in images_resp:
+ image_cached = req.get_db_item('images', image['id'])
+ self._extend_image(image, image_cached)
+
+
+class Image_size(extensions.ExtensionDescriptor):
+ """Adds image size to image listings."""
+
+ name = "ImageSize"
+ alias = "OS-EXT-IMG-SIZE"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "image_size/api/v1.1")
+ updated = "2013-02-19T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = ImageSizeController()
+ extension = extensions.ControllerExtension(self, 'images', controller)
+ return [extension]
diff --git a/nova/api/openstack/compute/contrib/quota_classes.py b/nova/api/openstack/compute/contrib/quota_classes.py
index f3f5b9b08..7b94e45b1 100644
--- a/nova/api/openstack/compute/contrib/quota_classes.py
+++ b/nova/api/openstack/compute/contrib/quota_classes.py
@@ -18,6 +18,7 @@ import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+import nova.context
from nova import db
from nova import exception
from nova import quota
@@ -59,7 +60,7 @@ class QuotaClassSetsController(object):
context = req.environ['nova.context']
authorize(context)
try:
- db.sqlalchemy.api.authorize_quota_class_context(context, id)
+ nova.context.authorize_quota_class_context(context, id)
return self._format_quota_set(id,
QUOTAS.get_class_quotas(context, id))
except exception.NotAuthorized:
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index 728c3fad6..b1a461431 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -20,8 +20,8 @@ import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+import nova.context
from nova import db
-from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common import log as logging
from nova import quota
@@ -78,7 +78,7 @@ class QuotaSetsController(object):
context = req.environ['nova.context']
authorize_show(context)
try:
- sqlalchemy_api.authorize_project_context(context, id)
+ nova.context.authorize_project_context(context, id)
return self._format_quota_set(id, self._get_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 7dda64f87..703d2fe2d 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -144,6 +144,7 @@ class Controller(wsgi.Controller):
explanation = _("Image not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
+ req.cache_db_items('images', [image], 'id')
return self._view_builder.show(req, image)
def delete(self, req, id):
@@ -200,6 +201,7 @@ class Controller(wsgi.Controller):
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=str(e))
+ req.cache_db_items('images', images, 'id')
return self._view_builder.detail(req, images)
def create(self, *args, **kwargs):
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 67d459583..1e88f183e 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -37,6 +37,35 @@ CONF.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
+class LimitingReader(object):
+ """Reader to limit the size of an incoming request."""
+ def __init__(self, data, limit):
+ """
+ :param data: Underlying data object
+ :param limit: maximum number of bytes the reader should allow
+ """
+ self.data = data
+ self.limit = limit
+ self.bytes_read = 0
+
+ def __iter__(self):
+ for chunk in self.data:
+ self.bytes_read += len(chunk)
+ if self.bytes_read > self.limit:
+ msg = _("Request is too large.")
+ raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+ else:
+ yield chunk
+
+ def read(self, i=None):
+ result = self.data.read(i)
+ self.bytes_read += len(result)
+ if self.bytes_read > self.limit:
+ msg = _("Request is too large.")
+ raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+ return result
+
+
class RequestBodySizeLimiter(wsgi.Middleware):
"""Limit the size of incoming requests."""
@@ -45,9 +74,11 @@ class RequestBodySizeLimiter(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- if (req.content_length > CONF.osapi_max_request_body_size
- or len(req.body) > CONF.osapi_max_request_body_size):
+ if req.content_length > CONF.osapi_max_request_body_size:
msg = _("Request is too large.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
- else:
- return self.application
+ raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+ if req.content_length is None and req.is_body_readable:
+ limiter = LimitingReader(req.body_file,
+ CONF.osapi_max_request_body_size)
+ req.body_file = limiter
+ return self.application
diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py
index f89e4b265..c124784d5 100644
--- a/nova/common/memorycache.py
+++ b/nova/common/memorycache.py
@@ -83,3 +83,8 @@ class Client(object):
new_value = int(value) + delta
self.cache[key] = (self.cache[key][0], str(new_value))
return new_value
+
+ def delete(self, key, time=0):
+ """Deletes the value associated with a key."""
+ if key in self.cache:
+ del self.cache[key]
diff --git a/nova/compute/api.py b/nova/compute/api.py
index cc07a998a..bba6ee1eb 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -100,6 +100,7 @@ CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
+RO_SECURITY_GROUPS = ['default']
def check_instance_state(vm_state=None, task_state=(None,)):
@@ -2189,8 +2190,9 @@ class API(base.Base):
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
- connect_info['token'], console_type, connect_info['host'],
- connect_info['port'], connect_info['internal_access_path'])
+ connect_info['token'], console_type,
+ connect_info['host'], connect_info['port'],
+ connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@@ -2207,10 +2209,10 @@ class API(base.Base):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
-
self.consoleauth_rpcapi.authorize_console(context,
- connect_info['token'], console_type, connect_info['host'],
- connect_info['port'], connect_info['internal_access_path'])
+ connect_info['token'], console_type,
+ connect_info['host'], connect_info['port'],
+ connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@@ -2879,6 +2881,11 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
return groups
def destroy(self, context, security_group):
+ if security_group['name'] in RO_SECURITY_GROUPS:
+ msg = _("Unable to delete system group '%s'") % \
+ security_group['name']
+ self.raise_invalid_group(msg)
+
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 1e30331bc..22e31a8e1 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -465,7 +465,8 @@ class ComputeCellsAPI(compute_api.API):
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
- connect_info['port'], connect_info['internal_access_path'])
+ connect_info['port'], connect_info['internal_access_path'],
+ instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@@ -480,7 +481,8 @@ class ComputeCellsAPI(compute_api.API):
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
- connect_info['port'], connect_info['internal_access_path'])
+ connect_info['port'], connect_info['internal_access_path'],
+ instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@validate_cell
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 73105b33f..3060e0bc2 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -28,7 +28,7 @@ from oslo.config import cfg
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import log as logging
from nova import utils
@@ -134,7 +134,7 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
try:
return db.instance_type_create(context.get_admin_context(), kwargs)
- except db_session.DBError, e:
+ except db_exc.DBError, e:
LOG.exception(_('DB error: %s') % e)
raise exception.InstanceTypeCreateFailed()
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index afeb9f02e..99b97e921 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -50,6 +50,7 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
+from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
@@ -317,7 +318,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.25'
+ RPC_API_VERSION = '2.26'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -335,6 +336,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.conductor_api = conductor.API()
self.is_quantum_security_groups = (
openstack_driver.is_quantum_security_groups())
+ self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
+
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -1223,6 +1226,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
+ if CONF.vnc_enabled or CONF.spice.enabled:
+ self.consoleauth_rpcapi.delete_tokens_for_instance(context,
+ instance['uuid'])
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_event
@wrap_instance_fault
@@ -2394,9 +2401,9 @@ class ComputeManager(manager.SchedulerDependentManager):
return self.driver.set_host_enabled(host, enabled)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def get_host_uptime(self, context, host):
+ def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
- return self.driver.get_host_uptime(host)
+ return self.driver.get_host_uptime(self.host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
@@ -2555,6 +2562,16 @@ class ComputeManager(manager.SchedulerDependentManager):
return connect_info
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @wrap_instance_fault
+ def validate_console_port(self, ctxt, instance, port, console_type):
+ if console_type == "spice-html5":
+ console_info = self.driver.get_spice_console(instance)
+ else:
+ console_info = self.driver.get_vnc_console(instance)
+
+ return console_info['port'] == port
+
def _attach_volume_boot(self, context, instance, volume, mountpoint):
"""Attach a volume to an instance at boot time. So actual attach
is done by instance creation"""
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 0be9972da..914c45471 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -50,9 +50,9 @@ def _compute_topic(topic, ctxt, host, instance):
if not instance:
raise exception.NovaException(_('No compute host specified'))
host = instance['host']
- if not host:
- raise exception.NovaException(_('Unable to find host for '
- 'Instance %s') % instance['uuid'])
+ if not host:
+ raise exception.NovaException(_('Unable to find host for '
+ 'Instance %s') % instance['uuid'])
return rpc.queue_get_for(ctxt, topic, host)
@@ -161,6 +161,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.23 - Remove network_info from reboot_instance
2.24 - Added get_spice_console method
2.25 - Add attach_interface() and detach_interface()
+ 2.26 - Add validate_console_port to ensure the service connects to
+ vnc on the correct port
'''
#
@@ -321,6 +323,14 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, None, instance),
version='2.24')
+ def validate_console_port(self, ctxt, instance, port, console_type):
+ instance_p = jsonutils.to_primitive(instance)
+ return self.call(ctxt, self.make_msg('validate_console_port',
+ instance=instance_p, port=port, console_type=console_type),
+ topic=_compute_topic(self.topic, ctxt,
+ None, instance),
+ version='2.26')
+
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 74321a27b..56e94dffd 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -23,6 +23,8 @@ import time
from oslo.config import cfg
from nova.common import memorycache
+from nova.compute import rpcapi as compute_rpcapi
+from nova.conductor import api as conductor_api
from nova import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -46,15 +48,27 @@ CONF.register_opts(consoleauth_opts)
class ConsoleAuthManager(manager.Manager):
"""Manages token based authentication."""
- RPC_API_VERSION = '1.1'
+ RPC_API_VERSION = '1.2'
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(*args, **kwargs)
self.mc = memorycache.get_client()
+ self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ self.conductor_api = conductor_api.API()
+
+ def _get_tokens_for_instance(self, instance_uuid):
+ tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
+ if not tokens_str:
+ tokens = []
+ else:
+ tokens = jsonutils.loads(tokens_str)
+ return tokens
def authorize_console(self, context, token, console_type, host, port,
- internal_access_path):
+ internal_access_path, instance_uuid=None):
+
token_dict = {'token': token,
+ 'instance_uuid': instance_uuid,
'console_type': console_type,
'host': host,
'port': port,
@@ -62,14 +76,39 @@ class ConsoleAuthManager(manager.Manager):
'last_activity_at': time.time()}
data = jsonutils.dumps(token_dict)
self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl)
+ if instance_uuid is not None:
+ tokens = self._get_tokens_for_instance(instance_uuid)
+ tokens.append(token)
+ self.mc.set(instance_uuid.encode('UTF-8'),
+ jsonutils.dumps(tokens))
+
LOG.audit(_("Received Token: %(token)s, %(token_dict)s)"), locals())
+ def _validate_token(self, context, token):
+ instance_uuid = token['instance_uuid']
+ if instance_uuid is None:
+ return False
+ instance = self.conductor_api.instance_get_by_uuid(context,
+ instance_uuid)
+ return self.compute_rpcapi.validate_console_port(context,
+ instance,
+ token['port'],
+ token['console_type'])
+
def check_token(self, context, token):
token_str = self.mc.get(token.encode('UTF-8'))
token_valid = (token_str is not None)
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
if token_valid:
- return jsonutils.loads(token_str)
+ token = jsonutils.loads(token_str)
+ if self._validate_token(context, token):
+ return token
+
+ def delete_tokens_for_instance(self, context, instance_uuid):
+ tokens = self._get_tokens_for_instance(instance_uuid)
+ for token in tokens:
+ self.mc.delete(token)
+ self.mc.delete(instance_uuid.encode('UTF-8'))
def get_backdoor_port(self, context):
return self.backdoor_port
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index 813143f76..474f3ad19 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -32,6 +32,8 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.0 - Initial version.
1.1 - Added get_backdoor_port()
+ 1.2 - Added instance_uuid to authorize_console, and
+ delete_tokens_for_instance
'''
#
@@ -50,18 +52,26 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
default_version=self.BASE_RPC_API_VERSION)
def authorize_console(self, ctxt, token, console_type, host, port,
- internal_access_path):
+ internal_access_path, instance_uuid=None):
# The remote side doesn't return anything, but we want to block
# until it completes.
return self.call(ctxt,
self.make_msg('authorize_console',
token=token, console_type=console_type,
host=host, port=port,
- internal_access_path=internal_access_path))
+ internal_access_path=internal_access_path,
+ instance_uuid=instance_uuid),
+ version="1.2")
def check_token(self, ctxt, token):
return self.call(ctxt, self.make_msg('check_token', token=token))
+ def delete_tokens_for_instance(self, ctxt, instance_uuid):
+ return self.call(ctxt,
+ self.make_msg('delete_tokens_for_instance',
+ instance_uuid=instance_uuid),
+ version="1.2")
+
def get_backdoor_port(self, ctxt, host):
return self.call(ctxt, self.make_msg('get_backdoor_port'),
version='1.1')
diff --git a/nova/context.py b/nova/context.py
index 60fd5b4c0..831a91b11 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -22,6 +22,7 @@
import copy
import uuid
+from nova import exception
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -166,3 +167,55 @@ def get_admin_context(read_deleted="no"):
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
+
+
+def is_user_context(context):
+ """Indicates if the request context is a normal user."""
+ if not context:
+ return False
+ if context.is_admin:
+ return False
+ if not context.user_id or not context.project_id:
+ return False
+ return True
+
+
+def require_admin_context(ctxt):
+ """Raise exception.AdminRequired() if context is an admin context."""
+ if not ctxt.is_admin:
+ raise exception.AdminRequired()
+
+
+def require_context(ctxt):
+ """Raise exception.NotAuthorized() if context is not a user or an
+ admin context.
+ """
+ if not ctxt.is_admin and not is_user_context(ctxt):
+ raise exception.NotAuthorized()
+
+
+def authorize_project_context(context, project_id):
+ """Ensures a request has permission to access the given project."""
+ if is_user_context(context):
+ if not context.project_id:
+ raise exception.NotAuthorized()
+ elif context.project_id != project_id:
+ raise exception.NotAuthorized()
+
+
+def authorize_user_context(context, user_id):
+ """Ensures a request has permission to access the given user."""
+ if is_user_context(context):
+ if not context.user_id:
+ raise exception.NotAuthorized()
+ elif context.user_id != user_id:
+ raise exception.NotAuthorized()
+
+
+def authorize_quota_class_context(context, class_name):
+ """Ensures a request has permission to access the given quota class."""
+ if is_user_context(context):
+ if not context.quota_class:
+ raise exception.NotAuthorized()
+ elif context.quota_class != class_name:
+ raise exception.NotAuthorized()
diff --git a/nova/db/api.py b/nova/db/api.py
index e38cf1866..d14999b45 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -47,14 +47,11 @@ from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
+from nova.openstack.common.db import api as db_api
from nova.openstack.common import log as logging
-from nova import utils
db_opts = [
- cfg.StrOpt('db_backend',
- default='sqlalchemy',
- help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
@@ -69,8 +66,10 @@ db_opts = [
CONF = cfg.CONF
CONF.register_opts(db_opts)
-IMPL = utils.LazyPluggable('db_backend',
- sqlalchemy='nova.db.sqlalchemy.api')
+_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
+
+
+IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b83b8e839..9efdc47f1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,6 +23,8 @@ import collections
import copy
import datetime
import functools
+import sys
+import time
import uuid
from oslo.config import cfg
@@ -45,9 +47,11 @@ from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
+import nova.context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import log as logging
@@ -74,42 +78,9 @@ get_engine = db_session.get_engine
get_session = db_session.get_session
-def is_user_context(context):
- """Indicates if the request context is a normal user."""
- if not context:
- return False
- if context.is_admin:
- return False
- if not context.user_id or not context.project_id:
- return False
- return True
-
-
-def authorize_project_context(context, project_id):
- """Ensures a request has permission to access the given project."""
- if is_user_context(context):
- if not context.project_id:
- raise exception.NotAuthorized()
- elif context.project_id != project_id:
- raise exception.NotAuthorized()
-
-
-def authorize_user_context(context, user_id):
- """Ensures a request has permission to access the given user."""
- if is_user_context(context):
- if not context.user_id:
- raise exception.NotAuthorized()
- elif context.user_id != user_id:
- raise exception.NotAuthorized()
-
-
-def authorize_quota_class_context(context, class_name):
- """Ensures a request has permission to access the given quota class."""
- if is_user_context(context):
- if not context.quota_class:
- raise exception.NotAuthorized()
- elif context.quota_class != class_name:
- raise exception.NotAuthorized()
+def get_backend():
+ """The backend is this module itself."""
+ return sys.modules[__name__]
def require_admin_context(f):
@@ -120,9 +91,7 @@ def require_admin_context(f):
"""
def wrapper(*args, **kwargs):
- context = args[0]
- if not context.is_admin:
- raise exception.AdminRequired()
+ nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
@@ -131,17 +100,15 @@ def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
- :py:func:`authorize_project_context` and
- :py:func:`authorize_user_context`.
+ :py:func:`nova.context.authorize_project_context` and
+ :py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
- context = args[0]
- if not context.is_admin and not is_user_context(context):
- raise exception.NotAuthorized()
+ nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
@@ -174,6 +141,24 @@ def require_aggregate_exists(f):
return wrapper
+def _retry_on_deadlock(f):
+ """Decorator to retry a DB API call if Deadlock was received."""
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ while True:
+ try:
+ return f(*args, **kwargs)
+ except db_exc.DBDeadlock:
+ LOG.warn(_("Deadlock detected when running "
+ "'%(func_name)s': Retrying..."),
+ dict(func_name=f.__name__))
+ # Retry!
+ time.sleep(0.5)
+ continue
+ functools.update_wrapper(wrapped, f)
+ return wrapped
+
+
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
@@ -215,7 +200,7 @@ def model_query(context, model, *args, **kwargs):
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
- if is_user_context(context) and project_only:
+ if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
@@ -658,7 +643,7 @@ def floating_ip_get_pools(context):
@require_context
def floating_ip_allocate_address(context, project_id, pool):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
@@ -749,7 +734,7 @@ def floating_ip_create(context, values, session=None):
@require_context
def floating_ip_count_by_project(context, project_id, session=None):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
@@ -848,7 +833,7 @@ def floating_ip_get_all_by_host(context, host):
@require_context
def floating_ip_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
@@ -879,8 +864,8 @@ def _floating_ip_get_by_address(context, address, session=None):
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
- if result.project_id and is_user_context(context):
- authorize_project_context(context, result.project_id)
+ if result.project_id and nova.context.is_user_context(context):
+ nova.context.authorize_project_context(context, result.project_id)
return result
@@ -1128,10 +1113,11 @@ def fixed_ip_get(context, id, get_network=False):
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
- if is_user_context(context) and result['instance_uuid'] is not None:
+ if (nova.context.is_user_context(context) and
+ result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
- authorize_project_context(context, instance.project_id)
+ nova.context.authorize_project_context(context, instance.project_id)
return result
@@ -1157,11 +1143,12 @@ def fixed_ip_get_by_address(context, address, session=None):
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
- if is_user_context(context) and result['instance_uuid'] is not None:
+ if (nova.context.is_user_context(context) and
+ result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'],
session)
- authorize_project_context(context, instance.project_id)
+ nova.context.authorize_project_context(context, instance.project_id)
return result
@@ -1262,7 +1249,7 @@ def virtual_interface_create(context, values):
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
- except db_session.DBError:
+ except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
@@ -1966,7 +1953,7 @@ def key_pair_create(context, values):
@require_context
def key_pair_destroy(context, user_id, name):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
@@ -1975,7 +1962,7 @@ def key_pair_destroy(context, user_id, name):
@require_context
def key_pair_get(context, user_id, name):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
@@ -1989,14 +1976,14 @@ def key_pair_get(context, user_id, name):
@require_context
def key_pair_get_all_by_user(context, user_id):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
@@ -2077,7 +2064,7 @@ def network_create_safe(context, values):
try:
network_ref.save()
return network_ref
- except db_session.DBDuplicateEntry:
+ except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@@ -2319,7 +2306,7 @@ def network_update(context, network_id, values):
network_ref.update(values)
try:
network_ref.save(session=session)
- except db_session.DBDuplicateEntry:
+ except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
@@ -2365,7 +2352,7 @@ def quota_get(context, project_id, resource):
@require_context
def quota_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -2417,7 +2404,7 @@ def quota_class_get(context, class_name, resource):
@require_context
def quota_class_get_all_by_name(context, class_name):
- authorize_quota_class_context(context, class_name)
+ nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
@@ -2469,7 +2456,7 @@ def quota_usage_get(context, project_id, resource):
@require_context
def quota_usage_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -2816,7 +2803,7 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
- """Create ec2 compatable volume by provided uuid."""
+ """Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
@@ -2853,7 +2840,7 @@ def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
- """Create ec2 compatable snapshot by provided uuid."""
+ """Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
@@ -3168,7 +3155,7 @@ def security_group_destroy(context, security_group_id):
@require_context
def security_group_count_by_project(context, project_id, session=None):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
@@ -3545,7 +3532,7 @@ def instance_type_create(context, values):
instance_type_ref.update(values)
instance_type_ref.save(session=session)
except Exception, e:
- raise db_session.DBError(e)
+ raise db_exc.DBError(e)
return _dict_with_extra_specs(instance_type_ref)
@@ -3974,6 +3961,7 @@ def bw_usage_get_by_uuids(context, uuids, start_period):
@require_context
+@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None,
session=None):
@@ -4206,7 +4194,7 @@ def s3_image_create(context, image_uuid):
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception, e:
- raise db_session.DBError(e)
+ raise db_exc.DBError(e)
return s3_image_ref
@@ -4637,7 +4625,7 @@ def action_event_get_by_id(context, action_id, event_id):
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
- """Create ec2 compatable instance by provided uuid."""
+ """Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
@@ -4726,7 +4714,7 @@ def task_log_begin_task(context, task_name, period_beginning, period_ending,
task.task_items = task_items
try:
task.save()
- except db_session.DBDuplicateEntry:
+ except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
index a208aecf6..c30cdecdc 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
@@ -50,6 +50,5 @@ def downgrade(migrate_engine):
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
- node = Column('node', String(length=255))
- instances.drop_column(node)
+ instances.drop_column('node')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py
index 97b0f7bb0..692f9599b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py
@@ -1,4 +1,4 @@
-# Copyright 2012 OpenSmigrations.ck LLC.
+# Copyright 2012 Openstack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -16,6 +16,29 @@ from sqlalchemy import and_, Index, String, Column, MetaData, Table
from sqlalchemy.sql.expression import select, update
+def _drop_index(engine, table, idx_name):
+ """Drop index from DB and remove index from SQLAlchemy table metadata.
+
+ idx.drop() in SQLAlchemy will issue a DROP INDEX statement to the DB but
+ WILL NOT update the table metadata to remove the `Index` object.
+
+ This can cause subsequent drop column calls on a related column to fail,
+ because `drop_column` will see an `Index` object that no longer exists in
+ the database and thus issue an erroneous second DROP INDEX call.
+
+ The solution is to update the table metadata to reflect the now dropped
+ index.
+ """
+ for idx in getattr(table, 'indexes'):
+ if idx.name == idx_name:
+ break
+ else:
+ raise Exception("Index '%s' not found!" % idx_name)
+
+ idx.drop(engine)
+ table.indexes.remove(idx)
+
+
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
@@ -52,15 +75,14 @@ def downgrade(migrate_engine):
migrations = Table('migrations', meta, autoload=True)
- # drop new columns:
- source_node = Column('source_node', String(length=255))
- migrations.drop_column(source_node)
+ # drop new index:
+ _drop_index(migrate_engine, migrations,
+ 'migrations_by_host_nodes_and_status_idx')
- dest_node = Column('dest_node', String(length=255))
- migrations.drop_column(dest_node)
+ # drop new columns:
+ migrations.drop_column('source_node')
- # drop new index:
- _drop_new_index(migrations, migrate_engine)
+ migrations.drop_column('dest_node')
# re-add old index:
i = _old_index(migrations)
@@ -110,20 +132,6 @@ def _add_new_index(migrations, migrate_engine):
i.create(migrate_engine)
-def _drop_new_index(migrations, migrate_engine):
- if migrate_engine.name == "mysql":
- sql = ("drop index migrations_by_host_nodes_and_status_idx on "
- "migrations")
- migrate_engine.execute(sql)
-
- else:
- i = Index('migrations_by_host_nodes_and_status_idx',
- migrations.c.deleted, migrations.c.source_compute,
- migrations.c.dest_compute, migrations.c.source_node,
- migrations.c.dest_node, migrations.c.status)
- i.drop(migrate_engine)
-
-
def _old_index(migrations):
i = Index('migrations_by_host_and_status_idx', migrations.c.deleted,
migrations.c.source_compute, migrations.c.dest_compute,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
index 04f31ce5f..f1cfaf5c6 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
@@ -53,5 +53,6 @@ def downgrade(migrate_engine):
[aggregate_metadata.c.value]).where(aggregates.c.id ==
aggregate_metadata.c.aggregate_id).where(aggregate_metadata.c.key ==
'availability_zone')).execute()
- delete(aggregate_metadata, aggregate_metadata.c.key == 'availability_zone')
+ delete(aggregate_metadata,
+ aggregate_metadata.c.key == 'availability_zone').execute()
aggregates.c.availability_zone.alter(nullable=False)
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 743550929..1b3d0474c 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova jenkins.nova.propose.translation.update.162\n"
+"Project-Id-Version: nova jenkins.nova.propose.translation.update.169\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-02-18 00:03+0000\n"
+"POT-Creation-Date: 2013-02-25 00:04+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,17 +17,17 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 0.9.6\n"
-#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:97
+#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:98
#, python-format
msgid "block_device_list %s"
msgstr ""
-#: nova/context.py:62
+#: nova/context.py:63
#, python-format
msgid "Arguments dropped when creating context: %s"
msgstr ""
-#: nova/context.py:101
+#: nova/context.py:102
#, python-format
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
@@ -97,7 +97,7 @@ msgstr ""
msgid "An unknown exception occurred."
msgstr ""
-#: nova/exception.py:144 nova/openstack/common/rpc/common.py:89
+#: nova/exception.py:144 nova/openstack/common/rpc/common.py:90
msgid "Exception in string format operation"
msgstr ""
@@ -174,8 +174,8 @@ msgstr ""
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:234 nova/api/ec2/cloud.py:461
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2639
+#: nova/exception.py:234 nova/api/ec2/cloud.py:463
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2691
msgid "Keypair data is invalid"
msgstr ""
@@ -191,7 +191,7 @@ msgstr ""
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:250 nova/api/openstack/compute/servers.py:1331
+#: nova/exception.py:250 nova/api/openstack/compute/servers.py:1330
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
@@ -205,7 +205,7 @@ msgstr ""
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:262 nova/api/ec2/cloud.py:615
+#: nova/exception.py:262 nova/api/ec2/cloud.py:617
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
@@ -220,7 +220,7 @@ msgstr ""
msgid "Invalid cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:274 nova/openstack/common/db/sqlalchemy/session.py:353
+#: nova/exception.py:274 nova/openstack/common/db/sqlalchemy/session.py:345
msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
@@ -295,70 +295,74 @@ msgid "Failed to terminate instance"
msgstr ""
#: nova/exception.py:339
-msgid "Service is unavailable at this time."
+msgid "Failed to deploy instance"
msgstr ""
#: nova/exception.py:343
-msgid "Insufficient compute resources."
+msgid "Service is unavailable at this time."
msgstr ""
#: nova/exception.py:347
+msgid "Insufficient compute resources."
+msgstr ""
+
+#: nova/exception.py:351
#, python-format
msgid "Compute service of %(host)s is unavailable at this time."
msgstr ""
-#: nova/exception.py:351
+#: nova/exception.py:355
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:356
+#: nova/exception.py:360
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:360
+#: nova/exception.py:364
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:365
+#: nova/exception.py:369
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:370
+#: nova/exception.py:374
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:374
+#: nova/exception.py:378
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:378
+#: nova/exception.py:382
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:382
+#: nova/exception.py:386
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:386
+#: nova/exception.py:390
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:390
+#: nova/exception.py:394
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:396
+#: nova/exception.py:400
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -366,85 +370,85 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:403
+#: nova/exception.py:407
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:407
+#: nova/exception.py:411
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:411
+#: nova/exception.py:415
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:415
+#: nova/exception.py:419
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:419
+#: nova/exception.py:423
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:423
+#: nova/exception.py:427
#, python-format
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr ""
-#: nova/exception.py:427
+#: nova/exception.py:431
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:432
+#: nova/exception.py:436
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:437
+#: nova/exception.py:441
#, python-format
msgid "No agent-build associated with id %(id)s."
msgstr ""
-#: nova/exception.py:441
+#: nova/exception.py:445
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:445
+#: nova/exception.py:449
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:449
+#: nova/exception.py:453
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:453
+#: nova/exception.py:457
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:457
+#: nova/exception.py:461
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:461
+#: nova/exception.py:465
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:465
+#: nova/exception.py:469
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:469
+#: nova/exception.py:473
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -452,767 +456,802 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:476
+#: nova/exception.py:480
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:480
+#: nova/exception.py:484
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:484
+#: nova/exception.py:488
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr ""
-#: nova/exception.py:488
+#: nova/exception.py:492
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:492
+#: nova/exception.py:496
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:496
+#: nova/exception.py:500
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:500
+#: nova/exception.py:504
+#, python-format
+msgid "Port id %(port_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:508
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:504
+#: nova/exception.py:512
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:508
+#: nova/exception.py:516
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:512
+#: nova/exception.py:520
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:516
+#: nova/exception.py:524
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:520
+#: nova/exception.py:528
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:525
+#: nova/exception.py:533
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:529
+#: nova/exception.py:537
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:533
-#, python-format
-msgid "Port %(port_id)s could not be found."
-msgstr ""
-
-#: nova/exception.py:537
+#: nova/exception.py:541
#, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr ""
-#: nova/exception.py:541
+#: nova/exception.py:545
#, python-format
msgid "No free port available for instance %(instance)s."
msgstr ""
-#: nova/exception.py:545
+#: nova/exception.py:549
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:549
+#: nova/exception.py:553
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:553
+#: nova/exception.py:557
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:557
+#: nova/exception.py:561
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:562
+#: nova/exception.py:566
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:566
+#: nova/exception.py:570
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:575
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:576
+#: nova/exception.py:580
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:581
+#: nova/exception.py:585
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:585
+#: nova/exception.py:589
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:589
+#: nova/exception.py:593
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:598
+#: nova/exception.py:602
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:602
+#: nova/exception.py:606
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:606
+#: nova/exception.py:610
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:610
+#: nova/exception.py:614
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:614
+#: nova/exception.py:618
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:618
+#: nova/exception.py:622
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:622
+#: nova/exception.py:626
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:627
+#: nova/exception.py:631
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:632
+#: nova/exception.py:636
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:636
+#: nova/exception.py:640
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:644
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:644
+#: nova/exception.py:648
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:648
+#: nova/exception.py:652
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:656
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:660
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:660
+#: nova/exception.py:664
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:664
+#: nova/exception.py:668
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:668
+#: nova/exception.py:672
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:672
+#: nova/exception.py:676
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:676
+#: nova/exception.py:680
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:680
+#: nova/exception.py:684
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:685
+#: nova/exception.py:689
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:689
+#: nova/exception.py:693
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:693
+#: nova/exception.py:697
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:697
+#: nova/exception.py:701
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:701
+#: nova/exception.py:705
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:705
+#: nova/exception.py:709
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:709
+#: nova/exception.py:713
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:713
+#: nova/exception.py:717
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:717
+#: nova/exception.py:721
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:722
+#: nova/exception.py:726
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:726
+#: nova/exception.py:730
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:731
+#: nova/exception.py:735
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:736
+#: nova/exception.py:740
+#, python-format
+msgid "Security group default rule (%rule_id)s not found."
+msgstr ""
+
+#: nova/exception.py:744
+msgid ""
+"Network requires port_security_enabled and subnet associated in order to "
+"apply security groups."
+msgstr ""
+
+#: nova/exception.py:749
+msgid "No Unique Match Found."
+msgstr ""
+
+#: nova/exception.py:754
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:740
+#: nova/exception.py:758
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:745
+#: nova/exception.py:763
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:749
+#: nova/exception.py:767
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:755
+#: nova/exception.py:773
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:759
+#: nova/exception.py:777
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:763
+#: nova/exception.py:781
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:768
+#: nova/exception.py:786
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:772
+#: nova/exception.py:790
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:776
+#: nova/exception.py:794
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:781
+#: nova/exception.py:799
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:785
+#: nova/exception.py:803
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:790
+#: nova/exception.py:808
#, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr ""
-#: nova/exception.py:794
+#: nova/exception.py:812
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:798
+#: nova/exception.py:816
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:802
+#: nova/exception.py:820
msgid "Timeout waiting for response from cell"
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:824
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:828
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:832
#, python-format
msgid "Exception received during cell processing: %(exc_name)s."
msgstr ""
-#: nova/exception.py:818
+#: nova/exception.py:836
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:822
+#: nova/exception.py:840
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:826
+#: nova/exception.py:844
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:831
+#: nova/exception.py:849
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:835
+#: nova/exception.py:853
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:840
+#: nova/exception.py:858
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:845
+#: nova/exception.py:863
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:850
+#: nova/exception.py:868
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:854
+#: nova/exception.py:872
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:858
+#: nova/exception.py:876
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:863
+#: nova/exception.py:881
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:867
+#: nova/exception.py:885
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:871
+#: nova/exception.py:889
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:875
+#: nova/exception.py:893
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:879
+#: nova/exception.py:897
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:883
+#: nova/exception.py:901
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:887
+#: nova/exception.py:905
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:891
+#: nova/exception.py:909
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:895
+#: nova/exception.py:913
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:899
+#: nova/exception.py:917
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:904
+#: nova/exception.py:922
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:908
+#: nova/exception.py:926
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:912
+#: nova/exception.py:930
msgid "Migration error"
msgstr ""
-#: nova/exception.py:916
+#: nova/exception.py:934
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:922
+#: nova/exception.py:940
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:926
+#: nova/exception.py:944
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:930
+#: nova/exception.py:948
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:934
+#: nova/exception.py:952
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:938
+#: nova/exception.py:956
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:942
+#: nova/exception.py:960
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:946
+#: nova/exception.py:964
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:950
+#: nova/exception.py:968
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:954
+#: nova/exception.py:972
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:958
+#: nova/exception.py:976
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:962
+#: nova/exception.py:980
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:969
+#: nova/exception.py:987
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:974
+#: nova/exception.py:992
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:978
+#: nova/exception.py:996
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:982
+#: nova/exception.py:1000
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:986
+#: nova/exception.py:1004
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:990
+#: nova/exception.py:1008
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:994
+#: nova/exception.py:1012
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:998
+#: nova/exception.py:1016
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1002
+#: nova/exception.py:1020
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1007
+#: nova/exception.py:1025
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1011
+#: nova/exception.py:1029
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1015
+#: nova/exception.py:1033
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1019
+#: nova/exception.py:1037
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1024
+#: nova/exception.py:1042
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1028
+#: nova/exception.py:1046
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1032
+#: nova/exception.py:1050
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1038
+#: nova/exception.py:1056
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1042
+#: nova/exception.py:1060
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1046
+#: nova/exception.py:1064
+#, python-format
+msgid "Node %(node_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:1068
+#, python-format
+msgid "Node with UUID %(node_uuid)s could not be found."
+msgstr ""
+
+#: nova/exception.py:1072
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1050
+#: nova/exception.py:1076
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1054
+#: nova/exception.py:1080
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1058
+#: nova/exception.py:1084
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1062
+#: nova/exception.py:1088
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1066
+#: nova/exception.py:1092
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1070
+#: nova/exception.py:1096
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1075
+#: nova/exception.py:1101
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1080
+#: nova/exception.py:1106
+#, python-format
+msgid "Failed to attach network adapter device to %(instance)s"
+msgstr ""
+
+#: nova/exception.py:1110
+#, python-format
+msgid "Failed to detach network adapter device from %(instance)s"
+msgstr ""
+
+#: nova/exception.py:1114
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1086
+#: nova/exception.py:1120
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:1124
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1095
+#: nova/exception.py:1129
#, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
"found"
msgstr ""
-#: nova/exception.py:1100
+#: nova/exception.py:1134
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
-#: nova/exception.py:1104
+#: nova/exception.py:1138
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1108
+#: nova/exception.py:1142
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1112
+#: nova/exception.py:1146
msgid "Instance recreate is not implemented by this virt driver."
msgstr ""
-#: nova/exception.py:1116
+#: nova/exception.py:1150
#, python-format
msgid "The service from servicegroup driver %(driver) is temporarily unavailable."
msgstr ""
-#: nova/exception.py:1121
+#: nova/exception.py:1155
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
-#: nova/exception.py:1126
+#: nova/exception.py:1160
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
-#: nova/exception.py:1131
+#: nova/exception.py:1165
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
@@ -1229,43 +1268,43 @@ msgstr ""
msgid "Running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/manager.py:161
+#: nova/manager.py:171
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr ""
-#: nova/manager.py:166
+#: nova/manager.py:176
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr ""
-#: nova/manager.py:225
+#: nova/manager.py:237
#, python-format
msgid "Running periodic task %(full_task_name)s"
msgstr ""
-#: nova/manager.py:233
+#: nova/manager.py:245
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/manager.py:306
+#: nova/manager.py:318
msgid "Notifying Schedulers of capabilities ..."
msgstr ""
-#: nova/notifications.py:111 nova/notifications.py:151
+#: nova/notifications.py:112 nova/notifications.py:152
msgid "Failed to send state update notification"
msgstr ""
-#: nova/notifications.py:229
+#: nova/notifications.py:230
msgid "Failed to get nw_info"
msgstr ""
-#: nova/policy.py:31
+#: nova/policy.py:32
msgid "JSON file representing policy"
msgstr ""
-#: nova/policy.py:34
+#: nova/policy.py:35
msgid "Rule checked when requested rule is not found"
msgstr ""
@@ -1383,113 +1422,114 @@ msgstr ""
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:196
+#: nova/utils.py:201 nova/openstack/common/processutils.py:90
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:207
+#: nova/utils.py:212 nova/openstack/common/processutils.py:99
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:231 nova/utils.py:309 nova/virt/powervm/common.py:86
+#: nova/utils.py:236 nova/utils.py:314
+#: nova/openstack/common/processutils.py:114 nova/virt/powervm/common.py:86
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:244
+#: nova/utils.py:249 nova/openstack/common/processutils.py:128
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:284
+#: nova/utils.py:289
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:286
+#: nova/utils.py:291
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:290
+#: nova/utils.py:295
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:325
+#: nova/utils.py:330
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:487
+#: nova/utils.py:492
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:490
+#: nova/utils.py:495
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:529
+#: nova/utils.py:534
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:601
+#: nova/utils.py:606
msgid "in fixed duration looping call"
msgstr ""
-#: nova/utils.py:636
+#: nova/utils.py:641
#, python-format
msgid "Periodic task processor sleeping for %.02f seconds"
msgstr ""
-#: nova/utils.py:643
+#: nova/utils.py:648
msgid "in dynamic looping call"
msgstr ""
-#: nova/utils.py:697
+#: nova/utils.py:742
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:826
+#: nova/utils.py:871
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:855
+#: nova/utils.py:900
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:987
+#: nova/utils.py:1054
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:1053
+#: nova/utils.py:1120
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1185 nova/virt/configdrive.py:176
+#: nova/utils.py:1252 nova/virt/configdrive.py:177
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/utils.py:1363
+#: nova/utils.py:1430
#, python-format
msgid "%s is not a string or unicode"
msgstr ""
-#: nova/utils.py:1367
+#: nova/utils.py:1434
#, python-format
msgid "%(name)s has less than %(min_length)s characters."
msgstr ""
-#: nova/utils.py:1372
+#: nova/utils.py:1439
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr ""
@@ -1550,11 +1590,12 @@ msgstr ""
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
-#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:62
+#: nova/api/sizelimit.py:55 nova/api/sizelimit.py:64 nova/api/sizelimit.py:78
+#: nova/api/metadata/password.py:62
msgid "Request is too large."
msgstr ""
-#: nova/api/validator.py:138
+#: nova/api/validator.py:140
#, python-format
msgid "%(key)s with value %(value)s failed validator %(name)s"
msgstr ""
@@ -1701,235 +1742,235 @@ msgstr ""
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:389
+#: nova/api/ec2/cloud.py:391
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:415
+#: nova/api/ec2/cloud.py:417
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:432
+#: nova/api/ec2/cloud.py:434
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:439 nova/api/ec2/cloud.py:458
+#: nova/api/ec2/cloud.py:441 nova/api/ec2/cloud.py:460
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:448
+#: nova/api/ec2/cloud.py:450
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:468
+#: nova/api/ec2/cloud.py:470
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:602 nova/api/ec2/cloud.py:723
+#: nova/api/ec2/cloud.py:604 nova/api/ec2/cloud.py:725
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:607
+#: nova/api/ec2/cloud.py:609
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:645 nova/api/ec2/cloud.py:677
+#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:679
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:668
+#: nova/api/ec2/cloud.py:670
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:751
+#: nova/api/ec2/cloud.py:753
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:827
+#: nova/api/ec2/cloud.py:829
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:831 nova/api/openstack/compute/contrib/volumes.py:244
+#: nova/api/ec2/cloud.py:833 nova/api/openstack/compute/contrib/volumes.py:243
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:859
+#: nova/api/ec2/cloud.py:861
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:872
+#: nova/api/ec2/cloud.py:874
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:880
+#: nova/api/ec2/cloud.py:882
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:893 nova/api/openstack/compute/contrib/volumes.py:436
+#: nova/api/ec2/cloud.py:895 nova/api/openstack/compute/contrib/volumes.py:435
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:899
+#: nova/api/ec2/cloud.py:901
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:925 nova/api/ec2/cloud.py:982
-#: nova/api/ec2/cloud.py:1535 nova/api/ec2/cloud.py:1550
+#: nova/api/ec2/cloud.py:927 nova/api/ec2/cloud.py:984
+#: nova/api/ec2/cloud.py:1537 nova/api/ec2/cloud.py:1552
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1056
+#: nova/api/ec2/cloud.py:1058
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1215
+#: nova/api/ec2/cloud.py:1217
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1219
+#: nova/api/ec2/cloud.py:1221
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1223
+#: nova/api/ec2/cloud.py:1225
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1228
+#: nova/api/ec2/cloud.py:1230
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1231
+#: nova/api/ec2/cloud.py:1233
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1239
+#: nova/api/ec2/cloud.py:1241
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1247
+#: nova/api/ec2/cloud.py:1249
#: nova/api/openstack/compute/contrib/floating_ips.py:238
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1256
+#: nova/api/ec2/cloud.py:1258
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1259
+#: nova/api/ec2/cloud.py:1261
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1262
+#: nova/api/ec2/cloud.py:1264
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1270
+#: nova/api/ec2/cloud.py:1272
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1275
+#: nova/api/ec2/cloud.py:1277
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1278
+#: nova/api/ec2/cloud.py:1280
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1305
+#: nova/api/ec2/cloud.py:1307
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1337
+#: nova/api/ec2/cloud.py:1339
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1347
+#: nova/api/ec2/cloud.py:1349
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1356
+#: nova/api/ec2/cloud.py:1358
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1365
+#: nova/api/ec2/cloud.py:1367
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1456
+#: nova/api/ec2/cloud.py:1458
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1472
+#: nova/api/ec2/cloud.py:1474
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1491
+#: nova/api/ec2/cloud.py:1493
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1553
+#: nova/api/ec2/cloud.py:1555
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1555
+#: nova/api/ec2/cloud.py:1557
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1557
+#: nova/api/ec2/cloud.py:1559
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1559
+#: nova/api/ec2/cloud.py:1561
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1572
+#: nova/api/ec2/cloud.py:1574
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1601
+#: nova/api/ec2/cloud.py:1603
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1631
+#: nova/api/ec2/cloud.py:1633
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1649
+#: nova/api/ec2/cloud.py:1651
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1682
+#: nova/api/ec2/cloud.py:1684
msgid "Invalid CIDR"
msgstr ""
@@ -1983,7 +2024,7 @@ msgstr ""
msgid "Caught error: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:60 nova/api/openstack/wsgi.py:992
+#: nova/api/openstack/__init__.py:60 nova/api/openstack/wsgi.py:993
#, python-format
msgid "%(url)s returned with HTTP %(status)d"
msgstr ""
@@ -2032,7 +2073,7 @@ msgstr ""
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:540
+#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:539
#, python-format
msgid "marker [%s] not found"
msgstr ""
@@ -2136,57 +2177,57 @@ msgstr ""
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:199 nova/api/openstack/wsgi.py:621
+#: nova/api/openstack/wsgi.py:200 nova/api/openstack/wsgi.py:622
msgid "cannot understand JSON"
msgstr ""
-#: nova/api/openstack/wsgi.py:223
+#: nova/api/openstack/wsgi.py:224
#: nova/api/openstack/compute/contrib/cells.py:104
#: nova/api/openstack/compute/contrib/hosts.py:77
msgid "cannot understand XML"
msgstr ""
-#: nova/api/openstack/wsgi.py:626
+#: nova/api/openstack/wsgi.py:627
msgid "too many body keys"
msgstr ""
-#: nova/api/openstack/wsgi.py:669
+#: nova/api/openstack/wsgi.py:670
#, python-format
msgid "Exception handling resource: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:673
+#: nova/api/openstack/wsgi.py:674
#, python-format
msgid "Fault thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:676
+#: nova/api/openstack/wsgi.py:677
#, python-format
msgid "HTTP exception thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:790
+#: nova/api/openstack/wsgi.py:791
msgid "Unrecognized Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:794
+#: nova/api/openstack/wsgi.py:795
msgid "No Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:798
+#: nova/api/openstack/wsgi.py:799
msgid "Empty body provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:899
+#: nova/api/openstack/wsgi.py:900
msgid "Invalid XML in request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:918
+#: nova/api/openstack/wsgi.py:919
#, python-format
msgid "There is no such action: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:921 nova/api/openstack/wsgi.py:938
+#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:939
#: nova/api/openstack/compute/server_metadata.py:58
#: nova/api/openstack/compute/server_metadata.py:76
#: nova/api/openstack/compute/server_metadata.py:101
@@ -2196,30 +2237,30 @@ msgstr ""
msgid "Malformed request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:925
+#: nova/api/openstack/wsgi.py:926
#, python-format
msgid "Action: '%(action)s', body: %(body)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:926
+#: nova/api/openstack/wsgi.py:927
#, python-format
msgid "Calling method %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:935
+#: nova/api/openstack/wsgi.py:936
msgid "Unsupported Content-Type"
msgstr ""
-#: nova/api/openstack/wsgi.py:947
+#: nova/api/openstack/wsgi.py:948
msgid "Malformed request url"
msgstr ""
-#: nova/api/openstack/wsgi.py:995
+#: nova/api/openstack/wsgi.py:996
#, python-format
msgid "%(url)s returned a fault: %(e)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:1188
+#: nova/api/openstack/wsgi.py:1189
#, python-format
msgid ""
"API request failed, fault raised to the top of the stack. Detailed "
@@ -2243,7 +2284,7 @@ msgstr ""
msgid "subclasses must implement construct()!"
msgstr ""
-#: nova/api/openstack/compute/extensions.py:38
+#: nova/api/openstack/compute/extensions.py:39
msgid "Initializing extension manager."
msgstr ""
@@ -2264,7 +2305,7 @@ msgstr ""
#: nova/api/openstack/compute/image_metadata.py:36
#: nova/api/openstack/compute/images.py:144
-#: nova/api/openstack/compute/images.py:159
+#: nova/api/openstack/compute/images.py:160
msgid "Image not found."
msgstr ""
@@ -2318,233 +2359,233 @@ msgstr ""
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:505
+#: nova/api/openstack/compute/servers.py:504
#: nova/api/openstack/compute/contrib/cells.py:278
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:524
+#: nova/api/openstack/compute/servers.py:523
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:543
+#: nova/api/openstack/compute/servers.py:542
#, python-format
msgid "Flavor '%s' could not be found "
msgstr ""
-#: nova/api/openstack/compute/servers.py:560
-#: nova/api/openstack/compute/servers.py:731
-#: nova/api/openstack/compute/servers.py:1003
-#: nova/api/openstack/compute/servers.py:1109
-#: nova/api/openstack/compute/servers.py:1282
+#: nova/api/openstack/compute/servers.py:559
+#: nova/api/openstack/compute/servers.py:730
+#: nova/api/openstack/compute/servers.py:1002
+#: nova/api/openstack/compute/servers.py:1108
+#: nova/api/openstack/compute/servers.py:1281
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:579
+#: nova/api/openstack/compute/servers.py:578
msgid "Device name cannot include spaces."
msgstr ""
-#: nova/api/openstack/compute/servers.py:596
+#: nova/api/openstack/compute/servers.py:595
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:599
+#: nova/api/openstack/compute/servers.py:598
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:602
+#: nova/api/openstack/compute/servers.py:601
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:633
+#: nova/api/openstack/compute/servers.py:632
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:636
+#: nova/api/openstack/compute/servers.py:635
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:646
+#: nova/api/openstack/compute/servers.py:645
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:656
+#: nova/api/openstack/compute/servers.py:655
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:669
+#: nova/api/openstack/compute/servers.py:668
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:675
+#: nova/api/openstack/compute/servers.py:674
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:678
+#: nova/api/openstack/compute/servers.py:677
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:704
+#: nova/api/openstack/compute/servers.py:703
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:711
+#: nova/api/openstack/compute/servers.py:710
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:718
+#: nova/api/openstack/compute/servers.py:717
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:747
+#: nova/api/openstack/compute/servers.py:746
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:795
-#: nova/api/openstack/compute/servers.py:909
+#: nova/api/openstack/compute/servers.py:794
+#: nova/api/openstack/compute/servers.py:908
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:836
+#: nova/api/openstack/compute/servers.py:835
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:839
+#: nova/api/openstack/compute/servers.py:838
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:845
+#: nova/api/openstack/compute/servers.py:844
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:848
+#: nova/api/openstack/compute/servers.py:847
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:852
+#: nova/api/openstack/compute/servers.py:851
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:904
+#: nova/api/openstack/compute/servers.py:903
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:912
+#: nova/api/openstack/compute/servers.py:911
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:991
+#: nova/api/openstack/compute/servers.py:990
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:995
+#: nova/api/openstack/compute/servers.py:994
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1021
-#: nova/api/openstack/compute/servers.py:1041
+#: nova/api/openstack/compute/servers.py:1020
+#: nova/api/openstack/compute/servers.py:1040
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1027
+#: nova/api/openstack/compute/servers.py:1026
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1044
+#: nova/api/openstack/compute/servers.py:1043
msgid "Flavor used by the instance could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1050
+#: nova/api/openstack/compute/servers.py:1049
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1063
+#: nova/api/openstack/compute/servers.py:1062
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1067
+#: nova/api/openstack/compute/servers.py:1066
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1080
+#: nova/api/openstack/compute/servers.py:1079
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1092
+#: nova/api/openstack/compute/servers.py:1091
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1095
+#: nova/api/openstack/compute/servers.py:1094
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1119
+#: nova/api/openstack/compute/servers.py:1118
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1128
+#: nova/api/openstack/compute/servers.py:1127
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1155
+#: nova/api/openstack/compute/servers.py:1154
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1168
+#: nova/api/openstack/compute/servers.py:1167
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1172
-#: nova/api/openstack/compute/servers.py:1379
+#: nova/api/openstack/compute/servers.py:1171
+#: nova/api/openstack/compute/servers.py:1378
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1178
+#: nova/api/openstack/compute/servers.py:1177
msgid "Unable to set password on instance"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1187
+#: nova/api/openstack/compute/servers.py:1186
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1200
+#: nova/api/openstack/compute/servers.py:1199
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1203
+#: nova/api/openstack/compute/servers.py:1202
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1221
+#: nova/api/openstack/compute/servers.py:1220
#: nova/api/openstack/compute/contrib/aggregates.py:143
#: nova/api/openstack/compute/contrib/coverage_ext.py:256
#: nova/api/openstack/compute/contrib/keypairs.py:78
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1227
+#: nova/api/openstack/compute/servers.py:1226
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1289
+#: nova/api/openstack/compute/servers.py:1288
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1322
+#: nova/api/openstack/compute/servers.py:1321
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1406
+#: nova/api/openstack/compute/servers.py:1405
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
@@ -2582,7 +2623,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/admin_actions.py:154
#: nova/api/openstack/compute/contrib/admin_actions.py:170
#: nova/api/openstack/compute/contrib/admin_actions.py:186
-#: nova/api/openstack/compute/contrib/admin_actions.py:314
+#: nova/api/openstack/compute/contrib/admin_actions.py:318
#: nova/api/openstack/compute/contrib/multinic.py:41
#: nova/api/openstack/compute/contrib/rescue.py:44
msgid "Server not found"
@@ -2631,17 +2672,22 @@ msgstr ""
msgid "host and block_migration must be specified."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:288
+#: nova/api/openstack/compute/contrib/admin_actions.py:289
+#, python-format
+msgid "Live migration of instance %(id)s to another host failed"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:292
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:306
+#: nova/api/openstack/compute/contrib/admin_actions.py:310
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:317
+#: nova/api/openstack/compute/contrib/admin_actions.py:321
#, python-format
msgid "Compute.api::resetState %s"
msgstr ""
@@ -2683,6 +2729,29 @@ msgstr ""
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:102
+msgid "Attach interface"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:109
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:142
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:165
+msgid "Network driver does not support this function."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:113
+msgid "Failed to attach interface"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:121
+msgid "Attachments update is not supported"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:132
+#, python-format
+msgid "Detach interface %s"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/baremetal_nodes.py:184
msgid "Must specify id or address"
msgstr ""
@@ -2716,7 +2785,7 @@ msgstr ""
msgid "Only root certificate can be retrieved."
msgstr ""
-#: nova/api/openstack/compute/contrib/cloudpipe.py:150
+#: nova/api/openstack/compute/contrib/cloudpipe.py:151
msgid ""
"Unable to claim IP for VPN instances, ensure it isn't running, and try "
"again in a few minutes"
@@ -2857,7 +2926,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/floating_ips.py:215
#: nova/api/openstack/compute/contrib/floating_ips.py:271
-#: nova/api/openstack/compute/contrib/security_groups.py:421
+#: nova/api/openstack/compute/contrib/security_groups.py:417
msgid "Missing parameter dict"
msgstr ""
@@ -2896,7 +2965,7 @@ msgstr ""
msgid "/%s should be specified as single address(es) not in cidr format"
msgstr ""
-#: nova/api/openstack/compute/contrib/fping.py:56
+#: nova/api/openstack/compute/contrib/fping.py:55
msgid "fping utility is not found."
msgstr ""
@@ -3095,29 +3164,34 @@ msgstr ""
msgid "Malformed scheduler_hints attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:222
-msgid "Security group id should be integer"
+#: nova/api/openstack/compute/contrib/security_group_default_rules.py:129
+#: nova/api/openstack/compute/contrib/security_groups.py:328
+msgid "Not enough parameters to build a valid rule."
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:331
-msgid "Not enough parameters to build a valid rule."
+#: nova/api/openstack/compute/contrib/security_group_default_rules.py:133
+msgid "This default rule already exists."
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:339
+#: nova/api/openstack/compute/contrib/security_group_default_rules.py:154
#, python-format
-msgid "Bad prefix for network in cidr %s"
+msgid "Showing security_group_default_rule with id %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/security_group_default_rules.py:158
+msgid "security group default rule not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:343
+#: nova/api/openstack/compute/contrib/security_groups.py:336
#, python-format
-msgid "This rule already exists in group %s"
+msgid "Bad prefix for network in cidr %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:424
+#: nova/api/openstack/compute/contrib/security_groups.py:420
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:428
+#: nova/api/openstack/compute/contrib/security_groups.py:424
msgid "Security group name cannot be empty"
msgstr ""
@@ -3133,43 +3207,43 @@ msgstr ""
msgid "Invalid start time. The start time cannot occur after the end time."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:76
+#: nova/api/openstack/compute/contrib/volumes.py:75
#, python-format
msgid "vol=%s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:188
+#: nova/api/openstack/compute/contrib/volumes.py:187
#, python-format
msgid "Delete volume with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:356
-#: nova/api/openstack/compute/contrib/volumes.py:446
+#: nova/api/openstack/compute/contrib/volumes.py:355
+#: nova/api/openstack/compute/contrib/volumes.py:445
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:377
+#: nova/api/openstack/compute/contrib/volumes.py:376
#, python-format
msgid "Bad volumeId format: volumeId is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:396
+#: nova/api/openstack/compute/contrib/volumes.py:395
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:560
+#: nova/api/openstack/compute/contrib/volumes.py:559
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:603
+#: nova/api/openstack/compute/contrib/volumes.py:602
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:607
+#: nova/api/openstack/compute/contrib/volumes.py:606
#, python-format
msgid "Invalid value '%s' for force. "
msgstr ""
@@ -3270,276 +3344,294 @@ msgstr ""
msgid "Updating parents with our capacities: %(capacities)s"
msgstr ""
-#: nova/cells/scheduler.py:94
+#: nova/cells/scheduler.py:98
#, python-format
msgid "Scheduling with routing_path=%(routing_path)s"
msgstr ""
-#: nova/cells/scheduler.py:117
+#: nova/cells/scheduler.py:121
#, python-format
msgid ""
"No cells available when scheduling. Will retry in %(sleep_time)s "
"second(s)"
msgstr ""
-#: nova/cells/scheduler.py:124
+#: nova/cells/scheduler.py:128
#, python-format
msgid "Error scheduling instances %(instance_uuids)s"
msgstr ""
-#: nova/cells/state.py:263
+#: nova/cells/state.py:264
msgid "Updating cell cache from db."
msgstr ""
-#: nova/cells/state.py:308
+#: nova/cells/state.py:309
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr ""
-#: nova/cells/state.py:323
+#: nova/cells/state.py:324
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
-#: nova/cloudpipe/pipelib.py:47
+#: nova/cloudpipe/pipelib.py:48
msgid "Instance type for vpn instances"
msgstr ""
-#: nova/cloudpipe/pipelib.py:50
+#: nova/cloudpipe/pipelib.py:51
msgid "Template for cloudpipe instance boot script"
msgstr ""
-#: nova/cloudpipe/pipelib.py:53
+#: nova/cloudpipe/pipelib.py:54
msgid "Network to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:56
+#: nova/cloudpipe/pipelib.py:57
msgid "Netmask to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:125
+#: nova/cloudpipe/pipelib.py:126
#, python-format
msgid "Launching VPN for %s"
msgstr ""
-#: nova/compute/api.py:273
+#: nova/compute/api.py:277
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:280
+#: nova/compute/api.py:284
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:289
+#: nova/compute/api.py:293
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:309
+#: nova/compute/api.py:313
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:319
+#: nova/compute/api.py:323
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:323
+#: nova/compute/api.py:327
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:327
+#: nova/compute/api.py:331
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:452
+#: nova/compute/api.py:436
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:476
msgid "Cannot attach one or more volumes to multiple instances"
msgstr ""
-#: nova/compute/api.py:565
+#: nova/compute/api.py:589
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:676
+#: nova/compute/api.py:701
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:703
+#: nova/compute/api.py:728
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:965
+#: nova/compute/api.py:1001
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:1099
+#: nova/compute/api.py:1135
#, python-format
msgid "instance type %(old_inst_type_id)d not found"
msgstr ""
-#: nova/compute/api.py:1105
+#: nova/compute/api.py:1141
msgid "going to delete a resizing instance"
msgstr ""
-#: nova/compute/api.py:1115
+#: nova/compute/api.py:1151
#, python-format
msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1159
+#: nova/compute/api.py:1195
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:1181
+#: nova/compute/api.py:1217
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1231
+#: nova/compute/api.py:1267
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1248
+#: nova/compute/api.py:1284
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1318
+#: nova/compute/api.py:1354
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1562
+#: nova/compute/api.py:1598
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1918
+#: nova/compute/api.py:1954
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1927
+#: nova/compute/api.py:1963
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1969
+#: nova/compute/api.py:2005
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:2200
+#: nova/compute/api.py:2238
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:2208
+#: nova/compute/api.py:2246
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2276
+#: nova/compute/api.py:2314
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2369
+#: nova/compute/api.py:2421
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2388
+#: nova/compute/api.py:2440
msgid "vm evacuation scheduled"
msgstr ""
-#: nova/compute/api.py:2392
+#: nova/compute/api.py:2444
#, python-format
msgid ""
"Instance compute service state on %(host)s expected to be down, but it "
"was up."
msgstr ""
-#: nova/compute/api.py:2612
+#: nova/compute/api.py:2664
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2616
+#: nova/compute/api.py:2668
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2717
+#: nova/compute/api.py:2773
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2720
+#: nova/compute/api.py:2776
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2728
+#: nova/compute/api.py:2784
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2734
+#: nova/compute/api.py:2790
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2754
+#: nova/compute/api.py:2810
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2757
+#: nova/compute/api.py:2813
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2764
+#: nova/compute/api.py:2820
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2829
+#: nova/compute/api.py:2885
+#, python-format
+msgid "Unable to delete system group '%s'"
+msgstr ""
+
+#: nova/compute/api.py:2890
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2837
+#: nova/compute/api.py:2898
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2840
+#: nova/compute/api.py:2901
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:3098
+#: nova/compute/api.py:3010 nova/compute/api.py:3087
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:3107
+#: nova/compute/api.py:3026
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:3110
+#: nova/compute/api.py:3029
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:3121
+#: nova/compute/api.py:3040
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
+#: nova/compute/api.py:3094
+msgid "Security group id should be integer"
+msgstr ""
+
+#: nova/compute/api.py:3099
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
#: nova/compute/claims.py:94 nova/compute/claims.py:218
#, python-format
msgid "Aborting claim: %s"
@@ -3594,728 +3686,748 @@ msgid ""
"%(requested)d %(unit)s"
msgstr ""
-#: nova/compute/instance_types.py:95
+#: nova/compute/instance_types.py:96
msgid "names can only contain [a-zA-Z0-9_.- ]"
msgstr ""
-#: nova/compute/instance_types.py:104
+#: nova/compute/instance_types.py:105
#, python-format
msgid "'%s' argument must be a positive integer"
msgstr ""
-#: nova/compute/instance_types.py:112
+#: nova/compute/instance_types.py:113
msgid "'rxtx_factor' argument must be a positive float"
msgstr ""
-#: nova/compute/instance_types.py:120
+#: nova/compute/instance_types.py:121
#, python-format
msgid "'%s' argument must be greater than 0"
msgstr ""
-#: nova/compute/instance_types.py:130
+#: nova/compute/instance_types.py:131
msgid "is_public must be a boolean"
msgstr ""
-#: nova/compute/instance_types.py:137
+#: nova/compute/instance_types.py:138
#, python-format
msgid "DB error: %s"
msgstr ""
-#: nova/compute/instance_types.py:147
+#: nova/compute/instance_types.py:148
#, python-format
msgid "Instance type %s not found for deletion"
msgstr ""
-#: nova/compute/manager.py:192
+#: nova/compute/manager.py:195
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:344
+#: nova/compute/manager.py:350
#, python-format
msgid "%(nodename)s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:373
+#: nova/compute/manager.py:379
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:392
+#: nova/compute/manager.py:398
#, python-format
msgid "Instance %(uuid)s found in the hypervisor, but not in the database"
msgstr ""
-#: nova/compute/manager.py:410
+#: nova/compute/manager.py:416
#, python-format
msgid ""
"Instance %(driver_instance)s found in the hypervisor, but not in the "
"database"
msgstr ""
-#: nova/compute/manager.py:431
+#: nova/compute/manager.py:437
#, python-format
msgid ""
"Deleting instance as its host (%(instance_host)s) is not equal to our "
"host (%(our_host)s)."
msgstr ""
-#: nova/compute/manager.py:458
+#: nova/compute/manager.py:464
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:470
+#: nova/compute/manager.py:476
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:484
+#: nova/compute/manager.py:490
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:489
+#: nova/compute/manager.py:495
msgid "Failed to resume instance"
msgstr ""
-#: nova/compute/manager.py:499
+#: nova/compute/manager.py:505
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:533
+#: nova/compute/manager.py:509
+#, python-format
+msgid "Lifecycle event %(state)d on VM %(uuid)s"
+msgstr ""
+
+#: nova/compute/manager.py:525
+#, python-format
+msgid "Unexpected power state %d"
+msgstr ""
+
+#: nova/compute/manager.py:537
+#, python-format
+msgid "Ignoring event %s"
+msgstr ""
+
+#: nova/compute/manager.py:575
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:608
+#: nova/compute/manager.py:655
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:683 nova/compute/manager.py:2016
+#: nova/compute/manager.py:737 nova/compute/manager.py:2077
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:726
+#: nova/compute/manager.py:780
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:751
+#: nova/compute/manager.py:805
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:786 nova/compute/manager.py:2070
+#: nova/compute/manager.py:840 nova/compute/manager.py:2131
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:804
+#: nova/compute/manager.py:858
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:809
+#: nova/compute/manager.py:863
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:815
+#: nova/compute/manager.py:869
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:843
+#: nova/compute/manager.py:897
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:919
+#: nova/compute/manager.py:973
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:925
+#: nova/compute/manager.py:979
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:935
+#: nova/compute/manager.py:989
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:957
+#: nova/compute/manager.py:1014
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:961
+#: nova/compute/manager.py:1018
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:971
+#: nova/compute/manager.py:1028
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:988
+#: nova/compute/manager.py:1045
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:1012
+#: nova/compute/manager.py:1069
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:1088
+#: nova/compute/manager.py:1145
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1119
+#: nova/compute/manager.py:1176
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1122
+#: nova/compute/manager.py:1179
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1129
+#: nova/compute/manager.py:1186
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1154
+#: nova/compute/manager.py:1211
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1193 nova/compute/manager.py:2245
-#: nova/compute/manager.py:3635
+#: nova/compute/manager.py:1254 nova/compute/manager.py:2306
+#: nova/compute/manager.py:3739
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1333
+#: nova/compute/manager.py:1394
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1346
+#: nova/compute/manager.py:1407
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:1350
+#: nova/compute/manager.py:1411
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:1354
+#: nova/compute/manager.py:1415
#, python-format
msgid "disk not on shared storagerebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:1438
+#: nova/compute/manager.py:1499
#, python-format
msgid "bringing vm to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:1462
+#: nova/compute/manager.py:1523
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1481
+#: nova/compute/manager.py:1542
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1491
+#: nova/compute/manager.py:1552
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1527
+#: nova/compute/manager.py:1588
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1533
+#: nova/compute/manager.py:1594
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1594
+#: nova/compute/manager.py:1655
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1601
+#: nova/compute/manager.py:1662
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1606
+#: nova/compute/manager.py:1667
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1634
+#: nova/compute/manager.py:1695
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1641
+#: nova/compute/manager.py:1702
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1648
+#: nova/compute/manager.py:1709
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:1663
+#: nova/compute/manager.py:1724
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1670
+#: nova/compute/manager.py:1731
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1683
+#: nova/compute/manager.py:1744
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1687
+#: nova/compute/manager.py:1748
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1707
+#: nova/compute/manager.py:1768
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1721
+#: nova/compute/manager.py:1782
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1756
+#: nova/compute/manager.py:1817
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1777
+#: nova/compute/manager.py:1838
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1974
+#: nova/compute/manager.py:2035
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:1980
+#: nova/compute/manager.py:2041
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1997
+#: nova/compute/manager.py:2058
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2242
+#: nova/compute/manager.py:2303
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2297
+#: nova/compute/manager.py:2358
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2315
+#: nova/compute/manager.py:2376
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2353
+#: nova/compute/manager.py:2414
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2384
+#: nova/compute/manager.py:2445
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2404
+#: nova/compute/manager.py:2465
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2409
+#: nova/compute/manager.py:2470
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2412
+#: nova/compute/manager.py:2473
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2429
+#: nova/compute/manager.py:2490
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2454
+#: nova/compute/manager.py:2515
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2482
+#: nova/compute/manager.py:2543
msgid "Getting spice console"
msgstr ""
-#: nova/compute/manager.py:2512
+#: nova/compute/manager.py:2583
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2563
+#: nova/compute/manager.py:2634
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2572
+#: nova/compute/manager.py:2643
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2587
+#: nova/compute/manager.py:2658
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2617
+#: nova/compute/manager.py:2688
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2627
+#: nova/compute/manager.py:2698
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2634
+#: nova/compute/manager.py:2705
#, python-format
msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2658
+#: nova/compute/manager.py:2729
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2695
+#: nova/compute/manager.py:2785
+#, python-format
+msgid "Port %(port_id)s is not attached"
+msgstr ""
+
+#: nova/compute/manager.py:2799
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2824
+#: nova/compute/manager.py:2928
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2852
+#: nova/compute/manager.py:2956
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2907
+#: nova/compute/manager.py:3011
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2909
+#: nova/compute/manager.py:3013
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2923
+#: nova/compute/manager.py:3027
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:3063
+#: nova/compute/manager.py:3167
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:3108
+#: nova/compute/manager.py:3212
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:3114
+#: nova/compute/manager.py:3218
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:3123
+#: nova/compute/manager.py:3227
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:3130
+#: nova/compute/manager.py:3234
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:3134
+#: nova/compute/manager.py:3238
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:3141
+#: nova/compute/manager.py:3245
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:3149
+#: nova/compute/manager.py:3253
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:3166
+#: nova/compute/manager.py:3270
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:3185
+#: nova/compute/manager.py:3289
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:3209
+#: nova/compute/manager.py:3313
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3327
+#: nova/compute/manager.py:3431
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3345
+#: nova/compute/manager.py:3449
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3371
+#: nova/compute/manager.py:3475
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3376 nova/compute/manager.py:3425
+#: nova/compute/manager.py:3480 nova/compute/manager.py:3529
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3412
+#: nova/compute/manager.py:3516
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3450
+#: nova/compute/manager.py:3554
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3462 nova/compute/manager.py:3471
-#: nova/compute/manager.py:3501
+#: nova/compute/manager.py:3566 nova/compute/manager.py:3575
+#: nova/compute/manager.py:3605
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3466
+#: nova/compute/manager.py:3570
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3482
+#: nova/compute/manager.py:3586
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:3488
+#: nova/compute/manager.py:3592
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:3494
+#: nova/compute/manager.py:3598
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3510
+#: nova/compute/manager.py:3614
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3518
+#: nova/compute/manager.py:3622
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3533
+#: nova/compute/manager.py:3637
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3588
+#: nova/compute/manager.py:3692
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3595
+#: nova/compute/manager.py:3699
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3602
+#: nova/compute/manager.py:3706
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/resource_tracker.py:91
+#: nova/compute/resource_tracker.py:92
msgid ""
"Host field should not be set on the instance until resources have been "
"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:96
+#: nova/compute/resource_tracker.py:97
msgid ""
"Node field should be not be set on the instance until resources have been"
" claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:236
+#: nova/compute/resource_tracker.py:237
msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:240
+#: nova/compute/resource_tracker.py:241
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:292
+#: nova/compute/resource_tracker.py:293
#, python-format
msgid "Compute_service record created for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:298
+#: nova/compute/resource_tracker.py:299
#, python-format
msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:312
+#: nova/compute/resource_tracker.py:313
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/resource_tracker.py:322
+#: nova/compute/resource_tracker.py:323
#, python-format
msgid "Hypervisor: free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:323
+#: nova/compute/resource_tracker.py:324
#, python-format
msgid "Hypervisor: free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:328
+#: nova/compute/resource_tracker.py:329
#, python-format
msgid "Hypervisor: free VCPUs: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:330
+#: nova/compute/resource_tracker.py:331
msgid "Hypervisor: VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:337
+#: nova/compute/resource_tracker.py:338
#, python-format
msgid "Free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:338
+#: nova/compute/resource_tracker.py:339
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:343
+#: nova/compute/resource_tracker.py:344
#, python-format
msgid "Free VCPUS: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:345
+#: nova/compute/resource_tracker.py:346
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:383
+#: nova/compute/resource_tracker.py:384
#, python-format
msgid "Updating from migration %s"
msgstr ""
-#: nova/compute/resource_tracker.py:439
+#: nova/compute/resource_tracker.py:440
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:453
+#: nova/compute/resource_tracker.py:454
msgid "InstanceType could not be found, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:537
+#: nova/compute/resource_tracker.py:538
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
"memory"
msgstr ""
-#: nova/compute/resource_tracker.py:550
+#: nova/compute/resource_tracker.py:551
#, python-format
msgid "Missing keys: %s"
msgstr ""
-#: nova/compute/rpcapi.py:50
+#: nova/compute/rpcapi.py:51
msgid "No compute host specified"
msgstr ""
-#: nova/compute/rpcapi.py:53
+#: nova/compute/rpcapi.py:54
#, python-format
msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:139
+#: nova/compute/utils.py:140
#, python-format
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/api.py:366
+#: nova/conductor/api.py:367
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service "
"start before nova-conductor?"
msgstr ""
-#: nova/conductor/manager.py:87
+#: nova/conductor/manager.py:89
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:252
+#: nova/conductor/manager.py:254
msgid "Invalid block_device_mapping_destroy invocation"
msgstr ""
-#: nova/console/manager.py:79 nova/console/vmrc_manager.py:62
+#: nova/console/manager.py:80 nova/console/vmrc_manager.py:63
msgid "Adding console"
msgstr ""
-#: nova/console/manager.py:99 nova/console/vmrc_manager.py:112
+#: nova/console/manager.py:100 nova/console/vmrc_manager.py:113
#, python-format
msgid "Tried to remove non-existent console %(console_id)s."
msgstr ""
-#: nova/console/vmrc_manager.py:115
+#: nova/console/vmrc_manager.py:116
#, python-format
msgid "Removing console %(console_id)s."
msgstr ""
@@ -4366,53 +4478,58 @@ msgstr ""
msgid "Failed to run xvp."
msgstr ""
-#: nova/consoleauth/manager.py:64
+#: nova/consoleauth/manager.py:85
#, python-format
msgid "Received Token: %(token)s, %(token_dict)s)"
msgstr ""
-#: nova/consoleauth/manager.py:69
+#: nova/consoleauth/manager.py:101
#, python-format
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/api.py:582
+#: nova/db/api.py:577
msgid "Failed to notify cells of instance destroy"
msgstr ""
-#: nova/db/api.py:680 nova/db/api.py:701
+#: nova/db/api.py:670 nova/db/api.py:691
msgid "Failed to notify cells of instance update"
msgstr ""
-#: nova/db/api.py:741
+#: nova/db/api.py:731
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/db/api.py:1452
+#: nova/db/api.py:1454
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/api.py:1606
+#: nova/db/api.py:1608
msgid "Failed to notify cells of instance fault"
msgstr ""
+#: nova/db/sqlalchemy/api.py:152
+#, python-format
+msgid "Deadlock detected when running '%(func_name)s': Retrying..."
+msgstr ""
+
#: nova/db/sqlalchemy/api.py:202
msgid "model or base_model parameter should be subclass of NovaBase"
msgstr ""
-#: nova/db/sqlalchemy/api.py:215 nova/virt/baremetal/db/sqlalchemy/api.py:60
+#: nova/db/sqlalchemy/api.py:215 nova/virt/baremetal/db/sqlalchemy/api.py:63
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1397
+#: nova/db/sqlalchemy/api.py:1388
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:2747
+#: nova/db/sqlalchemy/api.py:2704
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
@@ -4427,20 +4544,25 @@ msgstr ""
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/utils.py:53
+#: nova/db/sqlalchemy/utils.py:61
#, python-format
msgid ""
"Please specify column %s in col_name_col_instance param. It is required "
"because column has unsupported type by sqlite)."
msgstr ""
-#: nova/db/sqlalchemy/utils.py:59
+#: nova/db/sqlalchemy/utils.py:67
#, python-format
msgid ""
"col_name_col_instance param has wrong type of column instance for column "
"%s It should be instance of sqlalchemy.Column."
msgstr ""
+#: nova/db/sqlalchemy/utils.py:155
+#, python-format
+msgid "Deleted duplicated row with id: %(id)s from table: %(table)s"
+msgstr ""
+
#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:62
msgid "Exception while seeding instance_types table"
msgstr ""
@@ -4529,91 +4651,91 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/network/api.py:53 nova/network/api_deprecated.py:56
+#: nova/network/api.py:54 nova/network/api_deprecated.py:56
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
-#: nova/network/api.py:78 nova/network/api_deprecated.py:79
+#: nova/network/api.py:79 nova/network/api_deprecated.py:79
msgid "Failed storing info cache"
msgstr ""
-#: nova/network/api.py:226 nova/network/api_deprecated.py:216
+#: nova/network/api.py:227 nova/network/api_deprecated.py:216
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
-#: nova/network/driver.py:39
+#: nova/network/driver.py:40
msgid "Network driver option required, but not specified"
msgstr ""
-#: nova/network/driver.py:42
+#: nova/network/driver.py:43
#, python-format
msgid "Loading network driver '%s'"
msgstr ""
-#: nova/network/floating_ips.py:86
+#: nova/network/floating_ips.py:87
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/floating_ips.py:96 nova/network/floating_ips.py:372
+#: nova/network/floating_ips.py:97 nova/network/floating_ips.py:373
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/floating_ips.py:120
+#: nova/network/floating_ips.py:121
#, python-format
msgid "floating IP allocation for instance |%(floating_address)s|"
msgstr ""
-#: nova/network/floating_ips.py:177
+#: nova/network/floating_ips.py:178
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/floating_ips.py:195
+#: nova/network/floating_ips.py:196
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/floating_ips.py:199
+#: nova/network/floating_ips.py:200
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/floating_ips.py:219
+#: nova/network/floating_ips.py:220
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/floating_ips.py:280
+#: nova/network/floating_ips.py:281
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/floating_ips.py:529
+#: nova/network/floating_ips.py:530
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/floating_ips.py:536
+#: nova/network/floating_ips.py:537
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/floating_ips.py:567
+#: nova/network/floating_ips.py:568
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/floating_ips.py:575
+#: nova/network/floating_ips.py:576
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/floating_ips.py:623
+#: nova/network/floating_ips.py:624
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4621,175 +4743,175 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/floating_ips.py:663
+#: nova/network/floating_ips.py:664
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/floating_ips.py:672
+#: nova/network/floating_ips.py:673
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/ldapdns.py:99
+#: nova/network/ldapdns.py:100
#, python-format
msgid ""
"Found multiple matches for domain %(domain)s.\n"
"%(entry)s"
msgstr ""
-#: nova/network/ldapdns.py:126
+#: nova/network/ldapdns.py:127
#, python-format
msgid "Unable to dequalify. %(name)s is not in %(domain)s.\n"
msgstr ""
-#: nova/network/ldapdns.py:321
+#: nova/network/ldapdns.py:322
msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/ldapdns.py:364
+#: nova/network/ldapdns.py:365
msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:208
+#: nova/network/linux_net.py:213
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:243
+#: nova/network/linux_net.py:248
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:268
+#: nova/network/linux_net.py:273
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:405
+#: nova/network/linux_net.py:410
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:654
+#: nova/network/linux_net.py:667
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:934
+#: nova/network/linux_net.py:947
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:979
+#: nova/network/linux_net.py:992
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:981
+#: nova/network/linux_net.py:994
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:1056
+#: nova/network/linux_net.py:1069
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:1058
+#: nova/network/linux_net.py:1071
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1190
+#: nova/network/linux_net.py:1203
#, python-format
msgid "Error clearing stale veth %s"
msgstr ""
-#: nova/network/linux_net.py:1349
+#: nova/network/linux_net.py:1362
#, python-format
msgid "Starting VLAN interface %s"
msgstr ""
-#: nova/network/linux_net.py:1380
+#: nova/network/linux_net.py:1393
#, python-format
msgid "Failed unplugging VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1383
+#: nova/network/linux_net.py:1396
#, python-format
msgid "Unplugged VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1405
+#: nova/network/linux_net.py:1418
#, python-format
msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1417
+#: nova/network/linux_net.py:1430
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/linux_net.py:1450
+#: nova/network/linux_net.py:1463
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1486
+#: nova/network/linux_net.py:1499
#, python-format
msgid "Failed unplugging bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1489
+#: nova/network/linux_net.py:1502
#, python-format
msgid "Unplugged bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1658
+#: nova/network/linux_net.py:1671
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1666
+#: nova/network/linux_net.py:1679
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1685
+#: nova/network/linux_net.py:1698
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1687
+#: nova/network/linux_net.py:1700
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:354
+#: nova/network/manager.py:355
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:358
+#: nova/network/manager.py:359
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:485
+#: nova/network/manager.py:486
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:492
+#: nova/network/manager.py:493
#, python-format
msgid "networks retrieved for instance: |%(networks_list)s|"
msgstr ""
-#: nova/network/manager.py:540
+#: nova/network/manager.py:541
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:803
+#: nova/network/manager.py:804
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4797,89 +4919,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:895
+#: nova/network/manager.py:896
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:914
+#: nova/network/manager.py:915
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:918
+#: nova/network/manager.py:919
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:927
+#: nova/network/manager.py:928
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:932
+#: nova/network/manager.py:933
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:936
+#: nova/network/manager.py:937
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:940
+#: nova/network/manager.py:941
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:959
+#: nova/network/manager.py:960
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:983
+#: nova/network/manager.py:984
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1003
+#: nova/network/manager.py:1004
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1084
+#: nova/network/manager.py:1085
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1087
+#: nova/network/manager.py:1088
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1098
+#: nova/network/manager.py:1099
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1155
+#: nova/network/manager.py:1156
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1174
+#: nova/network/manager.py:1175
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:1716
+#: nova/network/manager.py:1748
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:1723
+#: nova/network/manager.py:1755
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -4913,66 +5035,144 @@ msgstr ""
msgid "v4 subnets are required for legacy nw_info"
msgstr ""
-#: nova/network/quantumv2/__init__.py:42
+#: nova/network/quantumv2/__init__.py:43
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:134
+#: nova/network/quantumv2/api.py:149
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:137
+#: nova/network/quantumv2/api.py:152
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:198
+#: nova/network/quantumv2/api.py:197
+#, python-format
+msgid ""
+"Multiple security groups found matching '%s'. Use an ID to be more "
+"specific."
+msgstr ""
+
+#: nova/network/quantumv2/api.py:265
msgid "Port not found"
msgstr ""
-#: nova/network/quantumv2/api.py:206
+#: nova/network/quantumv2/api.py:273
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:219
+#: nova/network/quantumv2/api.py:303
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:228
+#: nova/network/quantumv2/api.py:312
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:241
+#: nova/network/quantumv2/api.py:329
+#, python-format
+msgid "Failed to delete quantum port %(port_id)s "
+msgstr ""
+
+#: nova/network/quantumv2/api.py:352
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:271
+#: nova/network/quantumv2/api.py:382
#, python-format
msgid ""
"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: "
"%(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:300
+#: nova/network/quantumv2/api.py:411
#, python-format
msgid "Unable to update port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:310
+#: nova/network/quantumv2/api.py:421
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:562
+#: nova/network/quantumv2/api.py:678
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
+#: nova/network/security_group/quantum_driver.py:55
+#, python-format
+msgid "Quantum Error creating security group %s"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:103
+#: nova/network/security_group/quantum_driver.py:164
+#, python-format
+msgid "Quantum Error getting security group %s"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:107
+#: nova/network/security_group/quantum_driver.py:146
+#: nova/network/security_group/quantum_driver.py:240
+#, python-format
+msgid "Quantum Error: %s"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:120
+msgid "Quantum Error getting security groups"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:130
+msgid "Security group id should be uuid"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:168
+#: nova/network/security_group/quantum_driver.py:299
+#: nova/network/security_group/quantum_driver.py:305
+#: nova/network/security_group/quantum_driver.py:331
+#: nova/network/security_group/quantum_driver.py:347
+#: nova/network/security_group/quantum_driver.py:353
+#: nova/network/security_group/quantum_driver.py:382
+msgid "Quantum Error:"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:225
+#, python-format
+msgid "Quantum Error unable to delete %s"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:236
+#, python-format
+msgid "Quantum Error getting security group rule %s."
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:315
+#, python-format
+msgid ""
+"Cannot add security group %(name)s to %(instance)s since the port "
+"%(port_id)s does not meet security requirements"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:325
+#: nova/network/security_group/quantum_driver.py:375
+#, python-format
+msgid "Adding security group %(security_group_id)s to port %(port_id)s"
+msgstr ""
+
+#: nova/network/security_group/quantum_driver.py:385
+#, python-format
+msgid ""
+"Security group %(security_group_name)s not assocaited with the instance "
+"%(instance)s"
+msgstr ""
+
#: nova/openstack/common/excutils.py:48
#, python-format
msgid "Original exception being dropped: %s"
@@ -4983,42 +5183,42 @@ msgstr ""
msgid "Max serialization depth exceeded on object: %d %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:98
+#: nova/openstack/common/lockutils.py:97
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/lockutils.py:184
+#: nova/openstack/common/lockutils.py:183
#, python-format
msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:195
+#: nova/openstack/common/lockutils.py:194
#, python-format
msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:223
+#: nova/openstack/common/lockutils.py:222
#, python-format
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:231
+#: nova/openstack/common/lockutils.py:230
#, python-format
msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:224
+#: nova/openstack/common/log.py:225
#, python-format
msgid "Deprecated: %s"
msgstr ""
-#: nova/openstack/common/log.py:362
+#: nova/openstack/common/log.py:363
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
-#: nova/openstack/common/log.py:522
+#: nova/openstack/common/log.py:523
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
@@ -5038,20 +5238,16 @@ msgstr ""
msgid "Failed to understand rule %(rule)r"
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/session.py:431
+#: nova/openstack/common/db/sqlalchemy/session.py:423
msgid "DB exception wrapped."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/session.py:481
+#: nova/openstack/common/db/sqlalchemy/session.py:473
#, python-format
msgid "Got mysql server has gone away: %s"
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/session.py:522
-msgid "Using mysql/eventlet db_pool."
-msgstr ""
-
-#: nova/openstack/common/db/sqlalchemy/session.py:578
+#: nova/openstack/common/db/sqlalchemy/session.py:543
#, python-format
msgid "SQL connection failed. %s attempts left."
msgstr ""
@@ -5064,19 +5260,19 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/openstack/common/notifier/api.py:125
+#: nova/openstack/common/notifier/api.py:126
#, python-format
msgid "%s not in valid priorities"
msgstr ""
-#: nova/openstack/common/notifier/api.py:141
+#: nova/openstack/common/notifier/api.py:142
#, python-format
msgid ""
"Problem '%(e)s' attempting to send to notification system. "
"Payload=%(payload)s"
msgstr ""
-#: nova/openstack/common/notifier/api.py:171
+#: nova/openstack/common/notifier/api.py:172
#, python-format
msgid "Failed to load notifier %s. These notifications will not be sent."
msgstr ""
@@ -5086,17 +5282,17 @@ msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
msgstr ""
#: nova/openstack/common/notifier/rpc_notifier.py:45
-#: nova/openstack/common/notifier/rpc_notifier2.py:50
+#: nova/openstack/common/notifier/rpc_notifier2.py:51
#, python-format
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
msgstr ""
-#: nova/openstack/common/plugin/pluginmanager.py:65
+#: nova/openstack/common/plugin/pluginmanager.py:66
#, python-format
msgid "Failed to load plugin %(plug)s: %(exc)s"
msgstr ""
-#: nova/openstack/common/rpc/__init__.py:105
+#: nova/openstack/common/rpc/__init__.py:106
#, python-format
msgid ""
"A RPC is being made while holding a lock. The locks currently held are "
@@ -5104,154 +5300,170 @@ msgid ""
"following: [%(stack)s]."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:57
+#: nova/openstack/common/rpc/amqp.py:72
msgid "Pool creating new connection"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:209
+#: nova/openstack/common/rpc/amqp.py:197
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:205
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshhold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:287
#, python-format
msgid "unpacked context: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:253
+#: nova/openstack/common/rpc/amqp.py:370
#, python-format
msgid "received %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:259
+#: nova/openstack/common/rpc/amqp.py:376
#, python-format
msgid "no method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:260
+#: nova/openstack/common/rpc/amqp.py:377
#, python-format
msgid "No method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:286
-#: nova/openstack/common/rpc/impl_zmq.py:277
+#: nova/openstack/common/rpc/amqp.py:403
+#: nova/openstack/common/rpc/impl_zmq.py:283
#, python-format
msgid "Expected exception during message handling (%s)"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:292
-#: nova/openstack/common/rpc/impl_zmq.py:283
+#: nova/openstack/common/rpc/amqp.py:409
+#: nova/openstack/common/rpc/impl_zmq.py:289
msgid "Exception during message handling"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:362
+#: nova/openstack/common/rpc/amqp.py:457
+msgid "Timed out waiting for RPC response."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:541
#, python-format
msgid "Making synchronous call on %s ..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:365
+#: nova/openstack/common/rpc/amqp.py:544
#, python-format
msgid "MSG_ID is %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:387
+#: nova/openstack/common/rpc/amqp.py:577
#, python-format
msgid "Making asynchronous cast on %s..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:395
+#: nova/openstack/common/rpc/amqp.py:585
msgid "Making asynchronous fanout cast..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:420
+#: nova/openstack/common/rpc/amqp.py:610
#, python-format
msgid "Sending %(event_type)s on %(topic)s"
msgstr ""
-#: nova/openstack/common/rpc/common.py:77
+#: nova/openstack/common/rpc/common.py:78
msgid "An unknown RPC related exception occurred."
msgstr ""
-#: nova/openstack/common/rpc/common.py:107
+#: nova/openstack/common/rpc/common.py:108
#, python-format
msgid ""
"Remote error: %(exc_type)s %(value)s\n"
"%(traceback)s."
msgstr ""
-#: nova/openstack/common/rpc/common.py:124
+#: nova/openstack/common/rpc/common.py:125
msgid "Timeout while waiting on RPC response."
msgstr ""
-#: nova/openstack/common/rpc/common.py:128
+#: nova/openstack/common/rpc/common.py:129
msgid "Invalid reuse of an RPC connection."
msgstr ""
-#: nova/openstack/common/rpc/common.py:132
+#: nova/openstack/common/rpc/common.py:133
#, python-format
msgid "Specified RPC version, %(version)s, not supported by this endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:137
+#: nova/openstack/common/rpc/common.py:138
#, python-format
msgid ""
"Specified RPC envelope version, %(version)s, not supported by this "
"endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:239
+#: nova/openstack/common/rpc/common.py:262
#, python-format
msgid "Failed to sanitize %(item)s. Key error %(err)s"
msgstr ""
-#: nova/openstack/common/rpc/common.py:261
+#: nova/openstack/common/rpc/common.py:284
#, python-format
msgid "Returning exception %s to caller"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:170
+#: nova/openstack/common/rpc/impl_kombu.py:169
#: nova/openstack/common/rpc/impl_qpid.py:133
msgid "Failed to process message... skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:478
+#: nova/openstack/common/rpc/impl_kombu.py:480
#, python-format
msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:500
+#: nova/openstack/common/rpc/impl_kombu.py:502
#, python-format
msgid "Connected to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:537
+#: nova/openstack/common/rpc/impl_kombu.py:539
#, python-format
msgid ""
"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
"%(max_retries)d tries: %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:553
+#: nova/openstack/common/rpc/impl_kombu.py:555
#, python-format
msgid ""
"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
"again in %(sleep_time)d seconds."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:607
+#: nova/openstack/common/rpc/impl_kombu.py:609
#: nova/openstack/common/rpc/impl_qpid.py:403
#, python-format
msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:625
+#: nova/openstack/common/rpc/impl_kombu.py:627
#: nova/openstack/common/rpc/impl_qpid.py:418
#, python-format
msgid "Timed out waiting for RPC response: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:629
+#: nova/openstack/common/rpc/impl_kombu.py:631
#: nova/openstack/common/rpc/impl_qpid.py:422
#, python-format
msgid "Failed to consume message from queue: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:668
+#: nova/openstack/common/rpc/impl_kombu.py:670
#: nova/openstack/common/rpc/impl_qpid.py:457
#, python-format
msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
@@ -5316,263 +5528,274 @@ msgstr ""
msgid "You cannot send on this socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:265
+#: nova/openstack/common/rpc/impl_zmq.py:271
#, python-format
msgid "Running func with context: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:297
+#: nova/openstack/common/rpc/impl_zmq.py:303
msgid "Sending reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:331
+#: nova/openstack/common/rpc/impl_zmq.py:337
msgid "RPC message did not include method."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:366
+#: nova/openstack/common/rpc/impl_zmq.py:372
msgid "Registering reactor"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:378
+#: nova/openstack/common/rpc/impl_zmq.py:384
msgid "In reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:393
+#: nova/openstack/common/rpc/impl_zmq.py:399
msgid "Out reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:397
+#: nova/openstack/common/rpc/impl_zmq.py:403
msgid "Consuming socket"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:438
+#: nova/openstack/common/rpc/impl_zmq.py:443
#, python-format
msgid "CONSUMER GOT %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:447
+#: nova/openstack/common/rpc/impl_zmq.py:455
#, python-format
msgid "Creating proxy for topic: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:471
+#: nova/openstack/common/rpc/impl_zmq.py:479
#, python-format
msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:480
+#: nova/openstack/common/rpc/impl_zmq.py:488
msgid "Topic socket file creation failed."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:485
+#: nova/openstack/common/rpc/impl_zmq.py:493
#, python-format
msgid "ROUTER RELAY-OUT QUEUED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:488
+#: nova/openstack/common/rpc/impl_zmq.py:496
#, python-format
msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:506
+#: nova/openstack/common/rpc/impl_zmq.py:514
#, python-format
msgid "Could not create IPC directory %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:516
+#: nova/openstack/common/rpc/impl_zmq.py:524
msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:536
+#: nova/openstack/common/rpc/impl_zmq.py:559
#, python-format
msgid "CONSUMER RECEIVED DATA: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:538
+#: nova/openstack/common/rpc/impl_zmq.py:561
#, python-format
msgid "ROUTER RELAY-OUT %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:564
-#, python-format
-msgid "Create Consumer for topic (%(topic)s)"
+#: nova/openstack/common/rpc/impl_zmq.py:583
+msgid "ZMQ Envelope version unsupported or unknown."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:580
+#: nova/openstack/common/rpc/impl_zmq.py:608
+msgid "Skipping topic registration. Already registered."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:615
#, python-format
msgid "Consumer is a zmq.%s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:625
+#: nova/openstack/common/rpc/impl_zmq.py:662
msgid "Creating payload"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:638
+#: nova/openstack/common/rpc/impl_zmq.py:675
msgid "Creating queue socket for reply waiter"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:649
+#: nova/openstack/common/rpc/impl_zmq.py:688
msgid "Sending cast"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:653
+#: nova/openstack/common/rpc/impl_zmq.py:691
msgid "Cast sent; Waiting reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:656
+#: nova/openstack/common/rpc/impl_zmq.py:694
#, python-format
msgid "Received message: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:657
+#: nova/openstack/common/rpc/impl_zmq.py:695
msgid "Unpacking response"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:663
+#: nova/openstack/common/rpc/impl_zmq.py:704
+msgid "Unsupported or unknown ZMQ envelope returned."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:711
msgid "RPC Message Invalid."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:687
+#: nova/openstack/common/rpc/impl_zmq.py:735
#, python-format
msgid "%(msg)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:690
+#: nova/openstack/common/rpc/impl_zmq.py:738
#, python-format
msgid "Sending message(s) to: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:694
+#: nova/openstack/common/rpc/impl_zmq.py:742
msgid "No matchmaker results. Not casting."
msgstr ""
-#: nova/openstack/common/rpc/matchmaker.py:45
+#: nova/openstack/common/rpc/matchmaker.py:46
msgid "Match not found by MatchMaker."
msgstr ""
-#: nova/openstack/common/rpc/matchmaker.py:177
-#: nova/openstack/common/rpc/matchmaker.py:195
+#: nova/openstack/common/rpc/matchmaker.py:178
+#: nova/openstack/common/rpc/matchmaker.py:196
#, python-format
msgid "No key defining hosts for topic '%s', see ringfile"
msgstr ""
-#: nova/scheduler/chance.py:50
+#: nova/scheduler/chance.py:51
msgid "Is the appropriate service running?"
msgstr ""
-#: nova/scheduler/chance.py:55
+#: nova/scheduler/chance.py:56
msgid "Could not find another compute"
msgstr ""
-#: nova/scheduler/driver.py:58
+#: nova/scheduler/driver.py:60
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:60 nova/scheduler/manager.py:203
+#: nova/scheduler/driver.py:62 nova/scheduler/manager.py:205
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:152
+#: nova/scheduler/driver.py:155
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:160
+#: nova/scheduler/driver.py:163
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:287
+#: nova/scheduler/driver.py:168
+msgid "Driver must implement select_hosts"
+msgstr ""
+
+#: nova/scheduler/driver.py:333
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:73
+#: nova/scheduler/filter_scheduler.py:74
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:218
+#: nova/scheduler/filter_scheduler.py:228
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:235
+#: nova/scheduler/filter_scheduler.py:245
#, python-format
msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:264
+#: nova/scheduler/filter_scheduler.py:274
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:330
+#: nova/scheduler/filter_scheduler.py:340
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:343
+#: nova/scheduler/filter_scheduler.py:353
#, python-format
msgid "Choosing host %(chosen_host)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:376
+#: nova/scheduler/filter_scheduler.py:386
#, python-format
msgid "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory"
msgstr ""
-#: nova/scheduler/host_manager.py:306
+#: nova/scheduler/host_manager.py:307
#, python-format
msgid "Host filter ignoring hosts: %(ignored_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:315
+#: nova/scheduler/host_manager.py:316
#, python-format
msgid ""
"No hosts matched due to not matching 'force_hosts'value of "
"'%(forced_hosts_str)s'"
msgstr ""
-#: nova/scheduler/host_manager.py:320
+#: nova/scheduler/host_manager.py:321
#, python-format
msgid "Host filter forcing available hosts to %(forced_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:352
+#: nova/scheduler/host_manager.py:353
#, python-format
msgid "Ignoring %(service_name)s service update from %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:357
+#: nova/scheduler/host_manager.py:358
#, python-format
msgid "Received %(service_name)s service update from %(state_key)s."
msgstr ""
-#: nova/scheduler/host_manager.py:376
+#: nova/scheduler/host_manager.py:377
#: nova/scheduler/filters/trusted_filter.py:220
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/host_manager.py:398
+#: nova/scheduler/host_manager.py:399
#, python-format
msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
msgstr ""
-#: nova/scheduler/manager.py:189
+#: nova/scheduler/manager.py:191
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
-#: nova/scheduler/scheduler_options.py:68
+#: nova/scheduler/scheduler_options.py:69
#, python-format
msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
msgstr ""
-#: nova/scheduler/scheduler_options.py:77
+#: nova/scheduler/scheduler_options.py:78
#, python-format
msgid "Could not decode scheduler options: '%(e)s'"
msgstr ""
@@ -5595,12 +5818,17 @@ msgstr ""
msgid "%(host_state)s fails instance_type extra_specs requirements"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:39
+#: nova/scheduler/filters/aggregate_multitenancy_isolation.py:44
+#, python-format
+msgid "%(host_state)s fails tenant id on aggregate"
+msgstr ""
+
+#: nova/scheduler/filters/compute_filter.py:40
#, python-format
msgid "%(host_state)s is disabled or has not been heard from in a while"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:43
+#: nova/scheduler/filters/compute_filter.py:44
#, python-format
msgid "%(host_state)s is disabled via capabilities"
msgstr ""
@@ -5609,7 +5837,7 @@ msgstr ""
msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
-#: nova/scheduler/filters/disk_filter.py:46
+#: nova/scheduler/filters/disk_filter.py:47
#, python-format
msgid ""
"%(host_state)s does not have %(requested_disk)s MB usable disk, it only "
@@ -5642,21 +5870,21 @@ msgstr ""
msgid "%(host_state)s does not support requested instance_properties"
msgstr ""
-#: nova/scheduler/filters/io_ops_filter.py:41
+#: nova/scheduler/filters/io_ops_filter.py:42
#, python-format
msgid ""
"%(host_state)s fails I/O ops check: Max IOs per host is set to "
"%(max_io_ops)s"
msgstr ""
-#: nova/scheduler/filters/num_instances_filter.py:38
+#: nova/scheduler/filters/num_instances_filter.py:39
#, python-format
msgid ""
"%(host_state)s fails num_instances check: Max instances per host is set "
"to %(max_instances)s"
msgstr ""
-#: nova/scheduler/filters/ram_filter.py:45
+#: nova/scheduler/filters/ram_filter.py:46
#, python-format
msgid ""
"%(host_state)s does not have %(requested_ram)s MB usable ram, it only has"
@@ -5672,69 +5900,89 @@ msgstr ""
msgid "least_cost has been deprecated in favor of the RAM Weigher."
msgstr ""
-#: nova/servicegroup/api.py:58
+#: nova/servicegroup/api.py:61
#, python-format
msgid "ServiceGroup driver defined as an instance of %s"
msgstr ""
-#: nova/servicegroup/api.py:64
+#: nova/servicegroup/api.py:67
#, python-format
msgid "unknown ServiceGroup driver name: %s"
msgstr ""
-#: nova/servicegroup/api.py:81
+#: nova/servicegroup/api.py:84
#, python-format
msgid ""
"Join new ServiceGroup member %(member_id)s to the %(group_id)s group, "
"service = %(service)s"
msgstr ""
-#: nova/servicegroup/api.py:88
+#: nova/servicegroup/api.py:91
#, python-format
msgid "Check if the given member [%s] is part of the ServiceGroup, is up"
msgstr ""
-#: nova/servicegroup/api.py:97
+#: nova/servicegroup/api.py:100
#, python-format
msgid ""
"Explicitly remove the given member %(member_id)s from the%(group_id)s "
"group monitoring"
msgstr ""
-#: nova/servicegroup/api.py:104
+#: nova/servicegroup/api.py:107
#, python-format
msgid "Returns ALL members of the [%s] ServiceGroup"
msgstr ""
-#: nova/servicegroup/api.py:112
+#: nova/servicegroup/api.py:115
#, python-format
msgid "Returns one member of the [%s] group"
msgstr ""
-#: nova/servicegroup/drivers/db.py:41
+#: nova/servicegroup/drivers/db.py:42
#, python-format
msgid ""
"DB_Driver: join new ServiceGroup member %(member_id)s to the %(group_id)s"
" group, service = %(service)s"
msgstr ""
-#: nova/servicegroup/drivers/db.py:45
+#: nova/servicegroup/drivers/db.py:46
msgid "service is a mandatory argument for DB based ServiceGroup driver"
msgstr ""
-#: nova/servicegroup/drivers/db.py:74
+#: nova/servicegroup/drivers/db.py:75
#, python-format
msgid "DB_Driver: get_all members of the %s group"
msgstr ""
-#: nova/servicegroup/drivers/db.py:97
+#: nova/servicegroup/drivers/db.py:98 nova/servicegroup/drivers/mc.py:103
msgid "Recovered model server connection!"
msgstr ""
-#: nova/servicegroup/drivers/db.py:103
+#: nova/servicegroup/drivers/db.py:104 nova/servicegroup/drivers/mc.py:109
msgid "model server went away"
msgstr ""
+#: nova/servicegroup/drivers/mc.py:45
+msgid "memcached_servers not defined"
+msgstr ""
+
+#: nova/servicegroup/drivers/mc.py:53
+#, python-format
+msgid ""
+"Memcached_Driver: join new ServiceGroup member %(member_id)s to the "
+"%(group_id)s group, service = %(service)s"
+msgstr ""
+
+#: nova/servicegroup/drivers/mc.py:58
+msgid "service is a mandatory argument for Memcached based ServiceGroup driver"
+msgstr ""
+
+#: nova/servicegroup/drivers/mc.py:78
+#, python-format
+msgid "Memcached_Driver: get_all members of the %s group"
+msgstr ""
+
#: nova/servicegroup/drivers/zk.py:79
#, python-format
msgid ""
@@ -5759,6 +6007,26 @@ msgstr ""
msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group"
msgstr ""
+#: nova/storage/linuxscsi.py:70
+#, python-format
+msgid "Trying (%(tries)s) to remove device %(device)s"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:99
+#, python-format
+msgid "Multipath call failed exit (%(code)s)"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:117
+#, python-format
+msgid "Couldn't find multipath device %(line)s"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:121
+#, python-format
+msgid "Found multipath device = %(mdev)s"
+msgstr ""
+
#: nova/tests/fake_ldap.py:34
msgid "Attempted to instantiate singleton"
msgstr ""
@@ -5783,39 +6051,39 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:185 nova/volume/cinder.py:205
+#: nova/tests/fake_volume.py:186 nova/volume/cinder.py:205
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:208
+#: nova/tests/fake_volume.py:190 nova/volume/cinder.py:208
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:193 nova/volume/cinder.py:212
+#: nova/tests/fake_volume.py:194 nova/volume/cinder.py:212
msgid "Instance and volume not in same availability_zone"
msgstr ""
-#: nova/tests/fake_volume.py:198 nova/volume/cinder.py:218
+#: nova/tests/fake_volume.py:199 nova/volume/cinder.py:218
msgid "already detached"
msgstr ""
-#: nova/tests/fakelibvirt.py:861
+#: nova/tests/fakelibvirt.py:891
msgid "Please extend mock libvirt module to support flags"
msgstr ""
-#: nova/tests/fakelibvirt.py:865
+#: nova/tests/fakelibvirt.py:895
msgid "Expected a list for 'auth' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:869
+#: nova/tests/fakelibvirt.py:899
msgid "Expected a function in 'auth[0]' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:873
+#: nova/tests/fakelibvirt.py:903
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
-#: nova/tests/test_hypervapi.py:413
+#: nova/tests/test_hypervapi.py:414
msgid "fake vswitch not found"
msgstr ""
@@ -5862,12 +6130,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_xenapi.py:802
+#: nova/tests/test_xenapi.py:803
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:813
+#: nova/tests/test_xenapi.py:814
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5884,107 +6152,56 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3215
+#: nova/tests/api/openstack/compute/test_servers.py:3218
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3220
+#: nova/tests/api/openstack/compute/test_servers.py:3223
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3225
+#: nova/tests/api/openstack/compute/test_servers.py:3228
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:53
-#, python-format
-msgid "_create: %s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:62
-#, python-format
-msgid "_delete: %s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:71
-#, python-format
-msgid "_get: %s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:81
-#, python-format
-msgid "_get_all: %s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:125
-#, python-format
-msgid "test_snapshot_create: param=%s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:134
-#, python-format
-msgid "test_snapshot_create: resp_dict=%s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:156
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:182
-#, python-format
-msgid "test_snapshot_create_force: param=%s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:165
-#, python-format
-msgid "test_snapshot_create_force: resp_dict=%s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:218
-#, python-format
-msgid "test_snapshot_show: resp=%s"
-msgstr ""
-
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:244
-#, python-format
-msgid "test_snapshot_detail: resp_dict=%s"
-msgstr ""
-
-#: nova/tests/compute/test_compute.py:716
-#: nova/tests/compute/test_compute.py:734
-#: nova/tests/compute/test_compute.py:785
-#: nova/tests/compute/test_compute.py:812
-#: nova/tests/compute/test_compute.py:2909
+#: nova/tests/compute/test_compute.py:717
+#: nova/tests/compute/test_compute.py:735
+#: nova/tests/compute/test_compute.py:786
+#: nova/tests/compute/test_compute.py:813
+#: nova/tests/compute/test_compute.py:2979
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:722
-#: nova/tests/compute/test_compute.py:757
-#: nova/tests/compute/test_compute.py:800
-#: nova/tests/compute/test_compute.py:830
+#: nova/tests/compute/test_compute.py:723
+#: nova/tests/compute/test_compute.py:758
+#: nova/tests/compute/test_compute.py:801
+#: nova/tests/compute/test_compute.py:831
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1263
+#: nova/tests/compute/test_compute.py:1264
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2920
+#: nova/tests/compute/test_compute.py:2990
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3418
+#: nova/tests/compute/test_compute.py:3488
msgid "wrong host/node"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:171
+#: nova/tests/integrated/test_api_samples.py:172
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:175
+#: nova/tests/integrated/test_api_samples.py:176
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5992,21 +6209,21 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:183
+#: nova/tests/integrated/test_api_samples.py:184
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:201
+#: nova/tests/integrated/test_api_samples.py:202
msgid "Extra items in expected:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:205
+#: nova/tests/integrated/test_api_samples.py:206
msgid "Extra items in result:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:224
-#: nova/tests/integrated/test_api_samples.py:237
+#: nova/tests/integrated/test_api_samples.py:225
+#: nova/tests/integrated/test_api_samples.py:238
#, python-format
msgid ""
"Values do not match:\n"
@@ -6068,48 +6285,57 @@ msgstr ""
msgid "Decoding JSON: %s"
msgstr ""
-#: nova/virt/configdrive.py:96
+#: nova/virt/configdrive.py:97
#, python-format
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/driver.py:872
+#: nova/virt/driver.py:892
+msgid "Event must be an instance of nova.virt.event.Event"
+msgstr ""
+
+#: nova/virt/driver.py:898
+#, python-format
+msgid "Exception dispatching event %(event)s: %(ex)s"
+msgstr ""
+
+#: nova/virt/driver.py:920
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:875
+#: nova/virt/driver.py:923
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/driver.py:882
+#: nova/virt/driver.py:930
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
-#: nova/virt/fake.py:205
+#: nova/virt/fake.py:206
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
-#: nova/virt/firewall.py:179 nova/virt/libvirt/firewall.py:267
+#: nova/virt/firewall.py:180 nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/firewall.py:190
+#: nova/virt/firewall.py:191
msgid "Filters added to instance"
msgstr ""
-#: nova/virt/firewall.py:192
+#: nova/virt/firewall.py:193
msgid "Provider Firewall Rules refreshed"
msgstr ""
-#: nova/virt/firewall.py:360
+#: nova/virt/firewall.py:361
#, python-format
msgid "Adding security group rule: %r"
msgstr ""
-#: nova/virt/firewall.py:491 nova/virt/xenapi/firewall.py:74
+#: nova/virt/firewall.py:494 nova/virt/xenapi/firewall.py:74
#, python-format
msgid "Adding provider rule: %s"
msgstr ""
@@ -6127,110 +6353,160 @@ msgstr ""
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:232
+#: nova/virt/images.py:233
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:94
+#: nova/virt/baremetal/driver.py:90
#, python-format
msgid "Request for baremetal node %s sent to wrong service host"
msgstr ""
-#: nova/virt/baremetal/driver.py:146
+#: nova/virt/baremetal/driver.py:143
msgid "cpu_arch is not found in instance_type_extra_specs"
msgstr ""
#: nova/virt/baremetal/driver.py:185
#, python-format
-msgid "Node %(id)r assigned to instance %(uuid)r which cannot be found."
+msgid "Baremetal node id not supplied to driver for %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:200
+#: nova/virt/baremetal/driver.py:257
#, python-format
-msgid "Baremetal node id not supplied to driver for %r"
+msgid "Error deploying instance %(instance)s on baremetal node %(node)s."
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:284
+#, python-format
+msgid "Baremetal power manager failed to restart node for instance %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:271
+#: nova/virt/baremetal/driver.py:295
#, python-format
-msgid "Failed to update state record for baremetal node %s"
+msgid "Destroy called on non-existing instance %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:290
+#: nova/virt/baremetal/driver.py:313
#, python-format
-msgid "Delete called on non-existing instance %s"
+msgid "Error from baremetal driver during destroy: %s"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:84
+#: nova/virt/baremetal/driver.py:318
+#, python-format
+msgid "Error while recording destroy failure in baremetal database: %s"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:328
+#, python-format
+msgid "Baremetal power manager failed to stop node for instance %r"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:340
+#, python-format
+msgid "Baremetal power manager failed to start node for instance %r"
+msgstr ""
+
+#: nova/virt/baremetal/ipmi.py:85
#, python-format
msgid "pid file %s does not contain any pid"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:107
+#: nova/virt/baremetal/ipmi.py:108
msgid "Node id not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:110
+#: nova/virt/baremetal/ipmi.py:111
msgid "Address not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:113
+#: nova/virt/baremetal/ipmi.py:114
msgid "User not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:116
+#: nova/virt/baremetal/ipmi.py:117
msgid "Password not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:133
+#: nova/virt/baremetal/ipmi.py:134
#, python-format
msgid "ipmitool stdout: '%(out)s', stderr: '%(err)s'"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:159
+#: nova/virt/baremetal/ipmi.py:160
msgid "IPMI power on failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:181
+#: nova/virt/baremetal/ipmi.py:182
msgid "IPMI power off failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:191
+#: nova/virt/baremetal/ipmi.py:192
msgid "IPMI set next bootdev failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:196
+#: nova/virt/baremetal/ipmi.py:197
#, python-format
msgid "Activate node called, but node %s is already active"
msgstr ""
-#: nova/virt/baremetal/pxe.py:85
+#: nova/virt/baremetal/pxe.py:92
#, python-format
msgid "Building PXE config for deployment %s."
msgstr ""
-#: nova/virt/baremetal/pxe.py:224
+#: nova/virt/baremetal/pxe.py:231
#, python-format
msgid ""
"Can not activate PXE bootloader. The following boot parameters were not "
"passed to baremetal driver: %s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:249
+#: nova/virt/baremetal/pxe.py:256
#, python-format
msgid "Fetching kernel and ramdisk for instance %s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:281
+#: nova/virt/baremetal/pxe.py:288
#, python-format
msgid "Fetching image %(ami)s for instance %(name)s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:321
+#: nova/virt/baremetal/pxe.py:328
#, python-format
msgid "Injecting files into image for instance %(name)s"
msgstr ""
+#: nova/virt/baremetal/pxe.py:450
+#, python-format
+msgid "Node associated with another instance while waiting for deploy of %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:457
+#, python-format
+msgid "PXE deploy started for instance %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:462
+#, python-format
+msgid "PXE deploy completed for instance %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:466
+#, python-format
+msgid "PXE deploy failed for instance %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:468
+#, python-format
+msgid "Baremetal node deleted while waiting for deployment of instance %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:473
+#, python-format
+msgid "Timeout reached while waiting for PXE deploy of instance %s"
+msgstr ""
+
#: nova/virt/baremetal/utils.py:41
#, python-format
msgid "Failed to inject data into image %(image)s. Error: %(e)s"
@@ -6251,34 +6527,34 @@ msgstr ""
msgid "Failed to create symlink from %(source)s to %(link)s, error: %(e)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:36
+#: nova/virt/baremetal/vif_driver.py:37
#, python-format
msgid "plug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:49
+#: nova/virt/baremetal/vif_driver.py:50
#, python-format
msgid "pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:56
+#: nova/virt/baremetal/vif_driver.py:57
#, python-format
msgid ""
"Baremetal node: %(id)s has no available physical interface for virtual "
"interface %(vif_uuid)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:62
+#: nova/virt/baremetal/vif_driver.py:63
#, python-format
msgid "unplug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:70
+#: nova/virt/baremetal/vif_driver.py:71
#, python-format
msgid "pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:74
+#: nova/virt/baremetal/vif_driver.py:75
#, python-format
msgid "no pif for vif_uuid=%s"
msgstr ""
@@ -6295,82 +6571,82 @@ msgstr ""
msgid "virtual_power_host_pass not defined. Can not Start"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:120
+#: nova/virt/baremetal/volume_driver.py:121
#, python-format
msgid "baremetal driver was unable to delete tid %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:184
+#: nova/virt/baremetal/volume_driver.py:185
#, python-format
msgid "Could not determine iscsi initiator name for instance %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:225
+#: nova/virt/baremetal/volume_driver.py:226
#, python-format
msgid "No fixed PXE IP is associated to %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:257
+#: nova/virt/baremetal/volume_driver.py:258
#, python-format
msgid "detach volume could not find tid for %s"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:163
-msgid "instance_uuid must be supplied to bm_node_set_uuid_safe"
+#: nova/virt/baremetal/db/sqlalchemy/api.py:201
+msgid "instance_uuid must be supplied to bm_node_associate_and_update"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:175
+#: nova/virt/baremetal/db/sqlalchemy/api.py:213
#, python-format
-msgid "Failed to associate instance %(uuid)s to baremetal node %(id)s."
+msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s."
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:283
+#: nova/virt/baremetal/db/sqlalchemy/api.py:331
msgid "No more PXE IPs available"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:305
-#: nova/virt/baremetal/db/sqlalchemy/api.py:347
+#: nova/virt/baremetal/db/sqlalchemy/api.py:353
+#: nova/virt/baremetal/db/sqlalchemy/api.py:395
#, python-format
msgid "Baremetal interface %s not found"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:357
+#: nova/virt/baremetal/db/sqlalchemy/api.py:405
#, python-format
msgid "Baremetal interface %s already in use"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:371
+#: nova/virt/baremetal/db/sqlalchemy/api.py:419
#, python-format
msgid "Baremetal virtual interface %s not found"
msgstr ""
-#: nova/virt/disk/api.py:131
+#: nova/virt/disk/api.py:132
#, python-format
msgid "Checking if we can resize image %(image)s. size=%(size)s, CoW=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:137
+#: nova/virt/disk/api.py:138
#, python-format
msgid "Cannot resize filesystem %s to a smaller size."
msgstr ""
-#: nova/virt/disk/api.py:148
+#: nova/virt/disk/api.py:149
#, python-format
msgid "Unable to mount image %(image)s with error %(error)s. Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:158
+#: nova/virt/disk/api.py:159
#, python-format
msgid ""
"Unable to determine label for image %(image)s with error %(errror)s. "
"Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:238
+#: nova/virt/disk/api.py:239
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:294
+#: nova/virt/disk/api.py:295
#, python-format
msgid ""
"Inject data image=%(image)s key=%(key)s net=%(net)s metadata=%(metadata)s"
@@ -6378,68 +6654,68 @@ msgid ""
"partition=%(partition)s use_cow=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:311
+#: nova/virt/disk/api.py:312
#, python-format
msgid "Ignoring error injecting data into image (%(e)s)"
msgstr ""
-#: nova/virt/disk/api.py:330
+#: nova/virt/disk/api.py:331
#, python-format
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': "
"%(errors)s"
msgstr ""
-#: nova/virt/disk/api.py:347
+#: nova/virt/disk/api.py:348
#, python-format
msgid "Failed to teardown ntainer filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:360
+#: nova/virt/disk/api.py:361
#, python-format
msgid "Failed to umount container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:385
+#: nova/virt/disk/api.py:386
#, python-format
msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
msgstr ""
-#: nova/virt/disk/api.py:397
+#: nova/virt/disk/api.py:398
#, python-format
msgid "Inject file fs=%(fs)s path=%(path)s append=%(append)s"
msgstr ""
-#: nova/virt/disk/api.py:406
+#: nova/virt/disk/api.py:407
#, python-format
msgid "Inject metadata fs=%(fs)s metadata=%(metadata)s"
msgstr ""
-#: nova/virt/disk/api.py:447
+#: nova/virt/disk/api.py:448
#, python-format
msgid "Inject key fs=%(fs)s key=%(key)s"
msgstr ""
-#: nova/virt/disk/api.py:476
+#: nova/virt/disk/api.py:477
#, python-format
msgid "Inject key fs=%(fs)s net=%(net)s"
msgstr ""
-#: nova/virt/disk/api.py:502
+#: nova/virt/disk/api.py:503
#, python-format
msgid "Inject admin password fs=%(fs)s admin_passwd=ha-ha-not-telling-you"
msgstr ""
-#: nova/virt/disk/api.py:547
+#: nova/virt/disk/api.py:548
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:576
+#: nova/virt/disk/api.py:577
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:592
+#: nova/virt/disk/api.py:593
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
@@ -6540,44 +6816,44 @@ msgstr ""
msgid "Release loop device %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:54 nova/virt/disk/mount/nbd.py:68
+#: nova/virt/disk/mount/nbd.py:55 nova/virt/disk/mount/nbd.py:69
msgid "No free nbd devices"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:59
+#: nova/virt/disk/mount/nbd.py:60
msgid "nbd module not loaded"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:60
+#: nova/virt/disk/mount/nbd.py:61
msgid "nbd unavailable: module not loaded"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:85
+#: nova/virt/disk/mount/nbd.py:86
#, python-format
msgid "Get nbd device %(dev)s for %(imgfile)s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:90
+#: nova/virt/disk/mount/nbd.py:91
#, python-format
msgid "qemu-nbd error: %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:91 nova/virt/disk/mount/nbd.py:104
+#: nova/virt/disk/mount/nbd.py:92 nova/virt/disk/mount/nbd.py:105
#, python-format
msgid "NBD mount error: %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:103
+#: nova/virt/disk/mount/nbd.py:104
#, python-format
msgid "nbd device %s did not show up"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:110
+#: nova/virt/disk/mount/nbd.py:111
#, python-format
msgid "Detaching from erroneous nbd device returned error: %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:125
+#: nova/virt/disk/mount/nbd.py:126
#, python-format
msgid "Release nbd device %s"
msgstr ""
@@ -6601,111 +6877,111 @@ msgstr ""
msgid "Falling back to VFSLocalFS"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:52
+#: nova/virt/disk/vfs/guestfs.py:54
#, python-format
msgid "Mount guest OS image %(imgfile)s partition %(part)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:61
+#: nova/virt/disk/vfs/guestfs.py:63
#, python-format
msgid "Inspecting guest OS image %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:65
+#: nova/virt/disk/vfs/guestfs.py:67
#, python-format
msgid "No operating system found in %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:69
+#: nova/virt/disk/vfs/guestfs.py:71
#, python-format
msgid "Multi-boot OS %(roots)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:71
+#: nova/virt/disk/vfs/guestfs.py:73
#, python-format
msgid "Multi-boot operating system found in %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:77
+#: nova/virt/disk/vfs/guestfs.py:79
#, python-format
msgid "Inspecting guest OS root filesystem %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:82
+#: nova/virt/disk/vfs/guestfs.py:84
#, python-format
msgid "No mount points found in %(root)s of %(imgfile)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:87
+#: nova/virt/disk/vfs/guestfs.py:89
#, python-format
msgid "Mounting %(dev)s at %(dir)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:92
+#: nova/virt/disk/vfs/guestfs.py:94
#, python-format
msgid "Setting up appliance for %(imgfile)s %(imgfmt)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:107
+#: nova/virt/disk/vfs/guestfs.py:112
#, python-format
msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:114
+#: nova/virt/disk/vfs/guestfs.py:119
msgid "Tearing down appliance"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:120
+#: nova/virt/disk/vfs/guestfs.py:125
#, python-format
msgid "Failed to close augeas %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:128
+#: nova/virt/disk/vfs/guestfs.py:133
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:136
+#: nova/virt/disk/vfs/guestfs.py:141
#, python-format
msgid "Failed to close guest handle %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:148 nova/virt/disk/vfs/localfs.py:102
+#: nova/virt/disk/vfs/guestfs.py:153 nova/virt/disk/vfs/localfs.py:102
#, python-format
msgid "Make directory path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:153 nova/virt/disk/vfs/localfs.py:107
+#: nova/virt/disk/vfs/guestfs.py:158 nova/virt/disk/vfs/localfs.py:107
#, python-format
msgid "Append file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:158 nova/virt/disk/vfs/localfs.py:116
+#: nova/virt/disk/vfs/guestfs.py:163 nova/virt/disk/vfs/localfs.py:116
#, python-format
msgid "Replace file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:163 nova/virt/disk/vfs/localfs.py:125
+#: nova/virt/disk/vfs/guestfs.py:168 nova/virt/disk/vfs/localfs.py:125
#, python-format
msgid "Read file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:168 nova/virt/disk/vfs/localfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:173 nova/virt/disk/vfs/localfs.py:131
#, python-format
msgid "Has file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:177
+#: nova/virt/disk/vfs/guestfs.py:182
#, python-format
msgid "Set permissions path=%(path)s mode=%(mode)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:182
+#: nova/virt/disk/vfs/guestfs.py:187
#, python-format
msgid "Set ownership path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:195
+#: nova/virt/disk/vfs/guestfs.py:200
#, python-format
msgid "chown uid=%(uid)d gid=%(gid)s"
msgstr ""
@@ -6740,7 +7016,7 @@ msgstr ""
msgid "Set permissions path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:74
+#: nova/virt/hyperv/basevolumeutils.py:75
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
@@ -6764,25 +7040,25 @@ msgstr ""
msgid "get_console_output called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:92
+#: nova/virt/hyperv/hostops.py:93
#, python-format
msgid "Windows version: %s "
msgstr ""
-#: nova/virt/hyperv/hostops.py:104
+#: nova/virt/hyperv/hostops.py:105
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:134 nova/virt/libvirt/driver.py:3325
-#: nova/virt/xenapi/host.py:149
+#: nova/virt/hyperv/hostops.py:135 nova/virt/libvirt/driver.py:3642
+#: nova/virt/xenapi/host.py:148
msgid "Updating host stats"
msgstr ""
-#: nova/virt/hyperv/hostops.py:158
+#: nova/virt/hyperv/hostops.py:159
msgid "get_host_stats called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:173
+#: nova/virt/hyperv/hostops.py:174
#, python-format
msgid "Host IP address is: %s"
msgstr ""
@@ -6792,29 +7068,29 @@ msgstr ""
msgid "The image is not a valid VHD: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:47
+#: nova/virt/hyperv/livemigrationops.py:48
msgid "live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:54
+#: nova/virt/hyperv/livemigrationops.py:55
#, python-format
msgid "Calling live migration recover_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:58
+#: nova/virt/hyperv/livemigrationops.py:59
#, python-format
msgid "Calling live migration post_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:64
+#: nova/virt/hyperv/livemigrationops.py:65
msgid "pre_live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:75
+#: nova/virt/hyperv/livemigrationops.py:76
msgid "post_live_migration_at_destination called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:79
+#: nova/virt/hyperv/livemigrationops.py:80
#, python-format
msgid "compare_cpu called %s"
msgstr ""
@@ -6842,12 +7118,12 @@ msgstr ""
msgid "Live migration networks are not configured on this host"
msgstr ""
-#: nova/virt/hyperv/livemigrationutils.py:67 nova/virt/hyperv/vmutils.py:94
+#: nova/virt/hyperv/livemigrationutils.py:67 nova/virt/hyperv/vmutils.py:95
#, python-format
msgid "VM not found: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationutils.py:69 nova/virt/hyperv/vmutils.py:103
+#: nova/virt/hyperv/livemigrationutils.py:69 nova/virt/hyperv/vmutils.py:104
#, python-format
msgid "Duplicate VM name found: %s"
msgstr ""
@@ -6953,66 +7229,66 @@ msgstr ""
msgid "Failed to create vswitch port %(port_name)s on switch %(vswitch_path)s"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:83
+#: nova/virt/hyperv/pathutils.py:84
#, python-format
msgid "Creating directory: %s"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:88 nova/virt/hyperv/snapshotops.py:115
+#: nova/virt/hyperv/pathutils.py:89 nova/virt/hyperv/snapshotops.py:116
#, python-format
msgid "Removing directory: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:55
+#: nova/virt/hyperv/snapshotops.py:56
#, python-format
msgid "Creating snapshot for instance %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:64
+#: nova/virt/hyperv/snapshotops.py:65
#, python-format
msgid "Getting info for VHD %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:72
+#: nova/virt/hyperv/snapshotops.py:73
#, python-format
msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:82
+#: nova/virt/hyperv/snapshotops.py:83
#, python-format
msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:87
+#: nova/virt/hyperv/snapshotops.py:88
#, python-format
msgid ""
"Reconnecting copied base VHD %(dest_base_disk_path)s and diff VHD "
"%(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:93
+#: nova/virt/hyperv/snapshotops.py:94
#, python-format
msgid "Merging base disk %(dest_base_disk_path)s and diff disk %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:98
+#: nova/virt/hyperv/snapshotops.py:99
#, python-format
msgid ""
"Updating Glance image %(image_id)s with content from merged disk "
"%(image_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:104
+#: nova/virt/hyperv/snapshotops.py:105
#, python-format
msgid "Snapshot image %(image_id)s updated for VM %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:108
+#: nova/virt/hyperv/snapshotops.py:109
#, python-format
msgid "Removing snapshot %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:112
+#: nova/virt/hyperv/snapshotops.py:113
#, python-format
msgid "Failed to remove snapshot for VM %s"
msgstr ""
@@ -7022,199 +7298,199 @@ msgstr ""
msgid "Creating vswitch port for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:89
+#: nova/virt/hyperv/vmops.py:90
#, python-format
msgid "VIF driver not found for network_api_class: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:98
+#: nova/virt/hyperv/vmops.py:99
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:118
+#: nova/virt/hyperv/vmops.py:119
#, python-format
msgid ""
"Creating differencing VHD. Parent: %(base_vhd_path)s, Target: "
"%(boot_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:130
+#: nova/virt/hyperv/vmops.py:131
msgid "Spawning new instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:153
+#: nova/virt/hyperv/vmops.py:154
msgid "Spawn instance failed"
msgstr ""
-#: nova/virt/hyperv/vmops.py:178
+#: nova/virt/hyperv/vmops.py:179
#, python-format
msgid "Creating nic for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:186
+#: nova/virt/hyperv/vmops.py:187
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:189
+#: nova/virt/hyperv/vmops.py:190
#, python-format
msgid "Using config drive for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:202 nova/virt/libvirt/driver.py:1532
+#: nova/virt/hyperv/vmops.py:203 nova/virt/libvirt/driver.py:1848
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:210 nova/virt/libvirt/driver.py:1538
+#: nova/virt/hyperv/vmops.py:211 nova/virt/libvirt/driver.py:1854
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:246
+#: nova/virt/hyperv/vmops.py:247
#, python-format
msgid "Got request to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:259
+#: nova/virt/hyperv/vmops.py:260
#, python-format
msgid "Instance not found: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:265
+#: nova/virt/hyperv/vmops.py:266
#, python-format
msgid "Failed to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:270
+#: nova/virt/hyperv/vmops.py:271
msgid "reboot instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:276
+#: nova/virt/hyperv/vmops.py:277
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:282
+#: nova/virt/hyperv/vmops.py:283
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:289
+#: nova/virt/hyperv/vmops.py:290
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:295
+#: nova/virt/hyperv/vmops.py:296
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:301
+#: nova/virt/hyperv/vmops.py:302
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:307
+#: nova/virt/hyperv/vmops.py:308
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:314
+#: nova/virt/hyperv/vmops.py:315
#, python-format
msgid "Successfully changed state of VM %(vm_name)s to: %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:318
+#: nova/virt/hyperv/vmops.py:319
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:74
+#: nova/virt/hyperv/vmutils.py:75
#, python-format
msgid "Cannot get VM summary data for: %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:151
+#: nova/virt/hyperv/vmutils.py:152
#, python-format
msgid "Creating VM %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:160
+#: nova/virt/hyperv/vmutils.py:161
#, python-format
msgid "Setting memory for vm %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:163
+#: nova/virt/hyperv/vmutils.py:164
#, python-format
msgid "Set vCPUs for vm %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:261
+#: nova/virt/hyperv/vmutils.py:262
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:324
+#: nova/virt/hyperv/vmutils.py:325
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:365
+#: nova/virt/hyperv/vmutils.py:366
#, python-format
msgid "Operation failed with return value: %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:383
+#: nova/virt/hyperv/vmutils.py:384
#, python-format
msgid ""
"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s"
" - %(err_desc)s - Error code: %(err_code)d"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:391
+#: nova/virt/hyperv/vmutils.py:392
#, python-format
msgid "WMI job failed with status %(job_state)d. Error details: %(error)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:395
+#: nova/virt/hyperv/vmutils.py:396
#, python-format
msgid "WMI job failed with status %(job_state)d. No error description available"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:401
+#: nova/virt/hyperv/vmutils.py:402
#, python-format
msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:90
+#: nova/virt/hyperv/volumeops.py:91
#, python-format
msgid "Attach_volume: %(connection_info)s to %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:120
+#: nova/virt/hyperv/volumeops.py:121
#, python-format
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:122
+#: nova/virt/hyperv/volumeops.py:123
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:136
+#: nova/virt/hyperv/volumeops.py:137
#, python-format
msgid "Detach_volume: %(connection_info)s from %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:145
+#: nova/virt/hyperv/volumeops.py:146
#, python-format
msgid "Detaching physical disk from instance: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:156 nova/virt/libvirt/driver.py:647
+#: nova/virt/hyperv/volumeops.py:157 nova/virt/libvirt/driver.py:893
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:167 nova/virt/hyperv/volumeops.py:181
+#: nova/virt/hyperv/volumeops.py:168 nova/virt/hyperv/volumeops.py:182
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:170
+#: nova/virt/hyperv/volumeops.py:171
#, python-format
msgid "Device number: %(device_number)s, target lun: %(target_lun)s"
msgstr ""
@@ -7224,204 +7500,250 @@ msgstr ""
msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:121
+#: nova/virt/libvirt/blockinfo.py:123
#, python-format
msgid "Unable to determine disk prefix for %s"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:165
+#: nova/virt/libvirt/blockinfo.py:170
#, python-format
msgid "No free disk device names for prefix '%s'"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:252
+#: nova/virt/libvirt/blockinfo.py:260
#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:354
+#: nova/virt/libvirt/driver.py:339
+#, python-format
+msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:545
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:360
+#: nova/virt/libvirt/driver.py:553
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:381
+#: nova/virt/libvirt/driver.py:570
+#, python-format
+msgid "URI %s does not support events"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:585
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:403 nova/virt/libvirt/driver.py:406
+#: nova/virt/libvirt/driver.py:607 nova/virt/libvirt/driver.py:610
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:424
+#: nova/virt/libvirt/driver.py:628
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:512
+#: nova/virt/libvirt/driver.py:718
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:526
+#: nova/virt/libvirt/driver.py:734
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:531
+#: nova/virt/libvirt/driver.py:739
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:554
+#: nova/virt/libvirt/driver.py:749
+msgid "Instance may be started again."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:758
+msgid "Going to destroy instance again."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:777
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:569
+#: nova/virt/libvirt/driver.py:792
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:583
+#: nova/virt/libvirt/driver.py:813
+msgid "Instance may be still running, destroy it again."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:819
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:601
+#: nova/virt/libvirt/driver.py:847
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:610
+#: nova/virt/libvirt/driver.py:856
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:762
+#: nova/virt/libvirt/driver.py:899
+msgid "Could not determine fibre channel world wide node names"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:906
+msgid "Could not determine fibre channel world wide port names"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:911
+msgid "No Volume Connector found."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1033
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:855
+#: nova/virt/libvirt/driver.py:1057
+msgid "attaching network adapter failed."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1077
+msgid "During detach_interface, instance disappeared."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1081
+msgid "detaching network adapter failed."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1170
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:858
+#: nova/virt/libvirt/driver.py:1173
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:887
+#: nova/virt/libvirt/driver.py:1202
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:899
+#: nova/virt/libvirt/driver.py:1214
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:972
+#: nova/virt/libvirt/driver.py:1287
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:976
+#: nova/virt/libvirt/driver.py:1291
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:1011
+#: nova/virt/libvirt/driver.py:1326
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1019
+#: nova/virt/libvirt/driver.py:1334
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:1061
+#: nova/virt/libvirt/driver.py:1376
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1204
+#: nova/virt/libvirt/driver.py:1519
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1211 nova/virt/powervm/operator.py:289
+#: nova/virt/libvirt/driver.py:1526 nova/virt/powervm/operator.py:219
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1227
+#: nova/virt/libvirt/driver.py:1542
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1264 nova/virt/libvirt/driver.py:1290
+#: nova/virt/libvirt/driver.py:1579 nova/virt/libvirt/driver.py:1605
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1279
+#: nova/virt/libvirt/driver.py:1594
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1348
+#: nova/virt/libvirt/driver.py:1663
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1352
+#: nova/virt/libvirt/driver.py:1667
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1356 nova/virt/libvirt/driver.py:1360
+#: nova/virt/libvirt/driver.py:1671 nova/virt/libvirt/driver.py:1675
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1426
+#: nova/virt/libvirt/driver.py:1741
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1523
+#: nova/virt/libvirt/driver.py:1839
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1571
+#: nova/virt/libvirt/driver.py:1887
#, python-format
msgid "Injecting %(inj)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1580
+#: nova/virt/libvirt/driver.py:1896
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1638
+#: nova/virt/libvirt/driver.py:1954
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1644
+#: nova/virt/libvirt/driver.py:1960
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1648
+#: nova/virt/libvirt/driver.py:1964
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1652
+#: nova/virt/libvirt/driver.py:1968
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1668
+#: nova/virt/libvirt/driver.py:1984
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1969
+#: nova/virt/libvirt/driver.py:2294
#, python-format
msgid ""
"Start to_xml instance=%(instance)s network_info=%(network_info)s "
@@ -7429,80 +7751,80 @@ msgid ""
"rescue=%(rescue)sblock_device_info=%(block_device_info)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1985
+#: nova/virt/libvirt/driver.py:2309
#, python-format
msgid "End to_xml instance=%(instance)s xml=%(xml)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2002
+#: nova/virt/libvirt/driver.py:2326
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2169
+#: nova/virt/libvirt/driver.py:2494
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2220
+#: nova/virt/libvirt/driver.py:2547
#, python-format
msgid "List of domains returned by libVirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2222
+#: nova/virt/libvirt/driver.py:2549
#, python-format
msgid "libVirt can't find a domain with id: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2304
+#: nova/virt/libvirt/driver.py:2613
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2387
+#: nova/virt/libvirt/driver.py:2696
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2411
+#: nova/virt/libvirt/driver.py:2720
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2415
+#: nova/virt/libvirt/driver.py:2724
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2531
+#: nova/virt/libvirt/driver.py:2858
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2539
+#: nova/virt/libvirt/driver.py:2866
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2576
+#: nova/virt/libvirt/driver.py:2903
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2601
+#: nova/virt/libvirt/driver.py:2928
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2613
+#: nova/virt/libvirt/driver.py:2940
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -7512,62 +7834,62 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2630
+#: nova/virt/libvirt/driver.py:2957
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2678
+#: nova/virt/libvirt/driver.py:3005
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2750
+#: nova/virt/libvirt/driver.py:3077
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2843
+#: nova/virt/libvirt/driver.py:3170
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2953
+#: nova/virt/libvirt/driver.py:3280
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2970
+#: nova/virt/libvirt/driver.py:3297
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:3019
+#: nova/virt/libvirt/driver.py:3335
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3067
+#: nova/virt/libvirt/driver.py:3381
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:3126
+#: nova/virt/libvirt/driver.py:3440
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:3132
+#: nova/virt/libvirt/driver.py:3446
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3188
+#: nova/virt/libvirt/driver.py:3502
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3301
+#: nova/virt/libvirt/driver.py:3615
#, python-format
msgid "Checking instance files accessability%(instance_path)s"
msgstr ""
@@ -7586,25 +7908,25 @@ msgstr ""
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:209
+#: nova/virt/libvirt/firewall.py:247
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:232
+#: nova/virt/libvirt/firewall.py:270
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:248
+#: nova/virt/libvirt/firewall.py:286
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:236
+#: nova/virt/libvirt/imagebackend.py:278
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:315
+#: nova/virt/libvirt/imagebackend.py:363
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
@@ -7624,140 +7946,148 @@ msgstr ""
msgid "Writing stored info to %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:294
+#: nova/virt/libvirt/imagecache.py:299
#, python-format
msgid "%s is a valid instance name"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:297
+#: nova/virt/libvirt/imagecache.py:302
#, python-format
msgid "%s has a disk file"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:299
+#: nova/virt/libvirt/imagecache.py:304
#, python-format
msgid "Instance %(instance)s is backed by %(backing)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:312
+#: nova/virt/libvirt/imagecache.py:317
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
"appear in the image service"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:388
+#: nova/virt/libvirt/imagecache.py:393
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:398
+#: nova/virt/libvirt/imagecache.py:403
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash "
"stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:407
+#: nova/virt/libvirt/imagecache.py:412
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:422
+#: nova/virt/libvirt/imagecache.py:427
#, python-format
msgid "Cannot remove %(base_file)s, it does not exist"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:434
+#: nova/virt/libvirt/imagecache.py:439
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:437
+#: nova/virt/libvirt/imagecache.py:442
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:444
+#: nova/virt/libvirt/imagecache.py:449
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:455
+#: nova/virt/libvirt/imagecache.py:460
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:479
+#: nova/virt/libvirt/imagecache.py:484
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
"%(remote)d on other nodes sharing this instance storage"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:491
+#: nova/virt/libvirt/imagecache.py:496
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): warning -- an absent base file is in "
"use! instances: %(instance_list)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:503
+#: nova/virt/libvirt/imagecache.py:508
#, python-format
msgid "image %(id)s at (%(base_file)s): image is not in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:510
+#: nova/virt/libvirt/imagecache.py:515
#, python-format
msgid "image %(id)s at (%(base_file)s): image is in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:534
+#: nova/virt/libvirt/imagecache.py:539
#, python-format
msgid "Skipping verification, no base directory at %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:538
+#: nova/virt/libvirt/imagecache.py:543
msgid "Verify base images"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:545
+#: nova/virt/libvirt/imagecache.py:550
#, python-format
msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:563
+#: nova/virt/libvirt/imagecache.py:568
#, python-format
msgid "Unknown base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:568
+#: nova/virt/libvirt/imagecache.py:573
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:571
+#: nova/virt/libvirt/imagecache.py:576
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:575
+#: nova/virt/libvirt/imagecache.py:580
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:583
+#: nova/virt/libvirt/imagecache.py:588
msgid "Verification complete"
msgstr ""
-#: nova/virt/libvirt/utils.py:124
+#: nova/virt/libvirt/utils.py:67
+msgid "systool is not installed"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:71
+msgid "Cannot find any Fibre Channel HBAs"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:213
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/utils.py:133
+#: nova/virt/libvirt/utils.py:222
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -7765,90 +8095,137 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:183
+#: nova/virt/libvirt/utils.py:253
#, python-format
msgid "vg %s must be LVM volume group"
msgstr ""
-#: nova/virt/libvirt/utils.py:210
+#: nova/virt/libvirt/utils.py:282
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:483
+#: nova/virt/libvirt/utils.py:555
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:276 nova/virt/libvirt/vif.py:395
-#: nova/virt/libvirt/vif.py:482
+#: nova/virt/libvirt/vif.py:263 nova/virt/libvirt/vif.py:382
+#: nova/virt/libvirt/vif.py:469
#, python-format
msgid ""
"vif_type=%(vif_type)s instance=%(instance)s network=%(network)s "
"mapping=%(mapping)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:282 nova/virt/libvirt/vif.py:401
-#: nova/virt/libvirt/vif.py:488
+#: nova/virt/libvirt/vif.py:269 nova/virt/libvirt/vif.py:388
+#: nova/virt/libvirt/vif.py:475
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
-#: nova/virt/libvirt/vif.py:303 nova/virt/libvirt/vif.py:414
-#: nova/virt/libvirt/vif.py:501
+#: nova/virt/libvirt/vif.py:290 nova/virt/libvirt/vif.py:401
+#: nova/virt/libvirt/vif.py:488
#, python-format
msgid "Unexpected vif_type=%s"
msgstr ""
-#: nova/virt/libvirt/vif.py:315
+#: nova/virt/libvirt/vif.py:302
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:325
+#: nova/virt/libvirt/vif.py:312
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:431 nova/virt/libvirt/vif.py:460
+#: nova/virt/libvirt/vif.py:418 nova/virt/libvirt/vif.py:447
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:228
+#: nova/virt/libvirt/volume.py:237
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:231
+#: nova/virt/libvirt/volume.py:240
#, python-format
msgid ""
"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
"number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:243
+#: nova/virt/libvirt/volume.py:252
#, python-format
msgid "Found iSCSI node %(disk_dev)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume.py:316 nova/virt/libvirt/volume.py:437
+#: nova/virt/libvirt/volume.py:488 nova/virt/libvirt/volume.py:609
#, python-format
msgid "%s is already mounted"
msgstr ""
-#: nova/virt/libvirt/volume.py:372
+#: nova/virt/libvirt/volume.py:544
#, python-format
msgid "AoE device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:374
+#: nova/virt/libvirt/volume.py:546
#, python-format
msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:388
+#: nova/virt/libvirt/volume.py:560
#, python-format
msgid "Found AoE device %(aoedevpath)s (after %(tries)s rediscover)"
msgstr ""
+#: nova/virt/libvirt/volume.py:688
+msgid "We are unable to locate any Fibre Channel devices"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:697
+#, python-format
+msgid "Looking for Fibre Channel dev %(device)s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:707
+msgid "Fibre Channel device not found."
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:710
+#, python-format
+msgid ""
+"Fibre volume not yet found at: %(mount_device)s. Will rescan & retry. "
+"Try number: %(tries)s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:726
+#, python-format
+msgid "Found Fibre Channel volume %(mount_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:733
+#, python-format
+msgid "Multipath device discovered %(device)s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:799
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:810
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:816
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:831
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
#: nova/virt/libvirt/volume_nfs.py:36
msgid ""
"The nova.virt.libvirt.volume_nfs.NfsVolumeDriver class is deprecated and "
@@ -7857,71 +8234,71 @@ msgid ""
"nova.virt.libvirt.volume.LibvirtNFSVolumeDriver."
msgstr ""
-#: nova/virt/powervm/blockdev.py:73
+#: nova/virt/powervm/blockdev.py:143
#, python-format
msgid "Removing the logical volume '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:91
+#: nova/virt/powervm/blockdev.py:161
#, python-format
msgid "Fetching image '%s' from glance"
msgstr ""
-#: nova/virt/powervm/blockdev.py:96
+#: nova/virt/powervm/blockdev.py:166
#, python-format
msgid "Using image found at '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:98
+#: nova/virt/powervm/blockdev.py:168
#, python-format
msgid "Ensuring image '%s' exists on IVM"
msgstr ""
-#: nova/virt/powervm/blockdev.py:109
+#: nova/virt/powervm/blockdev.py:179
#, python-format
msgid "Creating logical volume of size %s bytes"
msgstr ""
-#: nova/virt/powervm/blockdev.py:112
+#: nova/virt/powervm/blockdev.py:182
#, python-format
msgid "Copying image to the device '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:115
+#: nova/virt/powervm/blockdev.py:185
msgid "Error while creating logical volume from image. Will attempt cleanup."
msgstr ""
-#: nova/virt/powervm/blockdev.py:122
+#: nova/virt/powervm/blockdev.py:192
msgid "Error while attempting cleanup of failed deploy to logical volume."
msgstr ""
-#: nova/virt/powervm/blockdev.py:158
+#: nova/virt/powervm/blockdev.py:228
msgid "Snapshot added to glance."
msgstr ""
-#: nova/virt/powervm/blockdev.py:164
+#: nova/virt/powervm/blockdev.py:234
#, python-format
msgid "Failed to clean up snapshot file %(snapshot_file_path)s"
msgstr ""
-#: nova/virt/powervm/blockdev.py:249
+#: nova/virt/powervm/blockdev.py:319
msgid "Could not create logical volume. No space left on any volume group."
msgstr ""
-#: nova/virt/powervm/blockdev.py:337 nova/virt/powervm/blockdev.py:409
+#: nova/virt/powervm/blockdev.py:407 nova/virt/powervm/blockdev.py:479
msgid "Unable to get checksum"
msgstr ""
-#: nova/virt/powervm/blockdev.py:340 nova/virt/powervm/blockdev.py:432
+#: nova/virt/powervm/blockdev.py:410 nova/virt/powervm/blockdev.py:502
msgid "Image checksums do not match"
msgstr ""
-#: nova/virt/powervm/blockdev.py:361
+#: nova/virt/powervm/blockdev.py:431
#, python-format
msgid "Image found on host at '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:369
+#: nova/virt/powervm/blockdev.py:439
msgid "Uncompressed image file not found"
msgstr ""
@@ -8034,79 +8411,77 @@ msgid ""
"Continuing without storing"
msgstr ""
-#: nova/virt/powervm/operator.py:101
+#: nova/virt/powervm/operator.py:99
#, python-format
msgid "LPAR instance '%s' not found"
msgstr ""
-#: nova/virt/powervm/operator.py:185 nova/virt/powervm/operator.py:394
-#: nova/virt/powervm/operator.py:436
-msgid "Not enough free memory in the host"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:195 nova/virt/powervm/operator.py:405
-#: nova/virt/powervm/operator.py:442
-msgid "Insufficient available CPU on PowerVM"
+#: nova/virt/powervm/operator.py:186
+#, python-format
+msgid "PowerVM image creation failed: %s"
msgstr ""
-#: nova/virt/powervm/operator.py:241 nova/virt/powervm/operator.py:273
+#: nova/virt/powervm/operator.py:200
#, python-format
msgid "Creating LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:246 nova/virt/powervm/operator.py:275
+#: nova/virt/powervm/operator.py:202
#, python-format
msgid "LPAR instance '%s' creation failed"
msgstr ""
-#: nova/virt/powervm/operator.py:263
-#, python-format
-msgid "PowerVM image creation failed: %s"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:280
+#: nova/virt/powervm/operator.py:207
#, python-format
msgid "Activating the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:294
+#: nova/virt/powervm/operator.py:224
#, python-format
msgid "Instance '%s' failed to boot"
msgstr ""
-#: nova/virt/powervm/operator.py:306
+#: nova/virt/powervm/operator.py:236
msgid "Error while attempting to clean up failed instance launch."
msgstr ""
-#: nova/virt/powervm/operator.py:310
+#: nova/virt/powervm/operator.py:240
#, python-format
msgid "Instance spawned in %s seconds"
msgstr ""
-#: nova/virt/powervm/operator.py:321
+#: nova/virt/powervm/operator.py:251
#, python-format
msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
msgstr ""
-#: nova/virt/powervm/operator.py:337
+#: nova/virt/powervm/operator.py:267
#, python-format
msgid "Stopping instance %s for snapshot."
msgstr ""
-#: nova/virt/powervm/operator.py:360
+#: nova/virt/powervm/operator.py:290
#, python-format
msgid "Shutting down the instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:369
+#: nova/virt/powervm/operator.py:295
#, python-format
msgid "Deleting the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:372
+#: nova/virt/powervm/operator.py:307
msgid "PowerVM instance cleanup failed"
msgstr ""
+#: nova/virt/powervm/operator.py:329 nova/virt/powervm/operator.py:385
+msgid "Not enough free memory in the host"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:340 nova/virt/powervm/operator.py:391
+msgid "Insufficient available CPU on PowerVM"
+msgstr ""
+
#: nova/virt/vmwareapi/driver.py:137
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
@@ -8226,99 +8601,99 @@ msgstr ""
msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:84
+#: nova/virt/vmwareapi/vim.py:85
msgid "Unable to import suds."
msgstr ""
-#: nova/virt/vmwareapi/vim.py:90
+#: nova/virt/vmwareapi/vim.py:91
msgid "Must specify vmwareapi_wsdl_loc"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:141
+#: nova/virt/vmwareapi/vim.py:142
#, python-format
msgid "No such SOAP method '%s' provided by VI SDK"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:146
+#: nova/virt/vmwareapi/vim.py:147
#, python-format
msgid "httplib error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:153
+#: nova/virt/vmwareapi/vim.py:154
#, python-format
msgid "Socket error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:158
+#: nova/virt/vmwareapi/vim.py:159
#, python-format
msgid "Type error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:162
+#: nova/virt/vmwareapi/vim.py:163
#, python-format
msgid "Exception in %s "
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:94
+#: nova/virt/vmwareapi/vmops.py:96
msgid "Getting list of instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:110
+#: nova/virt/vmwareapi/vmops.py:112
#, python-format
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:205
+#: nova/virt/vmwareapi/vmops.py:207
msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:213
+#: nova/virt/vmwareapi/vmops.py:215
msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:237
+#: nova/virt/vmwareapi/vmops.py:239
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:256
+#: nova/virt/vmwareapi/vmops.py:258
#, python-format
msgid ""
"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:265
+#: nova/virt/vmwareapi/vmops.py:267
#, python-format
msgid ""
"Deleting the file %(vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:279
+#: nova/virt/vmwareapi/vmops.py:281
#, python-format
msgid ""
"Deleted the file %(vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:287
+#: nova/virt/vmwareapi/vmops.py:289
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:307
+#: nova/virt/vmwareapi/vmops.py:309
#, python-format
msgid ""
"Downloaded image file data %(image_ref)s to %(upload_vmdk_name)s on the "
"ESX data store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:321
+#: nova/virt/vmwareapi/vmops.py:323
#, python-format
msgid ""
"Copying Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter type"
@@ -8326,281 +8701,281 @@ msgid ""
" type %(disk_type)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:342
+#: nova/virt/vmwareapi/vmops.py:344
#, python-format
msgid ""
"Copied Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:421
+#: nova/virt/vmwareapi/vmops.py:423
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:427
+#: nova/virt/vmwareapi/vmops.py:429
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:473
+#: nova/virt/vmwareapi/vmops.py:475
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:483
+#: nova/virt/vmwareapi/vmops.py:485
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:528
+#: nova/virt/vmwareapi/vmops.py:530
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:541
+#: nova/virt/vmwareapi/vmops.py:543
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:550
+#: nova/virt/vmwareapi/vmops.py:552
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:564
+#: nova/virt/vmwareapi/vmops.py:566
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:577
+#: nova/virt/vmwareapi/vmops.py:579
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:586
+#: nova/virt/vmwareapi/vmops.py:588
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:618
+#: nova/virt/vmwareapi/vmops.py:620
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:625
+#: nova/virt/vmwareapi/vmops.py:627
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:628
+#: nova/virt/vmwareapi/vmops.py:630
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:630
+#: nova/virt/vmwareapi/vmops.py:632
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:634
+#: nova/virt/vmwareapi/vmops.py:636
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:646 nova/virt/vmwareapi/vmops.py:678
-#: nova/virt/vmwareapi/vmops.py:971
+#: nova/virt/vmwareapi/vmops.py:648 nova/virt/vmwareapi/vmops.py:680
+#: nova/virt/vmwareapi/vmops.py:973
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:652 nova/virt/vmwareapi/vmops.py:975
+#: nova/virt/vmwareapi/vmops.py:654 nova/virt/vmwareapi/vmops.py:977
msgid "Destroying the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:657 nova/virt/vmwareapi/vmops.py:980
+#: nova/virt/vmwareapi/vmops.py:659 nova/virt/vmwareapi/vmops.py:982
msgid "Destroyed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:659
+#: nova/virt/vmwareapi/vmops.py:661
#, python-format
msgid "In vmwareapi:vmops:delete, got this exception while destroying the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:697 nova/virt/vmwareapi/vmops.py:850
+#: nova/virt/vmwareapi/vmops.py:699 nova/virt/vmwareapi/vmops.py:852
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:702 nova/virt/vmwareapi/vmops.py:855
+#: nova/virt/vmwareapi/vmops.py:704 nova/virt/vmwareapi/vmops.py:857
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:706
+#: nova/virt/vmwareapi/vmops.py:708
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:709
+#: nova/virt/vmwareapi/vmops.py:711
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:711
+#: nova/virt/vmwareapi/vmops.py:713
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:724
+#: nova/virt/vmwareapi/vmops.py:726
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:736
+#: nova/virt/vmwareapi/vmops.py:738
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:741
+#: nova/virt/vmwareapi/vmops.py:743
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:749
+#: nova/virt/vmwareapi/vmops.py:751
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:753
+#: nova/virt/vmwareapi/vmops.py:755
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:767
+#: nova/virt/vmwareapi/vmops.py:769
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:771
+#: nova/virt/vmwareapi/vmops.py:773
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:774
+#: nova/virt/vmwareapi/vmops.py:776
msgid "instance is powered off and cannot be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:777
+#: nova/virt/vmwareapi/vmops.py:779
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:790
+#: nova/virt/vmwareapi/vmops.py:792
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:795
+#: nova/virt/vmwareapi/vmops.py:797
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:797
+#: nova/virt/vmwareapi/vmops.py:799
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:858
+#: nova/virt/vmwareapi/vmops.py:860
msgid "instance is suspended and cannot be powered off."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:861
+#: nova/virt/vmwareapi/vmops.py:863
msgid "VM was already in powered off state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:874
+#: nova/virt/vmwareapi/vmops.py:876
msgid "VM was already in powered on state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:878
+#: nova/virt/vmwareapi/vmops.py:880
msgid "Powering on the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:883
+#: nova/virt/vmwareapi/vmops.py:885
msgid "Powered on the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:900
+#: nova/virt/vmwareapi/vmops.py:902
#, python-format
msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:931
+#: nova/virt/vmwareapi/vmops.py:933
#, python-format
msgid "Renaming the VM to %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:937
+#: nova/virt/vmwareapi/vmops.py:939
#, python-format
msgid "Renamed the VM to %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:952
+#: nova/virt/vmwareapi/vmops.py:954
#, python-format
msgid "Cloning VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:960
+#: nova/virt/vmwareapi/vmops.py:962
#, python-format
msgid "Cloned VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:982
+#: nova/virt/vmwareapi/vmops.py:984
#, python-format
msgid ""
"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
" the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:997
+#: nova/virt/vmwareapi/vmops.py:999
#, python-format
msgid "Renaming the VM from %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1003
+#: nova/virt/vmwareapi/vmops.py:1005
#, python-format
msgid "Renamed the VM from %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1026
+#: nova/virt/vmwareapi/vmops.py:1028
#, python-format
msgid "Migrating VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1038
+#: nova/virt/vmwareapi/vmops.py:1040
#, python-format
msgid "Migrated VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1048 nova/virt/xenapi/vmops.py:1263
+#: nova/virt/vmwareapi/vmops.py:1050 nova/virt/xenapi/vmops.py:1263
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1052
+#: nova/virt/vmwareapi/vmops.py:1054
#, python-format
msgid "Automatically hard rebooting %d"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1088
+#: nova/virt/vmwareapi/vmops.py:1090
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1170
+#: nova/virt/vmwareapi/vmops.py:1172
msgid "Reconfiguring VM instance to set the machine id"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1176
+#: nova/virt/vmwareapi/vmops.py:1178
msgid "Reconfigured VM instance to set the machine id"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1190
+#: nova/virt/vmwareapi/vmops.py:1192
#, python-format
msgid "Reconfiguring VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1197
+#: nova/virt/vmwareapi/vmops.py:1199
#, python-format
msgid "Reconfigured VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1290
+#: nova/virt/vmwareapi/vmops.py:1292
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1296
+#: nova/virt/vmwareapi/vmops.py:1298
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -8721,103 +9096,103 @@ msgstr ""
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:87 nova/virt/xenapi/vmops.py:1506
+#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1506
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:91 nova/virt/xenapi/vmops.py:1510
+#: nova/virt/xenapi/agent.py:92 nova/virt/xenapi/vmops.py:1510
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:96 nova/virt/xenapi/vmops.py:1515
+#: nova/virt/xenapi/agent.py:97 nova/virt/xenapi/vmops.py:1515
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:106
+#: nova/virt/xenapi/agent.py:107
#, python-format
msgid ""
"The agent call to %(method)s returned an invalid response: %(ret)r. "
"path=%(path)s; args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:116
+#: nova/virt/xenapi/agent.py:117
#, python-format
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:135
+#: nova/virt/xenapi/agent.py:136
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:149
+#: nova/virt/xenapi/agent.py:150
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:157
+#: nova/virt/xenapi/agent.py:158
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:165
+#: nova/virt/xenapi/agent.py:166
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:179
+#: nova/virt/xenapi/agent.py:180
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:190
+#: nova/virt/xenapi/agent.py:191
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:210
+#: nova/virt/xenapi/agent.py:211
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:227
+#: nova/virt/xenapi/agent.py:228
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:240
+#: nova/virt/xenapi/agent.py:241
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:247
+#: nova/virt/xenapi/agent.py:248
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:253
+#: nova/virt/xenapi/agent.py:254
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:276
+#: nova/virt/xenapi/agent.py:277
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not bemanipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:284
+#: nova/virt/xenapi/agent.py:285
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:288
+#: nova/virt/xenapi/agent.py:289
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:340
+#: nova/virt/xenapi/agent.py:341
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
@@ -8887,38 +9262,38 @@ msgid ""
"with the wrong number of arguments"
msgstr ""
-#: nova/virt/xenapi/host.py:69
+#: nova/virt/xenapi/host.py:68
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
msgstr ""
-#: nova/virt/xenapi/host.py:81
+#: nova/virt/xenapi/host.py:80
#, python-format
msgid "Aggregate for host %(host)s count not be found."
msgstr ""
-#: nova/virt/xenapi/host.py:102
+#: nova/virt/xenapi/host.py:101
#, python-format
msgid "Unable to migrate VM %(vm_ref)sfrom %(host)s"
msgstr ""
-#: nova/virt/xenapi/host.py:157
+#: nova/virt/xenapi/host.py:156
#, python-format
msgid "Unable to get SR for this host: %s"
msgstr ""
-#: nova/virt/xenapi/host.py:192
+#: nova/virt/xenapi/host.py:191
#, python-format
msgid "Failed to extract instance support from %s"
msgstr ""
-#: nova/virt/xenapi/host.py:209
+#: nova/virt/xenapi/host.py:208
msgid "Unable to get updated status"
msgstr ""
-#: nova/virt/xenapi/host.py:212
+#: nova/virt/xenapi/host.py:211
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr ""
@@ -8938,54 +9313,54 @@ msgstr ""
msgid "Found no network for bridge %s"
msgstr ""
-#: nova/virt/xenapi/pool.py:70
+#: nova/virt/xenapi/pool.py:71
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
"%(host)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:157
+#: nova/virt/xenapi/pool.py:158
#, python-format
msgid "Unable to eject %(host)s from the pool; pool not empty"
msgstr ""
-#: nova/virt/xenapi/pool.py:174
+#: nova/virt/xenapi/pool.py:175
#, python-format
msgid "Unable to eject %(host)s from the pool; No master found"
msgstr ""
-#: nova/virt/xenapi/pool.py:191
+#: nova/virt/xenapi/pool.py:192
#, python-format
msgid "Pool-Join failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:194
+#: nova/virt/xenapi/pool.py:195
#, python-format
msgid "Unable to join %(host)s in the pool"
msgstr ""
-#: nova/virt/xenapi/pool.py:210
+#: nova/virt/xenapi/pool.py:211
#, python-format
msgid "Pool-eject failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:222
+#: nova/virt/xenapi/pool.py:223
#, python-format
msgid "Unable to set up pool: %(e)s."
msgstr ""
-#: nova/virt/xenapi/pool.py:233
+#: nova/virt/xenapi/pool.py:234
#, python-format
msgid "Pool-set_name_label failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/vif.py:102
+#: nova/virt/xenapi/vif.py:103
#, python-format
msgid "Found no PIF for device %s"
msgstr ""
-#: nova/virt/xenapi/vif.py:121
+#: nova/virt/xenapi/vif.py:122
#, python-format
msgid ""
"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
@@ -9684,7 +10059,7 @@ msgstr ""
msgid "Skipping detach because VBD for %(instance_name)s was not found"
msgstr ""
-#: nova/virt/xenapi/imageupload/glance.py:34
+#: nova/virt/xenapi/imageupload/glance.py:35
#, python-format
msgid "Asking xapi to upload to glance %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 29b882a02..5c20f9ce8 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -31,6 +31,7 @@ from nova import db
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -41,9 +42,9 @@ LOG = logging.getLogger(__name__)
linux_net_opts = [
- cfg.StrOpt('dhcpbridge_flagfile',
- default='/etc/nova/nova-dhcpbridge.conf',
- help='location of flagfile for dhcpbridge'),
+ cfg.MultiStrOpt('dhcpbridge_flagfile',
+ default=['/etc/nova/nova-dhcpbridge.conf'],
+ help='location of flagfiles for dhcpbridge'),
cfg.StrOpt('networks_path',
default=paths.state_path_def('networks'),
help='Location to keep network config files'),
@@ -994,7 +995,7 @@ def restart_dhcp(context, dev, network_ref):
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['env',
- 'CONFIG_FILE=%s' % CONF.dhcpbridge_flagfile,
+ 'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index a177c1ac0..851a544dc 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -72,6 +72,7 @@ quantum_opts = [
CONF = cfg.CONF
CONF.register_opts(quantum_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
+CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index d0d6b5f99..82263f85f 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -166,7 +166,8 @@ class NetworkAPI(rpc_proxy.RpcProxy):
return self.call(ctxt, self.make_msg('allocate_for_instance',
instance_id=instance_id, project_id=project_id, host=host,
rxtx_factor=rxtx_factor, vpn=vpn,
- requested_networks=requested_networks, macs=macs),
+ requested_networks=requested_networks,
+ macs=jsonutils.to_primitive(macs)),
topic=topic, version='1.9')
def deallocate_for_instance(self, ctxt, instance_id, project_id, host):
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
deleted file mode 100644
index c35dcb845..000000000
--- a/nova/openstack/common/cfg.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config.cfg import *
diff --git a/nova/openstack/common/db/api.py b/nova/openstack/common/db/api.py
new file mode 100644
index 000000000..90a200875
--- /dev/null
+++ b/nova/openstack/common/db/api.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Multiple DB API backend support.
+
+Supported configuration options:
+
+`db_backend`: DB backend name or full module path to DB backend module.
+`dbapi_use_tpool`: Enable thread pooling of DB API calls.
+
+A DB backend module should implement a method named 'get_backend' which
+takes no arguments. The method can return any object that implements DB
+API methods.
+
+*NOTE*: There are bugs in eventlet when using tpool combined with
+threading locks. The python logging module happens to use such locks. To
+work around this issue, be sure to specify thread=False with
+eventlet.monkey_patch().
+
+A bug for eventlet has been filed here:
+
+https://bitbucket.org/eventlet/eventlet/issue/137/
+"""
+import functools
+
+from oslo.config import cfg
+
+from nova.openstack.common import lockutils
+from nova.openstack.common import importutils
+
+
+db_opts = [
+ cfg.StrOpt('db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ cfg.BoolOpt('dbapi_use_tpool',
+ default=False,
+ help='Enable the experimental use of thread pooling for '
+ 'all DB API calls')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+
+
+class DBAPI(object):
+ def __init__(self, backend_mapping=None):
+ if backend_mapping is None:
+ backend_mapping = {}
+ self.__backend = None
+ self.__backend_mapping = backend_mapping
+
+ @lockutils.synchronized('dbapi_backend', 'nova-')
+ def __get_backend(self):
+ """Get the actual backend. May be a module or an instance of
+ a class. Doesn't matter to us. We do this synchronized as it's
+ possible multiple greenthreads started very quickly trying to do
+ DB calls and eventlet can switch threads before self.__backend gets
+ assigned.
+ """
+ if self.__backend:
+ # Another thread assigned it
+ return self.__backend
+ backend_name = CONF.db_backend
+ self.__use_tpool = CONF.dbapi_use_tpool
+ if self.__use_tpool:
+ from eventlet import tpool
+ self.__tpool = tpool
+ # Import the untranslated name if we don't have a
+ # mapping.
+ backend_path = self.__backend_mapping.get(backend_name,
+ backend_name)
+ backend_mod = importutils.import_module(backend_path)
+ self.__backend = backend_mod.get_backend()
+ return self.__backend
+
+ def __getattr__(self, key):
+ backend = self.__backend or self.__get_backend()
+ attr = getattr(backend, key)
+ if not self.__use_tpool or not hasattr(attr, '__call__'):
+ return attr
+
+ def tpool_wrapper(*args, **kwargs):
+ return self.__tpool.execute(attr, *args, **kwargs)
+
+ functools.update_wrapper(tpool_wrapper, attr)
+ return tpool_wrapper
diff --git a/nova/openstack/common/db/exception.py b/nova/openstack/common/db/exception.py
new file mode 100644
index 000000000..61ba1b3a9
--- /dev/null
+++ b/nova/openstack/common/db/exception.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""DB related custom exceptions."""
+
+from nova.openstack.common.gettextutils import _
+
+
+class DBError(Exception):
+ """Wraps an implementation specific exception."""
+ def __init__(self, inner_exception=None):
+ self.inner_exception = inner_exception
+ super(DBError, self).__init__(str(inner_exception))
+
+
+class DBDuplicateEntry(DBError):
+ """Wraps an implementation specific exception."""
+ def __init__(self, columns=[], inner_exception=None):
+ self.columns = columns
+ super(DBDuplicateEntry, self).__init__(inner_exception)
+
+
+class DBDeadlock(DBError):
+ def __init__(self, inner_exception=None):
+ super(DBDeadlock, self).__init__(inner_exception)
+
+
+class DBInvalidUnicodeParameter(Exception):
+ message = _("Invalid Parameter: "
+ "Unicode is not supported by the current database.")
diff --git a/nova/openstack/common/db/sqlalchemy/session.py b/nova/openstack/common/db/sqlalchemy/session.py
index 93bfa9b15..cf6713581 100644
--- a/nova/openstack/common/db/sqlalchemy/session.py
+++ b/nova/openstack/common/db/sqlalchemy/session.py
@@ -244,23 +244,19 @@ import os.path
import re
import time
-from eventlet import db_pool
from eventlet import greenthread
from oslo.config import cfg
-from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
+from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-from nova.openstack.common import importutils
+from nova.openstack.common.db import exception
from nova.openstack.common import log as logging
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
- from MySQLdb.constants import CLIENT as mysql_client_constants
sql_opts = [
cfg.StrOpt('sql_connection',
@@ -303,9 +299,6 @@ sql_opts = [
cfg.BoolOpt('sql_connection_trace',
default=False,
help='Add python stack traces to SQL as comment strings'),
- cfg.BoolOpt('sql_dbpool_enable',
- default=False,
- help="enable the use of eventlet's db_pool for MySQL"),
]
CONF = cfg.CONF
@@ -335,25 +328,6 @@ def get_session(autocommit=True, expire_on_commit=False):
return session
-class DBError(Exception):
- """Wraps an implementation specific exception."""
- def __init__(self, inner_exception=None):
- self.inner_exception = inner_exception
- super(DBError, self).__init__(str(inner_exception))
-
-
-class DBDuplicateEntry(DBError):
- """Wraps an implementation specific exception."""
- def __init__(self, columns=[], inner_exception=None):
- self.columns = columns
- super(DBDuplicateEntry, self).__init__(inner_exception)
-
-
-class InvalidUnicodeParameter(Exception):
- message = _("Invalid Parameter: "
- "Unicode is not supported by the current database.")
-
-
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
@@ -372,7 +346,7 @@ class InvalidUnicodeParameter(Exception):
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
-_RE_DB = {
+_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
@@ -398,7 +372,7 @@ def raise_if_duplicate_entry_error(integrity_error, engine_name):
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
- m = _RE_DB[engine_name].match(integrity_error.message)
+ m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
@@ -407,7 +381,32 @@ def raise_if_duplicate_entry_error(integrity_error, engine_name):
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
- raise DBDuplicateEntry(columns, integrity_error)
+ raise exception.DBDuplicateEntry(columns, integrity_error)
+
+
+# NOTE(comstud): In current versions of DB backends, Deadlock violation
+# messages follow the structure:
+#
+# mysql:
+# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
+# 'restarting transaction') <query_str> <query_args>
+_DEADLOCK_RE_DB = {
+ "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
+}
+
+
+def raise_if_deadlock_error(operational_error, engine_name):
+ """
+ Raise DBDeadlock exception if OperationalError contains a Deadlock
+ condition.
+ """
+ re = _DEADLOCK_RE_DB.get(engine_name)
+ if re is None:
+ return
+ m = re.match(operational_error.message)
+ if not m:
+ return
+ raise exception.DBDeadlock(operational_error)
def wrap_db_error(f):
@@ -415,21 +414,26 @@ def wrap_db_error(f):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
- raise InvalidUnicodeParameter()
+ raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
- except IntegrityError, e:
+ except sqla_exc.OperationalError, e:
+ raise_if_deadlock_error(e, get_engine().name)
+ # NOTE(comstud): A lot of code is checking for OperationalError
+ # so let's not wrap it for now.
+ raise
+ except sqla_exc.IntegrityError, e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
raise_if_duplicate_entry_error(e, get_engine().name)
- raise DBError(e)
+ raise exception.DBError(e)
except Exception, e:
LOG.exception(_('DB exception wrapped.'))
- raise DBError(e)
+ raise exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
@@ -479,7 +483,7 @@ def ping_listener(dbapi_conn, connection_rec, connection_proxy):
except dbapi_conn.OperationalError, ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
- raise DisconnectionError("Database server went away")
+ raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
@@ -517,33 +521,6 @@ def create_engine(sql_connection):
if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
- elif all((CONF.sql_dbpool_enable, MySQLdb,
- "mysql" in connection_dict.drivername)):
- LOG.info(_("Using mysql/eventlet db_pool."))
- # MySQLdb won't accept 'None' in the password field
- password = connection_dict.password or ''
- pool_args = {
- 'db': connection_dict.database,
- 'passwd': password,
- 'host': connection_dict.host,
- 'user': connection_dict.username,
- 'min_size': CONF.sql_min_pool_size,
- 'max_size': CONF.sql_max_pool_size,
- 'max_idle': CONF.sql_idle_timeout,
- 'client_flag': mysql_client_constants.FOUND_ROWS}
-
- pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
- def creator():
- conn = pool.create()
- if isinstance(conn, tuple):
- # NOTE(belliott) eventlet >= 0.10 returns a tuple
- now, now, conn = conn
-
- return conn
-
- engine_args['creator'] = creator
-
else:
engine_args['pool_size'] = CONF.sql_max_pool_size
if CONF.sql_max_overflow is not None:
@@ -567,7 +544,7 @@ def create_engine(sql_connection):
try:
engine.connect()
- except OperationalError, e:
+ except sqla_exc.OperationalError, e:
if not is_db_connection_error(e.args[0]):
raise
@@ -583,7 +560,7 @@ def create_engine(sql_connection):
try:
engine.connect()
break
- except OperationalError, e:
+ except sqla_exc.OperationalError, e:
if (remaining != 'infinite' and remaining == 0) or \
not is_db_connection_error(e.args[0]):
raise
diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index 61ceded43..8b81ebf8e 100644
--- a/nova/openstack/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -51,12 +51,20 @@ def _print_greenthreads():
print
+def _print_nativethreads():
+ for threadId, stack in sys._current_frames().items():
+ print threadId
+ traceback.print_stack(stack)
+ print
+
+
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
+ 'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
diff --git a/nova/openstack/common/processutils.py b/nova/openstack/common/processutils.py
new file mode 100644
index 000000000..297f911c4
--- /dev/null
+++ b/nova/openstack/common/processutils.py
@@ -0,0 +1,135 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+System-level utilities and helper functions.
+"""
+
+import logging
+import random
+import shlex
+
+from eventlet.green import subprocess
+from eventlet import greenthread
+
+from nova.openstack.common.gettextutils import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class UnknownArgumentError(Exception):
+ def __init__(self, message=None):
+ super(UnknownArgumentError, self).__init__(message)
+
+
+class ProcessExecutionError(Exception):
+ def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
+ description=None):
+ if description is None:
+ description = "Unexpected error while running command."
+ if exit_code is None:
+ exit_code = '-'
+ message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
+ % (description, cmd, exit_code, stdout, stderr))
+ super(ProcessExecutionError, self).__init__(message)
+
+
+def execute(*cmd, **kwargs):
+ """
+ Helper method to shell out and execute a command through subprocess with
+ optional retry.
+
+ :param cmd: Passed to subprocess.Popen.
+ :type cmd: string
+ :param process_input: Send to opened process.
+    :type process_input: string
+ :param check_exit_code: Defaults to 0. Will raise
+ :class:`ProcessExecutionError`
+ if the command exits without returning this value
+ as a returncode
+ :type check_exit_code: int
+ :param delay_on_retry: True | False. Defaults to True. If set to True,
+ wait a short amount of time before retrying.
+ :type delay_on_retry: boolean
+ :param attempts: How many times to retry cmd.
+ :type attempts: int
+ :param run_as_root: True | False. Defaults to False. If set to True,
+ the command is prefixed by the command specified
+ in the root_helper kwarg.
+ :type run_as_root: boolean
+ :param root_helper: command to prefix all cmd's with
+ :type root_helper: string
+ :returns: (stdout, stderr) from process execution
+ :raises: :class:`UnknownArgumentError` on
+ receiving unknown arguments
+ :raises: :class:`ProcessExecutionError`
+ """
+
+ process_input = kwargs.pop('process_input', None)
+ check_exit_code = kwargs.pop('check_exit_code', 0)
+ delay_on_retry = kwargs.pop('delay_on_retry', True)
+ attempts = kwargs.pop('attempts', 1)
+ run_as_root = kwargs.pop('run_as_root', False)
+ root_helper = kwargs.pop('root_helper', '')
+ if len(kwargs):
+ raise UnknownArgumentError(_('Got unknown keyword args '
+ 'to utils.execute: %r') % kwargs)
+ if run_as_root:
+ cmd = shlex.split(root_helper) + list(cmd)
+ cmd = map(str, cmd)
+
+ while attempts > 0:
+ attempts -= 1
+ try:
+ LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
+ _PIPE = subprocess.PIPE # pylint: disable=E1101
+ obj = subprocess.Popen(cmd,
+ stdin=_PIPE,
+ stdout=_PIPE,
+ stderr=_PIPE,
+ close_fds=True)
+ result = None
+ if process_input is not None:
+ result = obj.communicate(process_input)
+ else:
+ result = obj.communicate()
+ obj.stdin.close() # pylint: disable=E1101
+ _returncode = obj.returncode # pylint: disable=E1101
+ if _returncode:
+ LOG.debug(_('Result was %s') % _returncode)
+ if (isinstance(check_exit_code, int) and
+ not isinstance(check_exit_code, bool) and
+ _returncode != check_exit_code):
+ (stdout, stderr) = result
+ raise ProcessExecutionError(exit_code=_returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=' '.join(cmd))
+ return result
+ except ProcessExecutionError:
+ if not attempts:
+ raise
+ else:
+ LOG.debug(_('%r failed. Retrying.'), cmd)
+ if delay_on_retry:
+ greenthread.sleep(random.randint(20, 200) / 100.0)
+ finally:
+ # NOTE(termie): this appears to be necessary to let the subprocess
+ # call clean something up in between calls, without
+ # it two execute calls in a row hangs the second one
+ greenthread.sleep(0)
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index ff72c3f8e..991820b7c 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -34,6 +34,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import local
+
LOG = logging.getLogger(__name__)
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index 178e3fc19..55fc5b044 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -49,8 +49,8 @@ deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
- 'nova.version': <RPC Envelope Version as a String>,
- 'nova.message': <Application Message Payload, JSON encoded>
+ 'oslo.version': <RPC Envelope Version as a String>,
+ 'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
@@ -66,8 +66,8 @@ to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
-_VERSION_KEY = 'nova.version'
-_MESSAGE_KEY = 'nova.message'
+_VERSION_KEY = 'oslo.version'
+_MESSAGE_KEY = 'oslo.message'
# TODO(russellb) Turn this on after Grizzly.
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 5e1846f91..180685cf3 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -198,6 +198,7 @@ class DirectConsumer(ConsumerBase):
"""
# Default options
options = {'durable': False,
+ 'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 20a7f923d..c1cca34e8 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -216,12 +216,18 @@ class ZmqClient(object):
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
- def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
+ def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
- if serialize:
- data = rpc_common.serialize_msg(data, force_envelope)
- self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data))))
+ if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
+ self.outq.send(map(bytes,
+ (msg_id, topic, 'cast', _serialize(data))))
+ return
+
+ rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
+ zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
+ self.outq.send(map(bytes,
+ (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
@@ -320,7 +326,7 @@ class ConsumerBase(object):
else:
return [result]
- def process(self, style, target, proxy, ctx, data):
+ def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
@@ -432,12 +438,14 @@ class ZmqProxy(ZmqBaseReactor):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
- msg_id, topic, style, in_msg = data
- topic = topic.split('.', 1)[0]
+ topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
- if topic.startswith('fanout~') or topic.startswith('zmq_replies'):
+ if topic.startswith('fanout~'):
+ sock_type = zmq.PUB
+ topic = topic.split('.', 1)[0]
+ elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
@@ -520,6 +528,21 @@ class ZmqProxy(ZmqBaseReactor):
super(ZmqProxy, self).consume_in_thread()
+def unflatten_envelope(packenv):
+ """Unflattens the RPC envelope.
+ Takes a list and returns a dictionary.
+    e.g. [1,2,3,4] => {1: 2, 3: 4}
+ """
+ i = iter(packenv)
+ h = {}
+ try:
+ while True:
+ k = i.next()
+ h[k] = i.next()
+ except StopIteration:
+ return h
+
+
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
@@ -540,38 +563,50 @@ class ZmqReactor(ZmqBaseReactor):
self.mapping[sock].send(data)
return
- msg_id, topic, style, in_msg = data
+ proxy = self.proxies[sock]
- ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
- ctx = RpcContext.unmarshal(ctx)
+ if data[2] == 'cast': # Legacy protocol
+ packenv = data[3]
- proxy = self.proxies[sock]
+ ctx, msg = _deserialize(packenv)
+ request = rpc_common.deserialize_msg(msg)
+ ctx = RpcContext.unmarshal(ctx)
+ elif data[2] == 'impl_zmq_v2':
+ packenv = data[4:]
+
+ msg = unflatten_envelope(packenv)
+ request = rpc_common.deserialize_msg(msg)
+
+ # Unmarshal only after verifying the message.
+ ctx = RpcContext.unmarshal(data[3])
+ else:
+ LOG.error(_("ZMQ Envelope version unsupported or unknown."))
+ return
- self.pool.spawn_n(self.process, style, topic,
- proxy, ctx, request)
+ self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
+ self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
- # Only consume on the base topic name.
- topic = topic.split('.', 1)[0]
-
- LOG.info(_("Create Consumer for topic (%(topic)s)") %
- {'topic': topic})
-
# Subscription scenarios
if fanout:
- subscribe = ('', fanout)[type(fanout) == str]
sock_type = zmq.SUB
- topic = 'fanout~' + topic
+ subscribe = ('', fanout)[type(fanout) == str]
+ topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
+ topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
+
+ if topic in self.topics:
+ LOG.info(_("Skipping topic registration. Already registered."))
+ return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
@@ -582,9 +617,11 @@ class Connection(rpc_common.Connection):
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
+ self.topics.append(topic)
def close(self):
self.reactor.close()
+ self.topics = []
def wait(self):
self.reactor.wait()
@@ -593,8 +630,8 @@ class Connection(rpc_common.Connection):
self.reactor.consume_in_thread()
-def _cast(addr, context, topic, msg, timeout=None, serialize=True,
- force_envelope=False, _msg_id=None):
+def _cast(addr, context, topic, msg, timeout=None, envelope=False,
+ _msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
@@ -603,7 +640,7 @@ def _cast(addr, context, topic, msg, timeout=None, serialize=True,
conn = ZmqClient(addr)
# assumes cast can't return an exception
- conn.cast(_msg_id, topic, payload, serialize, force_envelope)
+ conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
@@ -612,7 +649,7 @@ def _cast(addr, context, topic, msg, timeout=None, serialize=True,
def _call(addr, context, topic, msg, timeout=None,
- serialize=True, force_envelope=False):
+ envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
@@ -642,20 +679,31 @@ def _call(addr, context, topic, msg, timeout=None,
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
- "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
+ "ipc://%s/zmq_topic_zmq_replies.%s" %
+ (CONF.rpc_zmq_ipc_dir,
+ CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
- _cast(addr, context, topic, payload,
- serialize=serialize, force_envelope=force_envelope)
+ _cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
- responses = _deserialize(msg[-1])[-1]['args']['response']
+
+ if msg[2] == 'cast': # Legacy version
+ raw_msg = _deserialize(msg[-1])[-1]
+ elif msg[2] == 'impl_zmq_v2':
+ rpc_envelope = unflatten_envelope(msg[4:])
+ raw_msg = rpc_common.deserialize_msg(rpc_envelope)
+ else:
+ raise rpc_common.UnsupportedRpcEnvelopeVersion(
+ _("Unsupported or unknown ZMQ envelope returned."))
+
+ responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
@@ -676,8 +724,8 @@ def _call(addr, context, topic, msg, timeout=None,
return responses[-1]
-def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
- force_envelope=False, _msg_id=None):
+def _multi_send(method, context, topic, msg, timeout=None,
+ envelope=False, _msg_id=None):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
@@ -703,11 +751,11 @@ def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
- _topic, msg, timeout, serialize,
- force_envelope, _msg_id)
+ _topic, msg, timeout, envelope,
+ _msg_id)
return
return method(_addr, context, _topic, msg, timeout,
- serialize, force_envelope)
+ envelope)
def create_connection(conf, new=True):
@@ -746,8 +794,7 @@ def notify(conf, context, topic, msg, **kwargs):
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic.replace('.', '-')
- kwargs['serialize'] = kwargs.pop('envelope')
- kwargs['force_envelope'] = True
+ kwargs['envelope'] = kwargs.get('envelope', True)
cast(conf, context, topic, msg, **kwargs)
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
index 92b49f274..24963b7fb 100644
--- a/nova/servicegroup/drivers/zk.py
+++ b/nova/servicegroup/drivers/zk.py
@@ -19,16 +19,17 @@
import os
import eventlet
-import evzookeeper
-from evzookeeper import membership
from oslo.config import cfg
-import zookeeper
from nova import exception
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.servicegroup import api
from nova import utils
+evzookeeper = importutils.try_import('evzookeeper')
+membership = importutils.try_import('evzookeeper.membership')
+zookeeper = importutils.try_import('zookeeper')
zk_driver_opts = [
cfg.StrOpt('address',
@@ -58,16 +59,18 @@ class ZooKeeperDriver(api.ServiceGroupDriver):
def __init__(self, *args, **kwargs):
"""Create the zk session object."""
+ if not all([evzookeeper, membership, zookeeper]):
+ raise ImportError('zookeeper module not found')
null = open(os.devnull, "w")
- self._session = evzookeeper.ZKSession(CONF.zk.address,
+ self._session = evzookeeper.ZKSession(CONF.zookeeper.address,
recv_timeout=
- CONF.zk.recv_timeout,
+ CONF.zookeeper.recv_timeout,
zklog_fd=null)
self._memberships = {}
self._monitors = {}
# Make sure the prefix exists
try:
- self._session.create(CONF.zk.sg_prefix, "",
+ self._session.create(CONF.zookeeper.sg_prefix, "",
acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
except zookeeper.NodeExistsException:
pass
@@ -82,7 +85,7 @@ class ZooKeeperDriver(api.ServiceGroupDriver):
member = self._memberships.get((group, member_id), None)
if member is None:
# the first time to join. Generate a new object
- path = "%s/%s" % (CONF.zk.sg_prefix, group)
+ path = "%s/%s" % (CONF.zookeeper.sg_prefix, group)
try:
member = membership.Membership(self._session, path, member_id)
except RuntimeError:
@@ -90,7 +93,7 @@ class ZooKeeperDriver(api.ServiceGroupDriver):
"another node exists with the same name, or "
"this node just restarted. We will try "
"again in a short while to make sure."))
- eventlet.sleep(CONF.zk.sg_retry_interval)
+ eventlet.sleep(CONF.zookeeper.sg_retry_interval)
member = membership.Membership(self._session, path, member_id)
self._memberships[(group, member_id)] = member
return FakeLoopingCall(self, member_id, group)
@@ -120,7 +123,7 @@ class ZooKeeperDriver(api.ServiceGroupDriver):
"""
monitor = self._monitors.get(group_id, None)
if monitor is None:
- path = "%s/%s" % (CONF.zk.sg_prefix, group_id)
+ path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)
monitor = membership.MembershipMonitor(self._session, path)
self._monitors[group_id] = monitor
# Note(maoy): When initialized for the first time, it takes a
diff --git a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
index 381d452a7..fc4ae8122 100644
--- a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
@@ -67,6 +67,17 @@ class BareMetalNodesTest(test.TestCase):
self.mox.ReplayAll()
self.controller.delete(self.request, 1)
+ def test_delete_node_not_found(self):
+ self.mox.StubOutWithMock(db, 'bm_node_destroy')
+ db.bm_node_destroy(self.context, 1).\
+ AndRaise(exception.NodeNotFound(node_id=1))
+ self.mox.ReplayAll()
+ self.assertRaises(
+ exc.HTTPNotFound,
+ self.controller.delete,
+ self.request,
+ 1)
+
def test_index(self):
nodes = [{'id': 1},
{'id': 2},
@@ -78,7 +89,7 @@ class BareMetalNodesTest(test.TestCase):
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get_all(self.context).AndReturn(nodes)
db.bm_interface_get_all_by_bm_node_id(self.context, 1).\
- AndRaise(exception.InstanceNotFound(instance_id=1))
+ AndRaise(exception.NodeNotFound(node_id=1))
db.bm_interface_get_all_by_bm_node_id(self.context, 2).\
AndReturn(interfaces)
self.mox.ReplayAll()
@@ -103,6 +114,19 @@ class BareMetalNodesTest(test.TestCase):
self.assertEqual(node_id, res_dict['node']['id'])
self.assertEqual(2, len(res_dict['node']['interfaces']))
+ def test_show_no_interfaces(self):
+ node_id = 1
+ node = {'id': node_id}
+ self.mox.StubOutWithMock(db, 'bm_node_get')
+ self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
+ db.bm_node_get(self.context, node_id).AndReturn(node)
+ db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
+ AndRaise(exception.NodeNotFound(node_id=node_id))
+ self.mox.ReplayAll()
+ res_dict = self.controller.show(self.request, node_id)
+ self.assertEqual(node_id, res_dict['node']['id'])
+ self.assertEqual(0, len(res_dict['node']['interfaces']))
+
def test_add_interface(self):
node_id = 1
address = '11:22:33:44:55:66'
@@ -174,7 +198,7 @@ class BareMetalNodesTest(test.TestCase):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
- AndRaise(exception.InstanceNotFound(instance_id=node_id))
+ AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'add_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
@@ -187,7 +211,7 @@ class BareMetalNodesTest(test.TestCase):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
- AndRaise(exception.InstanceNotFound(instance_id=node_id))
+ AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'remove_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
diff --git a/nova/tests/api/openstack/compute/contrib/test_image_size.py b/nova/tests/api/openstack/compute/contrib/test_image_size.py
new file mode 100644
index 000000000..886bccfa7
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_image_size.py
@@ -0,0 +1,130 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import image_size
+from nova.image import glance
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+IMAGES = [{
+ 'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 10,
+ 'minRam': 128,
+ 'size': 12345678,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/123",
+ }],
+ },
+ {
+ 'id': '124',
+ 'name': 'queued snapshot',
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 25,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'size': 87654321,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/124",
+ }],
+ }]
+
+
+def fake_show(*args, **kwargs):
+ return IMAGES[0]
+
+
+def fake_detail(*args, **kwargs):
+ return IMAGES
+
+
+class ImageSizeTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-IMG-SIZE'
+
+ def setUp(self):
+ super(ImageSizeTest, self).setUp()
+ self.stubs.Set(glance.GlanceImageService, 'show', fake_show)
+ self.stubs.Set(glance.GlanceImageService, 'detail', fake_detail)
+ self.flags(osapi_compute_extension=['nova.api.openstack.compute'
+ '.contrib.image_size.Image_size'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app())
+ return res
+
+ def _get_image(self, body):
+ return jsonutils.loads(body).get('image')
+
+ def _get_images(self, body):
+ return jsonutils.loads(body).get('images')
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(image.get('%s:size' % self.prefix), size)
+
+ def test_show(self):
+ url = '/v2/fake/images/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ image = self._get_image(res.body)
+ self.assertImageSize(image, 12345678)
+
+ def test_detail(self):
+ url = '/v2/fake/images/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ images = self._get_images(res.body)
+ self.assertImageSize(images[0], 12345678)
+ self.assertImageSize(images[1], 87654321)
+
+
+class ImageSizeXmlTest(ImageSizeTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % image_size.Image_size.namespace
+
+ def _get_image(self, body):
+ return etree.XML(body)
+
+ def _get_images(self, body):
+ return etree.XML(body).getchildren()
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(int(image.get('%ssize' % self.prefix)), size)
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
index fa0c521fe..a890abe6f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
@@ -18,71 +18,12 @@ import webob
from nova.api.openstack.compute.contrib import volumes
from nova import context
-from nova import exception
from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
-LOG = logging.getLogger(__name__)
-
-_last_param = {}
-
-
-def _get_default_snapshot_param():
- return {
- 'id': 123,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- }
-
-
-def stub_snapshot_create(self, context, volume_id, name, description):
- global _last_param
- snapshot = _get_default_snapshot_param()
- snapshot['volume_id'] = volume_id
- snapshot['display_name'] = name
- snapshot['display_description'] = description
-
- LOG.debug(_("_create: %s"), snapshot)
- _last_param = snapshot
- return snapshot
-
-
-def stub_snapshot_delete(self, context, snapshot):
- global _last_param
- _last_param = snapshot
-
- LOG.debug(_("_delete: %s"), locals())
- if snapshot['id'] != '123':
- raise exception.NotFound
-
-
-def stub_snapshot_get(self, context, snapshot_id):
- global _last_param
- _last_param = dict(snapshot_id=snapshot_id)
-
- LOG.debug(_("_get: %s"), locals())
- if snapshot_id != '123':
- raise exception.NotFound
-
- param = _get_default_snapshot_param()
- param['id'] = snapshot_id
- return param
-
-
-def stub_snapshot_get_all(self, context):
- LOG.debug(_("_get_all: %s"), locals())
- param = _get_default_snapshot_param()
- param['id'] = 123
- return [param]
-
class SnapshotApiTest(test.TestCase):
def setUp(self):
@@ -90,14 +31,14 @@ class SnapshotApiTest(test.TestCase):
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "create_snapshot",
- stub_snapshot_create)
+ fakes.stub_snapshot_create)
self.stubs.Set(cinder.API, "create_snapshot_force",
- stub_snapshot_create)
+ fakes.stub_snapshot_create)
self.stubs.Set(cinder.API, "delete_snapshot",
- stub_snapshot_delete)
- self.stubs.Set(cinder.API, "get_snapshot", stub_snapshot_get)
+ fakes.stub_snapshot_delete)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
self.stubs.Set(cinder.API, "get_all_snapshots",
- stub_snapshot_get_all)
+ fakes.stub_snapshot_get_all)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.flags(
osapi_compute_extension=[
@@ -108,9 +49,6 @@ class SnapshotApiTest(test.TestCase):
self.app = fakes.wsgi_app(init_only=('os-snapshots',))
def test_snapshot_create(self):
- global _last_param
- _last_param = {}
-
snapshot = {"volume_id": 12,
"force": False,
"display_name": "Snapshot Test Name",
@@ -122,16 +60,8 @@ class SnapshotApiTest(test.TestCase):
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
- LOG.debug(_("test_snapshot_create: param=%s"), _last_param)
self.assertEqual(resp.status_int, 200)
-
- # Compare if parameters were correctly passed to stub
- self.assertEqual(_last_param['display_name'], "Snapshot Test Name")
- self.assertEqual(_last_param['display_description'],
- "Snapshot Test Desc")
-
resp_dict = jsonutils.loads(resp.body)
- LOG.debug(_("test_snapshot_create: resp_dict=%s"), resp_dict)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
@@ -139,9 +69,6 @@ class SnapshotApiTest(test.TestCase):
snapshot['display_description'])
def test_snapshot_create_force(self):
- global _last_param
- _last_param = {}
-
snapshot = {"volume_id": 12,
"force": True,
"display_name": "Snapshot Test Name",
@@ -153,16 +80,9 @@ class SnapshotApiTest(test.TestCase):
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
- LOG.debug(_("test_snapshot_create_force: param=%s"), _last_param)
self.assertEqual(resp.status_int, 200)
- # Compare if parameters were correctly passed to stub
- self.assertEqual(_last_param['display_name'], "Snapshot Test Name")
- self.assertEqual(_last_param['display_description'],
- "Snapshot Test Desc")
-
resp_dict = jsonutils.loads(resp.body)
- LOG.debug(_("test_snapshot_create_force: resp_dict=%s"), resp_dict)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
@@ -179,60 +99,41 @@ class SnapshotApiTest(test.TestCase):
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
- LOG.debug(_("test_snapshot_create_force: param=%s"), _last_param)
self.assertEqual(resp.status_int, 400)
def test_snapshot_delete(self):
- global _last_param
- _last_param = {}
-
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
- self.assertEqual(str(_last_param['id']), str(snapshot_id))
def test_snapshot_delete_invalid_id(self):
- global _last_param
- _last_param = {}
-
- snapshot_id = 234
+ snapshot_id = -1
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
- self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
def test_snapshot_show(self):
- global _last_param
- _last_param = {}
-
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(self.app)
- LOG.debug(_("test_snapshot_show: resp=%s"), resp)
self.assertEqual(resp.status_int, 200)
- self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
-
resp_dict = jsonutils.loads(resp.body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))
def test_snapshot_show_invalid_id(self):
- global _last_param
- _last_param = {}
-
- snapshot_id = 234
+ snapshot_id = -1
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
- self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
def test_snapshot_detail(self):
req = webob.Request.blank('/v2/fake/os-snapshots/detail')
@@ -241,13 +142,12 @@ class SnapshotApiTest(test.TestCase):
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
- LOG.debug(_("test_snapshot_detail: resp_dict=%s"), resp_dict)
self.assertTrue('snapshots' in resp_dict)
resp_snapshots = resp_dict['snapshots']
- self.assertEqual(len(resp_snapshots), 1)
+ self.assertEqual(len(resp_snapshots), 3)
resp_snapshot = resp_snapshots.pop()
- self.assertEqual(resp_snapshot['id'], 123)
+ self.assertEqual(resp_snapshot['id'], 102)
class SnapshotSerializerTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index bd3fde1f3..68e5e1b99 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -185,6 +185,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"FloatingIpsBulk",
"Fox In Socks",
"Hosts",
+ "ImageSize",
"InstanceActions",
"Keypairs",
"Multinic",
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index beb99a3f5..c906dae7f 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -227,7 +227,7 @@ def _make_image_fixtures():
# Public image
add_fixture(id=image_id, name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
- min_ram="128", min_disk="10")
+ min_ram="128", min_disk="10", size='25165824')
image_id += 1
# Snapshot for User 1
@@ -238,7 +238,7 @@ def _make_image_fixtures():
'deleted', 'pending_delete'):
add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
- properties=snapshot_properties)
+ properties=snapshot_properties, size='25165824')
image_id += 1
# Image without a name
@@ -597,11 +597,23 @@ def stub_snapshot(id, **kwargs):
return snapshot
-def stub_snapshot_get_all(self):
+def stub_snapshot_create(self, context, volume_id, name, description):
+ return stub_snapshot(100, volume_id=volume_id, display_name=name,
+ display_description=description)
+
+
+def stub_snapshot_delete(self, context, snapshot):
+ if snapshot['id'] == '-1':
+ raise exc.NotFound
+
+
+def stub_snapshot_get(self, context, snapshot_id):
+ if snapshot_id == '-1':
+ raise exc.NotFound
+ return stub_snapshot(snapshot_id)
+
+
+def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
-
-
-def stub_snapshot_get_all_by_project(self, context):
- return [stub_snapshot(1)]
diff --git a/nova/tests/api/test_sizelimit.py b/nova/tests/api/test_sizelimit.py
index 862a0d65f..9e7a33d29 100644
--- a/nova/tests/api/test_sizelimit.py
+++ b/nova/tests/api/test_sizelimit.py
@@ -13,6 +13,7 @@
# under the License.
from oslo.config import cfg
+import StringIO
import webob
import nova.api.sizelimit
@@ -22,6 +23,52 @@ CONF = cfg.CONF
MAX_REQUEST_BODY_SIZE = CONF.osapi_max_request_body_size
+class TestLimitingReader(test.TestCase):
+
+ def test_limiting_reader(self):
+ BYTES = 1024
+ bytes_read = 0
+ data = StringIO.StringIO("*" * BYTES)
+ for chunk in nova.api.sizelimit.LimitingReader(data, BYTES):
+ bytes_read += len(chunk)
+
+ self.assertEquals(bytes_read, BYTES)
+
+ bytes_read = 0
+ data = StringIO.StringIO("*" * BYTES)
+ reader = nova.api.sizelimit.LimitingReader(data, BYTES)
+ byte = reader.read(1)
+ while len(byte) != 0:
+ bytes_read += 1
+ byte = reader.read(1)
+
+ self.assertEquals(bytes_read, BYTES)
+
+ def test_limiting_reader_fails(self):
+ BYTES = 1024
+
+ def _consume_all_iter():
+ bytes_read = 0
+ data = StringIO.StringIO("*" * BYTES)
+ for chunk in nova.api.sizelimit.LimitingReader(data, BYTES - 1):
+ bytes_read += len(chunk)
+
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ _consume_all_iter)
+
+ def _consume_all_read():
+ bytes_read = 0
+ data = StringIO.StringIO("*" * BYTES)
+ reader = nova.api.sizelimit.LimitingReader(data, BYTES - 1)
+ byte = reader.read(1)
+ while len(byte) != 0:
+ bytes_read += 1
+ byte = reader.read(1)
+
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ _consume_all_read)
+
+
class TestRequestBodySizeLimiter(test.TestCase):
def setUp(self):
@@ -29,7 +76,7 @@ class TestRequestBodySizeLimiter(test.TestCase):
@webob.dec.wsgify()
def fake_app(req):
- return webob.Response()
+ return webob.Response(req.body)
self.middleware = nova.api.sizelimit.RequestBodySizeLimiter(fake_app)
self.request = webob.Request.blank('/', method='POST')
@@ -40,12 +87,14 @@ class TestRequestBodySizeLimiter(test.TestCase):
response = self.request.get_response(self.middleware)
self.assertEqual(response.status_int, 200)
- def test_content_length_to_large(self):
+ def test_content_length_too_large(self):
self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1
+ self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
response = self.request.get_response(self.middleware)
- self.assertEqual(response.status_int, 400)
+ self.assertEqual(response.status_int, 413)
- def test_request_to_large(self):
+ def test_request_too_large_no_content_length(self):
self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
+ self.request.headers['Content-Length'] = None
response = self.request.get_response(self.middleware)
- self.assertEqual(response.status_int, 400)
+ self.assertEqual(response.status_int, 413)
diff --git a/nova/tests/baremetal/db/test_bm_interface.py b/nova/tests/baremetal/db/test_bm_interface.py
index 32beb1ce0..e870ec5e0 100644
--- a/nova/tests/baremetal/db/test_bm_interface.py
+++ b/nova/tests/baremetal/db/test_bm_interface.py
@@ -18,7 +18,7 @@ Bare-metal DB testcase for BareMetalInterface
"""
from nova import exception
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
@@ -28,7 +28,7 @@ class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
- self.assertRaises(db_session.DBError,
+ self.assertRaises(db_exc.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
diff --git a/nova/tests/baremetal/db/test_bm_pxe_ip.py b/nova/tests/baremetal/db/test_bm_pxe_ip.py
index 9820f3af0..fe8ba5b3e 100644
--- a/nova/tests/baremetal/db/test_bm_pxe_ip.py
+++ b/nova/tests/baremetal/db/test_bm_pxe_ip.py
@@ -18,7 +18,7 @@ Bare-metal DB testcase for BareMetalPxeIp
"""
from nova import exception
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base
from nova.tests.baremetal.db import utils
from nova.virt.baremetal import db
@@ -51,14 +51,14 @@ class BareMetalPxeIpTestCase(base.BMDBTestCase):
# address duplicates
i = utils.new_bm_pxe_ip(address='10.1.1.1',
server_address='10.1.1.201')
- self.assertRaises(db_session.DBError,
+ self.assertRaises(db_exc.DBError,
db.bm_pxe_ip_create_direct,
self.context, i)
# server_address duplicates
i = utils.new_bm_pxe_ip(address='10.1.1.3',
server_address='10.1.1.101')
- self.assertRaises(db_session.DBError,
+ self.assertRaises(db_exc.DBError,
db.bm_pxe_ip_create_direct,
self.context, i)
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index 8e23908f4..bf0a4740b 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -152,7 +152,7 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
- def test_macs_for_instance(self):
+ def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index d9e41bc67..4f4c9f7db 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -27,7 +27,7 @@ from oslo.config import cfg
from testtools import matchers
from nova import exception
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
@@ -529,7 +529,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
AndRaise(exception.NovaException)
bm_utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
- AndRaise(db_session.DBError)
+ AndRaise(db_exc.DBError)
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e19470db5..5ad333c9e 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1420,6 +1420,54 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
+ def test_validate_console_port_vnc(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ def fake_driver_get_console(*args, **kwargs):
+ return {'host': "fake_host", 'port': "5900",
+ 'internal_access_path': None}
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(self.context,
+ instance,
+ "5900",
+ "novnc"))
+
+ def test_validate_console_port_spice(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ def fake_driver_get_console(*args, **kwargs):
+ return {'host': "fake_host", 'port': "5900",
+ 'internal_access_path': None}
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(self.context,
+ instance,
+ "5900",
+ "spice-html5"))
+
+ def test_validate_console_port_wrong_port(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ def fake_driver_get_console(*args, **kwargs):
+ return {'host': "fake_host", 'port': "5900",
+ 'internal_access_path': None}
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertFalse(self.compute.validate_console_port(self.context,
+ instance,
+ "wrongport",
+ "spice-html5"))
+
def test_xvpvnc_vnc_console(self):
# Make sure we can a vnc console for an instance.
self.flags(vnc_enabled=True)
@@ -1715,6 +1763,25 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance),
bdms={})
+ def test_delete_instance_deletes_console_auth_tokens(self):
+ instance = self._create_fake_instance()
+ self.flags(vnc_enabled=True)
+
+ self.tokens_deleted = False
+
+ def fake_delete_tokens(*args, **kwargs):
+ self.tokens_deleted = True
+
+ cauth_rpcapi = self.compute.consoleauth_rpcapi
+ self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
+ fake_delete_tokens)
+
+ self.compute._delete_instance(self.context,
+ instance=jsonutils.to_primitive(instance),
+ bdms={})
+
+ self.assertTrue(self.tokens_deleted)
+
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
which is propagated up from the underlying driver.
@@ -2035,10 +2102,11 @@ class ComputeTestCase(BaseTestCase):
new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id,
new_connection_data['target_iqn'] = new_iqn
- def fake_init_conn(self, context, volume, session):
+ def fake_init_conn_with_data(self, context, volume, session):
connection_info['data'] = new_connection_data
return connection_info
- self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
+ self.stubs.Set(cinder.API, "initialize_connection",
+ fake_init_conn_with_data)
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
@@ -5735,7 +5803,8 @@ class ComputeAPITestCase(BaseTestCase):
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
- 'internal_access_path': 'fake_access_path'}
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid']}
fake_connect_info2 = copy.deepcopy(fake_connect_info)
fake_connect_info2['access_url'] = 'fake_console_url'
@@ -5747,7 +5816,7 @@ class ComputeAPITestCase(BaseTestCase):
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
rpc_msg2 = {'method': 'authorize_console',
'args': fake_connect_info,
- 'version': '1.0'}
+ 'version': '1.2'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
@@ -5779,7 +5848,8 @@ class ComputeAPITestCase(BaseTestCase):
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
- 'internal_access_path': 'fake_access_path'}
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid']}
fake_connect_info2 = copy.deepcopy(fake_connect_info)
fake_connect_info2['access_url'] = 'fake_console_url'
@@ -5791,7 +5861,7 @@ class ComputeAPITestCase(BaseTestCase):
'version': '2.24'}
rpc_msg2 = {'method': 'authorize_console',
'args': fake_connect_info,
- 'version': '1.0'}
+ 'version': '1.2'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index a78a13883..6c40a95e2 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -171,6 +171,12 @@ class ComputeRpcAPITestCase(test.TestCase):
instance=self.fake_instance, console_type='type',
version='2.24')
+ def test_validate_console_port(self):
+ self._test_compute_api('validate_console_port', 'call',
+ instance=self.fake_instance, port="5900",
+ console_type="novnc",
+ version="2.26")
+
def test_host_maintenance_mode(self):
self._test_compute_api('host_maintenance_mode', 'call',
host_param='param', mode='mode', host='host')
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index ed733599b..32e685623 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -541,7 +541,7 @@ class _BaseTestCase(object):
self.mox.ReplayAll()
self.conductor.quota_commit(self.context, 'reservations')
- def test_quota_commit(self):
+ def test_quota_rollback(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.rollback(self.context, 'reservations')
self.mox.ReplayAll()
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index 15397a400..54e3d2261 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -42,12 +42,74 @@ class ConsoleauthTestCase(test.TestCase):
self.useFixture(test.TimeOverride())
token = 'mytok'
self.flags(console_token_ttl=1)
+
+ def fake_validate_console_port(*args, **kwargs):
+ return True
+ self.stubs.Set(self.manager.compute_rpcapi,
+ "validate_console_port",
+ fake_validate_console_port)
+
self.manager.authorize_console(self.context, token, 'novnc',
- '127.0.0.1', 'host', '')
+ '127.0.0.1', '8080', 'host',
+ 'instance')
self.assertTrue(self.manager.check_token(self.context, token))
timeutils.advance_time_seconds(1)
self.assertFalse(self.manager.check_token(self.context, token))
+ def test_multiple_tokens_for_instance(self):
+ tokens = ["token" + str(i) for i in xrange(10)]
+ instance = "12345"
+
+ def fake_validate_console_port(*args, **kwargs):
+ return True
+
+ self.stubs.Set(self.manager.compute_rpcapi,
+ "validate_console_port",
+ fake_validate_console_port)
+ for token in tokens:
+ self.manager.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ instance)
+
+ for token in tokens:
+ self.assertTrue(self.manager.check_token(self.context, token))
+
+ def test_delete_tokens_for_instance(self):
+ instance = "12345"
+ tokens = ["token" + str(i) for i in xrange(10)]
+ for token in tokens:
+ self.manager.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ instance)
+ self.manager.delete_tokens_for_instance(self.context, instance)
+ stored_tokens = self.manager._get_tokens_for_instance(instance)
+
+ self.assertEqual(len(stored_tokens), 0)
+
+ for token in tokens:
+ self.assertFalse(self.manager.check_token(self.context, token))
+
+ def test_wrong_token_has_port(self):
+ token = 'mytok'
+
+ def fake_validate_console_port(*args, **kwargs):
+ return False
+
+ self.stubs.Set(self.manager.compute_rpcapi,
+ "validate_console_port",
+ fake_validate_console_port)
+
+ self.manager.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ instance_uuid='instance')
+ self.assertFalse(self.manager.check_token(self.context, token))
+
+ def test_console_no_instance_uuid(self):
+ self.manager.authorize_console(self.context, "token", 'novnc',
+ '127.0.0.1', '8080', 'host',
+ instance_uuid=None)
+ self.assertFalse(self.manager.check_token(self.context, "token"))
+
def test_get_backdoor_port(self):
self.manager.backdoor_port = 59697
port = self.manager.get_backdoor_port(self.context)
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index 15af5fdcf..53ca2e5d6 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -65,11 +65,17 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
def test_authorize_console(self):
self._test_consoleauth_api('authorize_console', token='token',
console_type='ctype', host='h', port='p',
- internal_access_path='iap')
+ internal_access_path='iap', instance_uuid="instance",
+ version="1.2")
def test_check_token(self):
self._test_consoleauth_api('check_token', token='t')
+ def test_delete_tokens_for_instance(self):
+ self._test_consoleauth_api('delete_tokens_for_instance',
+ instance_uuid="instance",
+ version='1.2')
+
def test_get_backdoor_port(self):
self._test_consoleauth_api('get_backdoor_port', host='fake_host',
version='1.1')
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 80b3a9c85..7f9fffddc 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -144,6 +144,7 @@ policy_data = """
"compute_extension:hide_server_addresses": "",
"compute_extension:hosts": "",
"compute_extension:hypervisors": "",
+ "compute_extension:image_size": "",
"compute_extension:instance_actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py
index ff006db68..2ffc50227 100644
--- a/nova/tests/fakeguestfs.py
+++ b/nova/tests/fakeguestfs.py
@@ -24,6 +24,7 @@ class GuestFS(object):
self.mounts = []
self.files = {}
self.auginit = False
+ self.attach_method = 'libvirt'
def launch(self):
self.running = True
@@ -39,6 +40,12 @@ class GuestFS(object):
def add_drive_opts(self, file, *args, **kwargs):
self.drives.append((file, kwargs['format']))
+ def get_attach_method(self):
+ return self.attach_method
+
+ def set_attach_method(self, attach_method):
+ self.attach_method = attach_method
+
def inspect_os(self):
return ["/dev/guestvgf/lv_root"]
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 69a4e677e..bb5496e85 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -16,6 +16,7 @@
from lxml import etree
+import time
import uuid
# Allow passing None to the various connect methods
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 51cf30a3e..06f2f5147 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -52,6 +52,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
+ 'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
@@ -66,6 +67,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -79,6 +81,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': None,
'disk_format': None,
+ 'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -92,6 +95,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -105,6 +109,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
@@ -119,6 +124,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
+ 'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
@@ -134,6 +140,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
+ 'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
diff --git a/nova/tests/image/test_fake.py b/nova/tests/image/test_fake.py
index 614201b67..c63bb5389 100644
--- a/nova/tests/image/test_fake.py
+++ b/nova/tests/image/test_fake.py
@@ -41,7 +41,8 @@ class FakeImageServiceTestCase(test.TestCase):
self.assertEquals(keys, set(['id', 'name', 'created_at',
'updated_at', 'deleted_at', 'deleted',
'status', 'is_public', 'properties',
- 'disk_format', 'container_format']))
+ 'disk_format', 'container_format',
+ 'size']))
self.assertTrue(isinstance(image['created_at'], datetime.datetime))
self.assertTrue(isinstance(image['updated_at'], datetime.datetime))
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
new file mode 100644
index 000000000..f5f470cbc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
new file mode 100644
index 000000000..e36ddc76c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="%(image_id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
new file mode 100644
index 000000000..a29172bf4
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
new file mode 100644
index 000000000..4c1e4c4be
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 831c93436..1faf7f480 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -33,6 +33,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "OS-EXT-IMG-SIZE",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ImageSize",
+ "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index c76b55bbb..0aefc123f 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<extension alias="OS-EXT-IPS" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
<description>%(text)s</description>
</extension>
+ <extension alias="OS-EXT-IMG-SIZE" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/image_size/api/v1.1" name="ImageSize">
+ <description>%(text)s</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 7b964fa76..a7f7b6d91 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -326,6 +326,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
+ 'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False):
@@ -3206,6 +3207,30 @@ class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
ctype = 'xml'
+class ImageSizeSampleJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".image_size.Image_size")
+
+ def test_show(self):
+ # Get api sample of one single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ return self._verify_response('image-get-resp', subs, response)
+
+ def test_detail(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ return self._verify_response('images-details-get-resp', subs, response)
+
+
+class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
+ ctype = 'xml'
+
+
class ConfigDriveSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.config_drive."
"Config_drive")
@@ -3406,7 +3431,7 @@ class AttachInterfacesSampleJsonTest(ServersSampleBase):
def fake_list_ports(self, *args, **kwargs):
uuid = kwargs.get('device_id', None)
if not uuid:
- raise InstanceNotFound(instance_id=None)
+ raise exception.InstanceNotFound(instance_id=None)
port_data = {
"id": "ce531f90-199f-48c0-816c-13e38010b442",
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
@@ -3426,7 +3451,7 @@ class AttachInterfacesSampleJsonTest(ServersSampleBase):
def fake_show_port(self, context, port_id=None):
if not port_id:
- raise PortNotFound(port_id=None)
+ raise exception.PortNotFound(port_id=None)
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index cba08d9ce..4eac0c88c 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -26,6 +26,7 @@ from nova import db
from nova.network import driver
from nova.network import linux_net
from nova.openstack.common import fileutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
@@ -496,7 +497,7 @@ class LinuxNetworkTestCase(test.TestCase):
dev = 'br100'
linux_net.restart_dhcp(self.context, dev, network_ref)
expected = ['env',
- 'CONFIG_FILE=%s' % CONF.dhcpbridge_flagfile,
+ 'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=fake',
'dnsmasq',
'--strict-order',
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index ba997ac9d..92b8e1d91 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -29,7 +29,7 @@ from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -2157,7 +2157,7 @@ class FloatingIPTestCase(test.TestCase):
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
- raise db_session.DBError("If you're smart, you'll retry!")
+ raise db_exc.DBError("If you're smart, you'll retry!")
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index b535363a8..e9e19ef45 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -1141,7 +1141,7 @@ class TestQuantumv2(test.TestCase):
class TestQuantumv2ModuleMethods(test.TestCase):
- def test_ensure_requested_network_ordering_no_preference(self):
+ def test_ensure_requested_network_ordering_no_preference_ids(self):
l = [1, 2, 3]
quantumapi._ensure_requested_network_ordering(
@@ -1149,7 +1149,7 @@ class TestQuantumv2ModuleMethods(test.TestCase):
l,
None)
- def test_ensure_requested_network_ordering_no_preference(self):
+ def test_ensure_requested_network_ordering_no_preference_hashes(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
quantumapi._ensure_requested_network_ordering(
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index a161fe5e0..55d502915 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -181,7 +181,7 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('allocate_for_instance', rpc_method='call',
instance_id='fake_id', project_id='fake_id', host='fake_host',
rxtx_factor='fake_factor', vpn=False, requested_networks={},
- macs=set(), version='1.9')
+ macs=[], version='1.9')
def test_deallocate_for_instance(self):
self._test_network_api('deallocate_for_instance', rpc_method='call',
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
index 7330da66a..a33b4b0cd 100644
--- a/nova/tests/servicegroup/test_zk_driver.py
+++ b/nova/tests/servicegroup/test_zk_driver.py
@@ -38,7 +38,7 @@ class ZKServiceGroupTestCase(test.TestCase):
servicegroup.API._driver = None
try:
from nova.servicegroup.drivers import zk
- _unused = zk
+ _unused = zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
self.flags(servicegroup_driver='zk')
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 0c5c6d02c..b225a2116 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -163,6 +163,8 @@ class ImageCacheManagerTestCase(test.TestCase):
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
'host': 'remotehost',
'name': 'inst-3',
'uuid': '789',
@@ -174,18 +176,24 @@ class ImageCacheManagerTestCase(test.TestCase):
# The argument here should be a context, but it's mocked out
image_cache_manager._list_running_instances(None, all_instances)
- self.assertEqual(len(image_cache_manager.used_images), 2)
+ self.assertEqual(len(image_cache_manager.used_images), 4)
self.assertTrue(image_cache_manager.used_images['1'] ==
(1, 0, ['inst-1']))
self.assertTrue(image_cache_manager.used_images['2'] ==
(1, 1, ['inst-2', 'inst-3']))
+ self.assertTrue(image_cache_manager.used_images['21'] ==
+ (0, 1, ['inst-3']))
+ self.assertTrue(image_cache_manager.used_images['22'] ==
+ (0, 1, ['inst-3']))
self.assertTrue('inst-1' in image_cache_manager.instance_names)
self.assertTrue('123' in image_cache_manager.instance_names)
- self.assertEqual(len(image_cache_manager.image_popularity), 2)
+ self.assertEqual(len(image_cache_manager.image_popularity), 4)
self.assertEqual(image_cache_manager.image_popularity['1'], 1)
self.assertEqual(image_cache_manager.image_popularity['2'], 2)
+ self.assertEqual(image_cache_manager.image_popularity['21'], 1)
+ self.assertEqual(image_cache_manager.image_popularity['22'], 1)
def test_list_resizing_instances(self):
all_instances = [{'image_ref': '1',
@@ -703,6 +711,8 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_verify_base_images(self):
hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
+ hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
+ hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
self.flags(instances_path='/instance_path')
@@ -715,6 +725,8 @@ class ImageCacheManagerTestCase(test.TestCase):
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
hashed_42,
hashed_1,
+ hashed_21,
+ hashed_22,
'%s_5368709120' % hashed_1,
'%s_10737418240' % hashed_1,
'00000004']
@@ -744,8 +756,10 @@ class ImageCacheManagerTestCase(test.TestCase):
if path == fq_path(p) + '.info':
return False
- if path in ['/instance_path/_base/%s_sm' % hashed_1,
- '/instance_path/_base/%s_sm' % hashed_42]:
+ if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
+ hashed_21,
+ hashed_22,
+ hashed_42]]:
return False
self.fail('Unexpected path existence check: %s' % path)
@@ -800,6 +814,8 @@ class ImageCacheManagerTestCase(test.TestCase):
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
@@ -850,7 +866,8 @@ class ImageCacheManagerTestCase(test.TestCase):
image_cache_manager.verify_base_images(None, all_instances)
# Verify
- active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1)]
+ active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
+ fq_path(hashed_21), fq_path(hashed_22)]
self.assertEquals(image_cache_manager.active_base_files, active)
for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 981cb20bd..fcd66dae8 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -270,6 +270,17 @@ class FakeVolumeDriver(object):
return ""
+class FakeConfigGuestDisk(object):
+ def __init__(self, *args, **kwargs):
+ self.source_type = None
+ self.driver_cache = None
+
+
+class FakeConfigGuest(object):
+ def __init__(self, *args, **kwargs):
+ self.driver_cache = None
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@@ -1678,7 +1689,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type='lxc')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertEquals(conn.uri, 'lxc:///')
+ self.assertEquals(conn.uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
@@ -1912,7 +1923,9 @@ class LibvirtConnTestCase(test.TestCase):
check = (lambda t: t.findall(xpath)[2].get("name"),
"version")
check_list.append(check)
- check = (lambda t: t.findall(xpath)[2].text,
+ # NOTE(sirp): empty strings don't roundtrip in lxml (they are
+ # converted to None), so we need an `or ''` to correct for that
+ check = (lambda t: t.findall(xpath)[2].text or '',
version.version_string_with_package())
check_list.append(check)
@@ -1966,7 +1979,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertEquals(conn.uri, expected_uri)
+ self.assertEquals(conn.uri(), expected_uri)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
@@ -2006,7 +2019,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertEquals(conn.uri, testuri)
+ self.assertEquals(conn.uri(), testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
@@ -2569,6 +2582,129 @@ class LibvirtConnTestCase(test.TestCase):
None)
self.assertTrue(self.create_image_called)
+ def test_create_image_plain(self):
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name):
+ self.path = os.path.join(instance['name'], name)
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ # Stop 'libvirt_driver._create_image' touching filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = db.instance_create(self.context, instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'to_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ None,
+ image_meta)
+ xml = conn.to_xml(instance, None,
+ disk_info, image_meta)
+ conn._create_image(context, instance, xml,
+ disk_info['mapping'])
+
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * 1024 * 1024 * 1024},
+ {'filename': 'ephemeral_20_default',
+ 'size': 20 * 1024 * 1024 * 1024},
+ ]
+ self.assertEquals(gotFiles, wantFiles)
+
+ def test_create_image_with_swap(self):
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name):
+ self.path = os.path.join(instance['name'], name)
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ # Stop 'libvirt_driver._create_image' touching filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = db.instance_create(self.context, instance_ref)
+
+ # Turn on some swap to exercise that codepath in _create_image
+ instance['instance_type']['swap'] = 500
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'to_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ None,
+ image_meta)
+ xml = conn.to_xml(instance, None,
+ disk_info, image_meta)
+ conn._create_image(context, instance, xml,
+ disk_info['mapping'])
+
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * 1024 * 1024 * 1024},
+ {'filename': 'ephemeral_20_default',
+ 'size': 20 * 1024 * 1024 * 1024},
+ {'filename': 'swap_500',
+ 'size': 500 * 1024 * 1024},
+ ]
+ self.assertEquals(gotFiles, wantFiles)
+
def test_get_console_output_file(self):
fake_libvirt_utils.files['console.log'] = '01234567890'
@@ -2772,6 +2908,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
mock.destroy()
mock.undefineFlags(1).AndReturn(1)
@@ -2781,7 +2918,7 @@ class LibvirtConnTestCase(test.TestCase):
return mock
def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN}
+ return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2792,6 +2929,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_destroy_undefines_no_undefine_flags(self):
mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
mock.undefine()
@@ -2802,7 +2940,7 @@ class LibvirtConnTestCase(test.TestCase):
return mock
def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN}
+ return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2813,6 +2951,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_destroy_undefines_no_attribute_with_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndReturn(True)
@@ -2825,7 +2964,7 @@ class LibvirtConnTestCase(test.TestCase):
return mock
def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN}
+ return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2836,6 +2975,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_destroy_undefines_no_attribute_no_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndRaise(AttributeError())
@@ -2847,7 +2987,7 @@ class LibvirtConnTestCase(test.TestCase):
return mock
def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN}
+ return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2858,6 +2998,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
mock.destroy()
self.mox.ReplayAll()
@@ -2875,21 +3016,29 @@ class LibvirtConnTestCase(test.TestCase):
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
- def test_available_least_handles_missing(self):
+ def test_disk_over_committed_size_total(self):
# Ensure destroy calls managedSaveRemove for saved instance.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
- return ['fake']
+ return ['fake1', 'fake2']
self.stubs.Set(conn, 'list_instances', list_instances)
+ fake_disks = {'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size':'83886080'}],
+ 'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size':'10737418240'}]}
+
def get_info(instance_name):
- raise exception.InstanceNotFound(instance_id='fake')
+ return jsonutils.dumps(fake_disks.get(instance_name))
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
- result = conn.get_disk_available_least()
- space = fake_libvirt_utils.get_fs_info(CONF.instances_path)['free']
- self.assertEqual(result, space / 1024 ** 3)
+ result = conn.get_disk_over_committed_size_total()
+ self.assertEqual(result, 10653532160)
def test_cpu_info(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -3433,6 +3582,33 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(got_events[0].transition,
virtevent.EVENT_LIFECYCLE_STOPPED)
+ def test_set_cache_mode(self):
+ self.flags(disk_cachemodes=['file=directsync'])
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn.set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'directsync')
+
+ def test_set_cache_mode_invalid_mode(self):
+ self.flags(disk_cachemodes=['file=FAKE'])
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn.set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, None)
+
+ def test_set_cache_mode_invalid_object(self):
+ self.flags(disk_cachemodes=['file=directsync'])
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuest()
+
+ fake_conf.driver_cache = 'fake'
+ conn.set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'fake')
+
class HostStateTestCase(test.TestCase):
@@ -3455,11 +3631,8 @@ class HostStateTestCase(test.TestCase):
def get_cpu_info(self):
return HostStateTestCase.cpu_info
- def get_local_gb_total(self):
- return 100
-
- def get_local_gb_used(self):
- return 20
+ def get_local_gb_info(self):
+ return {'total': 100, 'used': 20, 'free': 80}
def get_memory_mb_total(self):
return 497
@@ -3775,8 +3948,10 @@ class IptablesFirewallTestCase(test.TestCase):
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
- self.assertEquals(ipv4_network_rules,
- ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
+ # Extra rule is for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 1
+ self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index cb7943ea8..026e55e9c 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -371,6 +371,8 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
@@ -409,7 +411,7 @@ class LibvirtVifTestCase(test.TestCase):
self.mapping_bridge_quantum,
br_want)
- def _check_ovs_ethernet_driver(self, d, net, mapping):
+ def _check_ovs_ethernet_driver(self, d, net, mapping, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
@@ -417,24 +419,26 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "ethernet")
dev_name = node.find("target").get("dev")
- self.assertTrue(dev_name.startswith("tap"))
+ self.assertTrue(dev_name.startswith(dev_prefix))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_ovs['mac'])
script = node.find("script").get("path")
self.assertEquals(script, "")
- def test_ovs_ethernet_driver(self):
+ def test_ovs_ethernet_driver_legacy(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9010)
d = vif.LibvirtOpenVswitchDriver(get_connection)
- d = vif.LibvirtOpenVswitchDriver()
self._check_ovs_ethernet_driver(d,
self.net_ovs,
- self.mapping_ovs_legacy)
+ self.mapping_ovs_legacy,
+ "nic")
def test_ovs_ethernet_driver(self):
def get_connection():
@@ -444,7 +448,8 @@ class LibvirtVifTestCase(test.TestCase):
d = vif.LibvirtGenericVIFDriver(get_connection)
self._check_ovs_ethernet_driver(d,
self.net_ovs,
- self.mapping_ovs)
+ self.mapping_ovs,
+ "tap")
def _check_ovs_virtualport_driver(self, d, net, mapping, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
@@ -454,6 +459,8 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
@@ -503,6 +510,8 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index f800d2eb7..ef25ca726 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -63,41 +63,25 @@ import nova.virt.baremetal.db.sqlalchemy.migrate_repo
LOG = logging.getLogger(__name__)
-def _get_connect_string(backend,
- user=None,
- passwd=None,
- database=None):
+def _get_connect_string(backend, user, passwd, database):
"""
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
- if not user:
- user = "openstack_citest"
- if not passwd:
- passwd = "openstack_citest"
- if not database:
- database = "openstack_citest"
-
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
+ else:
+ raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% locals())
-def _is_backend_avail(backend,
- user="openstack_citest",
- passwd="openstack_citest",
- database="openstack_citest"):
+def _is_backend_avail(backend, user, passwd, database):
try:
- if backend == "mysql":
- connect_uri = _get_connect_string("mysql",
- user=user, passwd=passwd, database=database)
- elif backend == "postgres":
- connect_uri = _get_connect_string("postgres",
- user=user, passwd=passwd, database=database)
+ connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
@@ -110,17 +94,17 @@ def _is_backend_avail(backend,
return True
-def _have_mysql():
+def _have_mysql(user, passwd, database):
present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
if present is None:
- return _is_backend_avail('mysql')
+ return _is_backend_avail('mysql', user, passwd, database)
return present.lower() in ('', 'true')
-def _have_postgresql():
+def _have_postgresql(user, passwd, database):
present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT')
if present is None:
- return _is_backend_avail('postgres')
+ return _is_backend_avail('postgres', user, passwd, database)
return present.lower() in ('', 'true')
@@ -162,8 +146,49 @@ def get_pgsql_connection_info(conn_pieces):
return (user, password, database, host)
+class CommonTestsMixIn(object):
+ """These tests are shared between TestNovaMigrations and
+ TestBaremetalMigrations.
+
+ BaseMigrationTestCase is effectively an abstract class, meant to be derived
+ from and not directly tested against; that's why these `test_` methods need
+ to be on a Mixin, so that they won't be picked up as valid tests for
+ BaseMigrationTestCase.
+ """
+ def test_walk_versions(self):
+ for key, engine in self.engines.items():
+ self._walk_versions(engine, self.snake_walk)
+
+ def test_mysql_opportunistically(self):
+ self._test_mysql_opportunistically()
+
+ def test_mysql_connect_fail(self):
+ """
+ Test that we can trigger a mysql connection failure and we fail
+ gracefully to ensure we don't break people without mysql
+ """
+ if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD,
+ self.DATABASE):
+ self.fail("Shouldn't have connected")
+
+ def test_postgresql_opportunistically(self):
+ self._test_postgresql_opportunistically()
+
+ def test_postgresql_connect_fail(self):
+ """
+ Test that we can trigger a postgres connection failure and we fail
+ gracefully to ensure we don't break people without postgres
+ """
+ if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD,
+ self.DATABASE):
+ self.fail("Shouldn't have connected")
+
+
class BaseMigrationTestCase(test.TestCase):
"""Base class fort testing migrations and migration utils."""
+ USER = None
+ PASSWD = None
+ DATABASE = None
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
@@ -223,7 +248,8 @@ class BaseMigrationTestCase(test.TestCase):
def execute_cmd(cmd=None):
status, output = commands.getstatusoutput(cmd)
LOG.debug(output)
- self.assertEqual(0, status)
+ self.assertEqual(0, status,
+ "Failed to run: %s\n%s" % (cmd, output))
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
@@ -270,21 +296,14 @@ class BaseMigrationTestCase(test.TestCase):
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
- def test_mysql_connect_fail(self):
- """
- Test that we can trigger a mysql connection failure and we fail
- gracefully to ensure we don't break people without mysql
- """
- if _is_backend_avail('mysql', user="openstack_cifail"):
- self.fail("Shouldn't have connected")
-
- def _test_mysql_opportunistically(self, database=None):
+ def _test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables
- if not _have_mysql():
+ if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
- connect_string = _get_connect_string("mysql", database=database)
+ connect_string = _get_connect_string("mysql", self.USER, self.PASSWD,
+ self.DATABASE)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = sqlalchemy.create_engine(connect_string)
@@ -313,21 +332,14 @@ class BaseMigrationTestCase(test.TestCase):
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
connection.close()
- def test_postgresql_connect_fail(self):
- """
- Test that we can trigger a postgres connection failure and we fail
- gracefully to ensure we don't break people without postgres
- """
- if _is_backend_avail('postgresql', user="openstack_cifail"):
- self.fail("Shouldn't have connected")
-
- def _test_postgresql_opportunistically(self, database=None):
+ def _test_postgresql_opportunistically(self):
# Test postgresql database migration walk
- if not _have_postgresql():
+ if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
self.skipTest("postgresql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
- connect_string = _get_connect_string("postgres", database=database)
+ connect_string = _get_connect_string("postgres", self.USER,
+ self.PASSWD, self.DATABASE)
engine = sqlalchemy.create_engine(connect_string)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
@@ -363,7 +375,7 @@ class BaseMigrationTestCase(test.TestCase):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
- self._migrate_down(engine, version)
+ self._migrate_down(engine, version - 1)
self._migrate_up(engine, version)
if downgrade:
@@ -373,10 +385,10 @@ class BaseMigrationTestCase(test.TestCase):
xrange(self.INIT_VERSION + 2,
self.REPOSITORY.latest + 1)):
# downgrade -> upgrade -> downgrade
- self._migrate_down(engine, version)
+ self._migrate_down(engine, version - 1)
if snake_walk:
self._migrate_up(engine, version)
- self._migrate_down(engine, version)
+ self._migrate_down(engine, version - 1)
def _migrate_down(self, engine, version):
self.migration_api.downgrade(engine,
@@ -386,11 +398,19 @@ class BaseMigrationTestCase(test.TestCase):
self.migration_api.db_version(engine,
self.REPOSITORY))
+ # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
+ # version). So if we have any downgrade checks, they need to be run for
+ # the previous (higher numbered) migration.
+ post_downgrade = getattr(
+ self, "_post_downgrade_%d" % (version + 1), None)
+ if post_downgrade:
+ post_downgrade(engine)
+
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
- migration version with special _prerun_### and
+ migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
@@ -398,9 +418,9 @@ class BaseMigrationTestCase(test.TestCase):
try:
if with_data:
data = None
- prerun = getattr(self, "_prerun_%d" % version, None)
- if prerun:
- data = prerun(engine)
+ pre_upgrade = getattr(self, "_pre_upgrade_%d" % version, None)
+ if pre_upgrade:
+ data = pre_upgrade(engine)
self.migration_api.upgrade(engine,
self.REPOSITORY,
@@ -420,8 +440,11 @@ class BaseMigrationTestCase(test.TestCase):
raise
-class TestNovaMigrations(BaseMigrationTestCase):
+class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
+ USER = "openstack_citest"
+ PASSWD = "openstack_citest"
+ DATABASE = "openstack_citest"
def __init__(self, *args, **kwargs):
super(TestNovaMigrations, self).__init__(*args, **kwargs)
@@ -448,22 +471,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
globals(), locals(), ['versioning_api'], -1)
self.migration_api = temp.versioning_api
- def tearDown(self):
- super(TestNovaMigrations, self).tearDown()
-
- def test_walk_versions(self):
- for key, engine in self.engines.items():
- self._walk_versions(engine, self.snake_walk)
-
- def test_mysql_opportunistically(self):
- self._test_mysql_opportunistically(
- database='openstack_citest')
-
- def test_postgresql_opportunistically(self):
- self._test_postgresql_opportunistically(
- database='openstack_citest')
-
- def _prerun_134(self, engine):
+ def _pre_upgrade_134(self, engine):
now = timeutils.utcnow()
data = [{
'id': 1,
@@ -517,7 +525,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(data[0]['mac'], bw['mac'])
# migration 141, update migrations instance uuid
- def _prerun_141(self, engine):
+ def _pre_upgrade_141(self, engine):
data = {
'instance_uuid': str(uuid.uuid4())
}
@@ -535,7 +543,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(data['instance_uuid'], row['instance_uuid'])
# migration 146, availability zone transition
- def _prerun_146(self, engine):
+ def _pre_upgrade_146(self, engine):
data = {
'availability_zone': 'custom_az',
'name': 'name',
@@ -554,8 +562,15 @@ class TestNovaMigrations(BaseMigrationTestCase):
aggregate_md.c.aggregate_id == data['id']).execute().first()
self.assertEqual(data['availability_zone'], md['value'])
+ def _post_downgrade_146(self, engine):
+ # Downgrade should delete availability_zone aggregate_metadata entries
+ aggregate_md = get_table(engine, 'aggregate_metadata')
+ num_azs = aggregate_md.count().where(
+ aggregate_md.c.key == 'availability_zone').execute().scalar()
+ self.assertEqual(0, num_azs)
+
# migration 147, availability zone transition for services
- def _prerun_147(self, engine):
+ def _pre_upgrade_147(self, engine):
az = 'test_zone'
host1 = 'compute-host1'
host2 = 'compute-host2'
@@ -599,7 +614,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(host, None)
# migration 149, changes IPAddr storage format
- def _prerun_149(self, engine):
+ def _pre_upgrade_149(self, engine):
provider_fw_rules = get_table(engine, 'provider_fw_rules')
console_pools = get_table(engine, 'console_pools')
data = {
@@ -657,7 +672,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertIn(str(netaddr.IPAddress(row['address'])), iplist)
# migration 151 - changes period_beginning and period_ending to DateTime
- def _prerun_151(self, engine):
+ def _pre_upgrade_151(self, engine):
task_log = get_table(engine, 'task_log')
data = {
'task_name': 'The name of the task',
@@ -683,7 +698,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(data['period_ending'], str(row['period_ending']))
# migration 152 - convert deleted from boolean to int
- def _prerun_152(self, engine):
+ def _pre_upgrade_152(self, engine):
host1 = 'compute-host1'
host2 = 'compute-host2'
# NOTE(sdague): start at #4 because services data already in table
@@ -720,7 +735,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(volume.id, volume.deleted)
# migration 153, copy flavor information into system_metadata
- def _prerun_153(self, engine):
+ def _pre_upgrade_153(self, engine):
fake_types = [
dict(id=10, name='type1', memory_mb=128, vcpus=1,
root_gb=10, ephemeral_gb=0, flavorid="1", swap=0,
@@ -788,7 +803,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
# migration 154, add shadow tables for deleted data
# There are 53 shadow tables but we only test one
# There are additional tests in test_db_api.py
- def _prerun_154(self, engine):
+ def _pre_upgrade_154(self, engine):
meta = sqlalchemy.schema.MetaData()
meta.reflect(engine)
table_names = meta.tables.keys()
@@ -832,7 +847,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
str(shadow_column.type))
# migration 156 - introduce CIDR type
- def _prerun_156(self, engine):
+ def _pre_upgrade_156(self, engine):
# assume the same data as from 149
data = {
'provider_fw_rules':
@@ -867,7 +882,7 @@ class TestNovaMigrations(BaseMigrationTestCase):
# recheck the 149 data
self._check_149(engine, data)
- def _prerun_158(self, engine):
+ def _pre_upgrade_158(self, engine):
networks = get_table(engine, 'networks')
data = [
{'vlan': 1, 'deleted': 0},
@@ -888,8 +903,11 @@ class TestNovaMigrations(BaseMigrationTestCase):
self.assertEqual(len(rows), 1)
-class TestBaremetalMigrations(BaseMigrationTestCase):
+class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
+ USER = "openstack_citest"
+ PASSWD = "openstack_citest"
+ DATABASE = "openstack_baremetal_citest"
def __init__(self, *args, **kwargs):
super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
@@ -918,22 +936,7 @@ class TestBaremetalMigrations(BaseMigrationTestCase):
globals(), locals(), ['versioning_api'], -1)
self.migration_api = temp.versioning_api
- def tearDown(self):
- super(TestBaremetalMigrations, self).tearDown()
-
- def test_walk_versions(self):
- for key, engine in self.engines.items():
- self._walk_versions(engine, self.snake_walk)
-
- def test_mysql_opportunistically(self):
- self._test_mysql_opportunistically(
- database='openstack_baremetal_citest')
-
- def test_postgresql_opportunistically(self):
- self._test_postgresql_opportunistically(
- database='openstack_baremetal_citest')
-
- def _prerun_002(self, engine):
+ def _pre_upgrade_002(self, engine):
data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
table = get_table(engine, 'bm_deployments')
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index ad0e4539c..640497fb0 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -151,11 +151,11 @@ class PowerVMDriverTestCase(test.TestCase):
self.assertTrue(self.powervm_connection.instance_exists(name))
def test_spawn(self):
- def fake_image_fetch_to_raw(context, image_id, file_path,
+ def fake_image_fetch(context, image_id, file_path,
user_id, project_id):
pass
self.flags(powervm_img_local_path='/images/')
- self.stubs.Set(images, 'fetch_to_raw', fake_image_fetch_to_raw)
+ self.stubs.Set(images, 'fetch', fake_image_fetch)
image_meta = {}
image_meta['id'] = '666'
fake_net_info = network_model.NetworkInfo([
@@ -173,7 +173,7 @@ class PowerVMDriverTestCase(test.TestCase):
raise ex
self.flags(powervm_img_local_path='/images/')
- self.stubs.Set(images, 'fetch_to_raw', lambda *x, **y: None)
+ self.stubs.Set(images, 'fetch', lambda *x, **y: None)
self.stubs.Set(
self.powervm_connection._powervm._disk_adapter,
'create_volume_from_image',
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 940ddf6ec..2c2c58db9 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -23,6 +23,7 @@ import os
import os.path
import StringIO
import tempfile
+from xml.dom import minidom
import mox
import netaddr
@@ -1059,3 +1060,47 @@ class StringLengthTestCase(test.TestCase):
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
+
+
+class SafeParserTestCase(test.TestCase):
+ def test_external_dtd(self):
+ xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+ <html>
+ <head/>
+ <body>html with dtd</body>
+ </html>""")
+
+ parser = utils.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_external_file(self):
+ xml_string = """<!DOCTYPE external [
+ <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
+ ]>
+ <root>&ee;</root>"""
+
+ parser = utils.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_notation(self):
+ xml_string = """<?xml version="1.0" standalone="no"?>
+ <!-- comment data -->
+ <!DOCTYPE x [
+ <!NOTATION notation SYSTEM "notation.jpeg">
+ ]>
+ <root attr1="value1">
+ </root>"""
+
+ parser = utils.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 84c3caa63..10dc70741 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -2068,8 +2068,10 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
- self.assertEquals(ipv4_network_rules,
- ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
+ # Extra rule is for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 1
+ self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
diff --git a/nova/tests/virt/disk/test_nbd.py b/nova/tests/virt/disk/test_nbd.py
index 750506882..858b13603 100644
--- a/nova/tests/virt/disk/test_nbd.py
+++ b/nova/tests/virt/disk/test_nbd.py
@@ -42,14 +42,6 @@ def _fake_listdir_nbd_devices(path):
return ORIG_LISTDIR(path)
-def _fake_exists_no_users(path):
- if path.startswith('/sys/block/nbd'):
- if path.endswith('pid'):
- return False
- return True
- return ORIG_EXISTS(path)
-
-
def _fake_exists_all_used(path):
if path.startswith('/sys/block/nbd'):
return True
diff --git a/nova/utils.py b/nova/utils.py
index 2c7d0b427..764fa9070 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -672,19 +672,33 @@ class ProtectedExpatParser(expatreader.ExpatParser):
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
- raise ValueError("<!ENTITY> forbidden")
+ raise ValueError("<!ENTITY> entity declaration forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
- raise ValueError("<!ENTITY> forbidden")
+ raise ValueError("<!ENTITY> unparsed entity forbidden")
+
+ def external_entity_ref(self, context, base, systemId, publicId):
+ raise ValueError("<!ENTITY> external entity forbidden")
+
+ def notation_decl(self, name, base, sysid, pubid):
+ raise ValueError("<!ENTITY> notation forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+ self._parser.EndDoctypeDeclHandler = None
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+ self._parser.ExternalEntityRefHandler = self.external_entity_ref
+ self._parser.NotationDeclHandler = self.notation_decl
+ try:
+ self._parser.SkippedEntityHandler = None
+ except AttributeError:
+ # some pyexpat versions do not support SkippedEntity
+ pass
def safe_minidom_parse_string(xml_string):
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index e06bcd7d2..d117ad46d 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -25,8 +25,10 @@ import uuid
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
+import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -61,7 +63,7 @@ def model_query(context, *args, **kwargs):
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
- if project_only and sqlalchemy_api.is_user_context(context):
+ if project_only and nova.context.is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
@@ -221,15 +223,20 @@ def bm_node_associate_and_update(context, node_uuid, values):
def bm_node_destroy(context, bm_node_id):
# First, delete all interfaces belonging to the node.
# Delete physically since these have unique columns.
- model_query(context, models.BareMetalInterface, read_deleted="no").\
+ session = db_session.get_session()
+ with session.begin():
+ model_query(context, models.BareMetalInterface, read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
delete()
- model_query(context, models.BareMetalNode).\
+ rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
+ if not rows:
+ raise exception.NodeNotFound(node_id=bm_node_id)
+
@sqlalchemy_api.require_admin_context
def bm_pxe_ip_get_all(context):
@@ -393,7 +400,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
try:
session.add(bm_interface)
session.flush()
- except db_session.DBError, e:
+ except db_exc.DBError, e:
# TODO(deva): clean up when db layer raises DuplicateKeyError
if str(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py
index 2a560e24e..a9fc649e2 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py
@@ -47,13 +47,8 @@ def downgrade(migrate_engine):
bm_nodes = Table('bm_nodes', meta, autoload=True)
- image_path = Column('image_path', String(length=255))
- pxe_config_path = Column('pxe_config_path', String(length=255))
- deploy_key = Column('deploy_key', String(length=255))
- root_mb = Column('root_mb', Integer())
- swap_mb = Column('swap_mb', Integer())
-
- for c in [image_path, pxe_config_path, deploy_key, root_mb, swap_mb]:
+ for c in ['image_path', 'pxe_config_path', 'deploy_key', 'root_mb',
+ 'swap_mb']:
bm_nodes.drop_column(c)
bm_deployments = Table('bm_deployments', meta,
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py
index cc9a9316d..778719096 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py
@@ -35,6 +35,5 @@ def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
- uuid_col = Column('uuid', String(length=36))
- t.drop_column(uuid_col)
+ t.drop_column('uuid')
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
index 585096c94..fc045d5ca 100644
--- a/nova/virt/baremetal/db/sqlalchemy/session.py
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -44,8 +44,6 @@ CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
_ENGINE = None
_MAKER = None
-DBError = nova_session.DBError
-
def get_session(autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy session."""
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 813f95c05..1e98126e2 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -27,7 +27,7 @@ from oslo.config import cfg
from nova.compute import instance_types
from nova import exception
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -428,7 +428,7 @@ class PXE(base.NodeDriver):
bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
try:
macs = self._collect_mac_addresses(context, node)
- except db_session.DBError:
+ except db_exc.DBError:
pass
else:
for mac in macs:
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index acea8afdf..11cf9d098 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -19,6 +19,8 @@ import guestfs
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.disk.vfs import api as vfs
+from nova.virt.libvirt import driver as libvirt_driver
+
LOG = logging.getLogger(__name__)
@@ -95,6 +97,9 @@ class VFSGuestFS(vfs.VFS):
try:
self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
+ if self.handle.get_attach_method() == 'libvirt':
+ libvirt_url = 'libvirt:' + libvirt_driver.LibvirtDriver.uri()
+ self.handle.set_attach_method(libvirt_url)
self.handle.launch()
self.setup_os()
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index a36beb7f0..d9502ec46 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -149,6 +149,10 @@ class IptablesFirewallDriver(FirewallDriver):
self.network_infos = {}
self.basically_filtered = False
+ # Flags for DHCP request rule
+ self.dhcp_create = False
+ self.dhcp_created = False
+
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
self.iptables.ipv6['filter'].add_chain('sg-fallback')
@@ -191,6 +195,13 @@ class IptablesFirewallDriver(FirewallDriver):
LOG.debug(_('Filters added to instance'), instance=instance)
self.refresh_provider_fw_rules()
LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance)
+ # Ensure that DHCP request rule is updated if necessary
+ if (self.dhcp_create and not self.dhcp_created):
+ self.iptables.ipv4['filter'].add_rule(
+ 'INPUT',
+ '-s 0.0.0.0/32 -d 255.255.255.255/32 '
+ '-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
+ self.dhcp_created = True
self.iptables.apply()
def _create_filter(self, ips, chain_name):
@@ -272,6 +283,7 @@ class IptablesFirewallDriver(FirewallDriver):
if dhcp_server:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
'-j ACCEPT' % (dhcp_server,))
+ self.dhcp_create = True
def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info):
# make sure this is legacy nw_info
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 57e364115..5ba97a8a4 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -44,7 +44,6 @@ import errno
import eventlet
import functools
import glob
-import hashlib
import os
import shutil
import socket
@@ -205,6 +204,10 @@ libvirt_opts = [
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
+ cfg.ListOpt('disk_cachemodes',
+ default=[],
+ help='Specific cachemodes to use for different disk types '
+ 'e.g: ["file=directsync","block=none"]'),
]
CONF = cfg.CONF
@@ -318,6 +321,25 @@ class LibvirtDriver(driver.ComputeDriver):
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+ self.disk_cachemodes = {}
+
+ self.valid_cachemodes = ["default",
+ "none",
+ "writethrough",
+ "writeback",
+ "directsync",
+ "writethrough",
+ "unsafe",
+ ]
+
+ for mode_str in CONF.disk_cachemodes:
+ disk_type, sep, cache_mode = mode_str.partition('=')
+ if cache_mode not in self.valid_cachemodes:
+ LOG.warn(_("Invalid cachemode %(cache_mode)s specified "
+ "for disk type %(disk_type)s.") % locals())
+ continue
+ self.disk_cachemodes[disk_type] = cache_mode
+
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
@@ -339,6 +361,18 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = HostState(self)
return self._host_state
+ def set_cache_mode(self, conf):
+ """Set cache mode on LibvirtConfigGuestDisk object."""
+ try:
+ source_type = conf.source_type
+ driver_cache = conf.driver_cache
+ except AttributeError:
+ return
+
+ cache_mode = self.disk_cachemodes.get(source_type,
+ driver_cache)
+ conf.driver_cache = cache_mode
+
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
@@ -515,14 +549,14 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
- LOG.debug(_('Connecting to libvirt: %s'), self.uri)
+ LOG.debug(_('Connecting to libvirt: %s'), self.uri())
if not CONF.libvirt_nonblocking:
- self._wrapped_conn = self._connect(self.uri,
+ self._wrapped_conn = self._connect(self.uri(),
self.read_only)
else:
self._wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
- self._connect, self.uri, self.read_only)
+ self._connect, self.uri(), self.read_only)
try:
LOG.debug("Registering for lifecycle events %s" % str(self))
@@ -533,7 +567,7 @@ class LibvirtDriver(driver.ComputeDriver):
self)
except Exception, e:
LOG.warn(_("URI %s does not support events"),
- self.uri)
+ self.uri())
return self._wrapped_conn
@@ -551,8 +585,8 @@ class LibvirtDriver(driver.ComputeDriver):
return False
raise
- @property
- def uri(self):
+ @staticmethod
+ def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
@@ -662,8 +696,10 @@ class LibvirtDriver(driver.ComputeDriver):
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
+ old_domid = -1
if virt_dom is not None:
try:
+ old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
@@ -683,14 +719,16 @@ class LibvirtDriver(driver.ComputeDriver):
locals(), instance=instance)
raise
- def _wait_for_destroy():
+ def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
- state = self.get_info(instance)['state']
+ dom_info = self.get_info(instance)
+ state = dom_info['state']
+ new_domid = dom_info['id']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
@@ -701,8 +739,23 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.FixedIntervalLoopingCall(_wait_for_destroy)
+ # NOTE(wangpan): If the instance was booted again after destroy,
+ # this may be an endless loop, so check the id of
+ # the domain here; if it changed and the instance is
+ # still running, we should destroy it again.
+ # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
+ if new_domid != expected_domid:
+ LOG.info(_("Instance may be started again."),
+ instance=instance)
+ kwargs['is_running'] = True
+ raise utils.LoopingCallDone()
+
+ kwargs = {'is_running': False}
+ timer = utils.FixedIntervalLoopingCall(_wait_for_destroy, old_domid)
timer.start(interval=0.5).wait()
+ if kwargs['is_running']:
+ LOG.info(_("Going to destroy instance again."), instance=instance)
+ self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
@@ -744,16 +797,39 @@ class LibvirtDriver(driver.ComputeDriver):
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
- try:
- self.firewall_driver.unfilter_instance(instance,
- network_info=network_info)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- LOG.error(_("Error from libvirt during unfilter. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
- reason = "Error unfiltering instance."
- raise exception.InstanceTerminationFailure(reason=reason)
+ retry = True
+ while retry:
+ try:
+ self.firewall_driver.unfilter_instance(instance,
+ network_info=network_info)
+ except libvirt.libvirtError as e:
+ try:
+ state = self.get_info(instance)['state']
+ except exception.NotFound:
+ state = power_state.SHUTDOWN
+
+ if state != power_state.SHUTDOWN:
+ LOG.warn(_("Instance may be still running, destroy "
+ "it again."), instance=instance)
+ self._destroy(instance)
+ else:
+ retry = False
+ errcode = e.get_error_code()
+ LOG.error(_("Error from libvirt during unfilter. "
+ "Code=%(errcode)s Error=%(e)s") %
+ locals(), instance=instance)
+ reason = "Error unfiltering instance."
+ raise exception.InstanceTerminationFailure(reason=reason)
+ except Exception:
+ retry = False
+ raise
+ else:
+ retry = False
+
+ # FIXME(wangpan): if the instance is booted again here, such as by
+ # the soft reboot operation booting it here, it will
+ # become "running deleted", should we check and destroy
+ # it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
@@ -877,6 +953,7 @@ class LibvirtDriver(driver.ComputeDriver):
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
+ self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
@@ -1319,7 +1396,7 @@ class LibvirtDriver(driver.ComputeDriver):
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
- self._create_domain(domain=dom)
+ self._create_domain(domain=dom, instance=instance)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
@@ -1676,7 +1753,7 @@ class LibvirtDriver(driver.ComputeDriver):
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
- fname = disk_images['kernel_id']
+ fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
@@ -1684,7 +1761,7 @@ class LibvirtDriver(driver.ComputeDriver):
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
- fname = disk_images['ramdisk_id']
+ fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
@@ -1692,7 +1769,7 @@ class LibvirtDriver(driver.ComputeDriver):
user_id=instance['user_id'],
project_id=instance['project_id'])
- root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
+ root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
inst_type = instance['instance_type']
@@ -1738,6 +1815,7 @@ class LibvirtDriver(driver.ComputeDriver):
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
+ mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
@@ -1745,7 +1823,7 @@ class LibvirtDriver(driver.ComputeDriver):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
- swap['dev'], block_device_info)):
+ mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
@@ -1997,6 +2075,9 @@ class LibvirtDriver(driver.ComputeDriver):
'raw')
devices.append(diskconfig)
+ for d in devices:
+ self.set_cache_mode(d)
+
return devices
def get_guest_config_sysinfo(self, instance):
@@ -2259,7 +2340,8 @@ class LibvirtDriver(driver.ComputeDriver):
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
- 'cpu_time': cpu_time}
+ 'cpu_time': cpu_time,
+ 'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0):
@@ -2422,23 +2504,25 @@ class LibvirtDriver(driver.ComputeDriver):
return self._conn.getInfo()[1]
@staticmethod
- def get_local_gb_total():
- """Get the total hdd size(GB) of physical computer.
-
- :returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
+ def get_local_gb_info():
+ """Get local storage info of the compute node in GB.
+ :returns: A dict containing:
+ :total: How big the overall usable filesystem is (in gigabytes)
+ :free: How much space is free (in gigabytes)
+ :used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
- vg_total = libvirt_utils.volume_group_total_space(
+ info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
- return vg_total / (1024 ** 3)
else:
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ info = libvirt_utils.get_fs_info(CONF.instances_path)
+
+ for (k, v) in info.iteritems():
+ info[k] = v / (1024 ** 3)
+
+ return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
@@ -2504,24 +2588,6 @@ class LibvirtDriver(driver.ComputeDriver):
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
- def get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
-
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
-
- if CONF.libvirt_images_type == 'lvm':
- vg_used = libvirt_utils.volume_group_used_space(
- CONF.libvirt_images_volume_group)
- return vg_used / (1024 ** 3)
- else:
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
-
def get_hypervisor_type(self):
"""Get hypervisor type.
@@ -2693,17 +2759,35 @@ class LibvirtDriver(driver.ComputeDriver):
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
+
+ def _get_disk_available_least():
+ """Return total real disk available least size.
+
+            The amount of disk available when the block_migration command is
+            given disk_over_commit=False.
+
+            Computed by deducting the real instance disk size from the total
+            virtual disk size of all instances.
+
+ """
+ disk_free_gb = disk_info_dict['free']
+ disk_over_committed = self.get_disk_over_committed_size_total()
+ # Disk available least size
+ available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
+ return (available_least / (1024 ** 3))
+
+ disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
- 'local_gb': self.get_local_gb_total(),
+ 'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
- 'local_gb_used': self.get_local_gb_used(),
+ 'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
- 'disk_available_least': self.get_disk_available_least()}
+ 'disk_available_least': _get_disk_available_least()}
return dic
def check_can_live_migrate_destination(self, ctxt, instance_ref,
@@ -3232,22 +3316,11 @@ class LibvirtDriver(driver.ComputeDriver):
'disk_size': dk_size})
return jsonutils.dumps(disk_info)
- def get_disk_available_least(self):
- """Return disk available least size.
-
- The size of available disk, when block_migration command given
- disk_over_commit param is FALSE.
-
- The size that deducted real nstance disk size from the total size
- of the virtual disk of all instances.
-
- """
- # available size of the disk
- dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()
-
+ def get_disk_over_committed_size_total(self):
+ """Return total over committed disk size for all instances."""
# Disk size that all instance uses : virtual_size - disk_size
instances_name = self.list_instances()
- instances_sz = 0
+ disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
@@ -3255,7 +3328,7 @@ class LibvirtDriver(driver.ComputeDriver):
for info in disk_infos:
i_vt_sz = int(info['virt_disk_size'])
i_dk_sz = int(info['disk_size'])
- instances_sz += i_vt_sz - i_dk_sz
+ disk_over_committed_size += i_vt_sz - i_dk_sz
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
@@ -3267,9 +3340,7 @@ class LibvirtDriver(driver.ComputeDriver):
pass
# NOTE(gtt116): give change to do other task.
greenthread.sleep(0)
- # Disk available least size
- available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
- return (available_least_size / 1024 / 1024 / 1024)
+ return disk_over_committed_size
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
@@ -3545,6 +3616,9 @@ class LibvirtDriver(driver.ComputeDriver):
% locals())
return os.access(instance_path, os.W_OK)
+ def inject_network_info(self, instance, nw_info):
+ self.firewall_driver.setup_basic_filtering(instance, nw_info)
+
class HostState(object):
"""Manages information about the compute node through libvirt."""
@@ -3569,9 +3643,10 @@ class HostState(object):
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
- data["disk_total"] = self.driver.get_local_gb_total()
- data["disk_used"] = self.driver.get_local_gb_used()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
+ disk_info_dict = self.driver.get_local_gb_info()
+ data["disk_total"] = disk_info_dict['total']
+ data["disk_used"] = disk_info_dict['used']
+ data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index d66d61415..74a63b81e 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -54,6 +54,12 @@ imagecache_opts = [
cfg.BoolOpt('remove_unused_base_images',
default=True,
help='Should unused base images be removed?'),
+ cfg.BoolOpt('remove_unused_kernels',
+ default=False,
+ help='Should unused kernel images be removed? This is only '
+ 'safe to enable if all compute nodes have been updated '
+                     'to support this option. This will be enabled by default '
+ 'in future.'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
@@ -76,6 +82,29 @@ CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('instances_path', 'nova.compute.manager')
+def get_cache_fname(images, key):
+    """Return the cache filename to use for a given image ID.
+
+ Image files stored in the _base directory that match this pattern
+ are considered for cleanup by the image cache manager. The cache
+ manager considers the file to be in use if it matches an instance's
+ image_ref, kernel_id or ramdisk_id property.
+
+ However, in grizzly-3 and before, only the image_ref property was
+ considered. This means that it's unsafe to store kernel and ramdisk
+ images using this pattern until we're sure that all compute nodes
+ are running a cache manager newer than grizzly-3. For now, we
+ require admins to confirm that by setting the remove_unused_kernels
+ boolean but, at some point in the future, we'll be safely able to
+ assume this.
+ """
+ image_id = str(images[key])
+ if not CONF.remove_unused_kernels and key in ['kernel_id', 'ramdisk_id']:
+ return image_id
+ else:
+ return hashlib.sha1(image_id).hexdigest()
+
+
def get_info_filename(base_path):
"""Construct a filename for storing additional information about a base
image.
@@ -240,8 +269,8 @@ class ImageCacheManager(object):
"""Return a list of the images present in _base.
Determine what images we have on disk. There will be other files in
- this directory (for example kernels) so we only grab the ones which
- are the right length to be disk images.
+ this directory so we only grab the ones which are the right length
+ to be disk images.
Note that this does not return a value. It instead populates a class
variable with a list of images that we need to try and explain.
@@ -278,18 +307,22 @@ class ImageCacheManager(object):
self.instance_names.add(instance['name'] + '_resize')
self.instance_names.add(instance['uuid'] + '_resize')
- image_ref_str = str(instance['image_ref'])
- local, remote, insts = self.used_images.get(image_ref_str,
- (0, 0, []))
- if instance['host'] == CONF.host:
- local += 1
- else:
- remote += 1
- insts.append(instance['name'])
- self.used_images[image_ref_str] = (local, remote, insts)
+ for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']:
+ try:
+ image_ref_str = str(instance[image_key])
+ except KeyError:
+ continue
+ local, remote, insts = self.used_images.get(image_ref_str,
+ (0, 0, []))
+ if instance['host'] == CONF.host:
+ local += 1
+ else:
+ remote += 1
+ insts.append(instance['name'])
+ self.used_images[image_ref_str] = (local, remote, insts)
- self.image_popularity.setdefault(image_ref_str, 0)
- self.image_popularity[image_ref_str] += 1
+ self.image_popularity.setdefault(image_ref_str, 0)
+ self.image_popularity[image_ref_str] += 1
def _list_backing_images(self):
"""List the backing images currently in use."""
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 20af11ddc..cf3fd9d26 100755
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -205,7 +205,8 @@ def create_lvm_image(vg, lv, size, sparse=False):
:size: size of image in bytes
:sparse: create sparse logical volume
"""
- free_space = volume_group_free_space(vg)
+ vg_info = get_volume_group_info(vg)
+ free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
@@ -232,33 +233,14 @@ def create_lvm_image(vg, lv, size, sparse=False):
execute(*cmd, run_as_root=True, attempts=3)
-def volume_group_free_space(vg):
- """Return available space on volume group in bytes.
-
- :param vg: volume group name
- """
- out, err = execute('vgs', '--noheadings', '--nosuffix',
- '--units', 'b', '-o', 'vg_free', vg,
- run_as_root=True)
- return int(out.strip())
-
-
-def volume_group_total_space(vg):
- """Return total space on volume group in bytes.
-
- :param vg: volume group name
- """
-
- out, err = execute('vgs', '--noheadings', '--nosuffix',
- '--units', 'b', '-o', 'vg_size', vg,
- run_as_root=True)
- return int(out.strip())
-
-
-def volume_group_used_space(vg):
- """Return available space on volume group in bytes.
+def get_volume_group_info(vg):
+ """Return free/used/total space info for a volume group in bytes
:param vg: volume group name
+ :returns: A dict containing:
+              :total: How big the volume group is (in bytes)
+ :free: How much space is free (in bytes)
+ :used: How much space is used (in bytes)
"""
out, err = execute('vgs', '--noheadings', '--nosuffix',
@@ -270,7 +252,9 @@ def volume_group_used_space(vg):
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
- return int(info[0]) - int(info[1])
+ return {'total': int(info[0]),
+ 'free': int(info[1]),
+ 'used': int(info[0]) - int(info[1])}
def list_logical_volumes(vg):
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 523857e42..8d9664751 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -172,7 +172,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
mac_id = mapping['mac'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
- conf.filtername = name
+ if self.get_firewall_required():
+ conf.filtername = name
designer.set_vif_bandwidth_config(conf, instance)
return conf
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index eec2d6467..1277251b5 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -67,6 +67,9 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
convert the type itself if necessary.
"""
+ if network_info is None:
+ return None
+
# the code below depends on the legacy 'network_info'
if hasattr(network_info, 'legacy'):
network_info = network_info.legacy()
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index 58ad29099..6c8e4b9a5 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -159,9 +159,9 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
if not os.path.isfile(file_path):
LOG.debug(_("Fetching image '%s' from glance") % image_id)
- images.fetch_to_raw(context, image_id, file_path,
- instance['user_id'],
- project_id=instance['project_id'])
+ images.fetch(context, image_id, file_path,
+ instance['user_id'],
+ instance['project_id'])
else:
LOG.debug((_("Using image found at '%s'") % file_path))
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index b70d1583e..9b136f351 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -57,6 +57,7 @@ vmware_group = cfg.OptGroup(name='vmware',
CONF = cfg.CONF
CONF.register_group(vmware_group)
CONF.register_opts(vmware_vif_opts, vmware_group)
+CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
LOG = logging.getLogger(__name__)
diff --git a/openstack-common.conf b/openstack-common.conf
index abbc7570e..a2688fa45 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cliutils,context,db,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version
+modules=cliutils,context,db,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version,processutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/run_tests.sh b/run_tests.sh
index 68a564754..29e723c7f 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -16,6 +16,7 @@ function usage {
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
+ echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo " --virtual-env-path <path> Location of the virtualenv directory"
@@ -46,6 +47,7 @@ function process_options {
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
+ -d|--debug) debug=1;;
--virtual-env-path)
(( i++ ))
venv_path=${!i}
@@ -80,6 +82,7 @@ wrapper=""
just_pep8=0
no_pep8=0
coverage=0
+debug=0
recreate_db=1
update=0
@@ -109,6 +112,20 @@ function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
+ if [ $debug -eq 1 ]; then
+ if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
+ # Default to running all tests if specific test is not
+ # provided.
+ testrargs="discover ./nova/tests"
+ fi
+ ${wrapper} python -m testtools.run $testropts $testrargs
+
+ # Short circuit because all of the testr and coverage stuff
+ # below does not make sense when running testtools.run for
+ # debugging purposes.
+ return $?
+ fi
+
if [ $coverage -eq 1 ]; then
TESTRTESTS="$TESTRTESTS --coverage"
else
diff --git a/tools/hacking.py b/tools/hacking.py
index 1279e87e9..dbb4fbe79 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -176,9 +176,12 @@ def nova_import_rules(logical_line):
Examples:
Okay: from os import path
+ Okay: from os import path as p
+ Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
N302: from os.path import dirname as dirname2
+ N302: from os.path import (dirname as dirname2)
N303: from os.path import *
N304: from .compute import rpcapi
"""
@@ -186,6 +189,7 @@ def nova_import_rules(logical_line):
# pass the doctest, since the relativity depends on the file's locality
def is_module_for_sure(mod, search_path=sys.path):
+ mod = mod.replace('(', '') # Ignore parentheses
try:
mod_name = mod
while '.' in mod_name:
@@ -201,7 +205,8 @@ def nova_import_rules(logical_line):
# NOTE(vish): the import error might be due
# to a missing dependency
missing = str(exc).split()[-1]
- if missing != mod.split('.')[-1]:
+ if (missing != mod.split('.')[-1] or
+ "cannot import" in str(exc)):
_missingImport.add(missing)
return True
return False
diff --git a/tools/pip-requires b/tools/pip-requires
index bb4eb7119..d9326cb46 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,5 +1,5 @@
SQLAlchemy>=0.7.8,<=0.7.9
-Cheetah==2.4.4
+Cheetah>=2.4.4
amqplib>=0.6.1
anyjson>=0.2.4
argparse
@@ -14,7 +14,7 @@ PasteDeploy==1.5.0
paste
sqlalchemy-migrate>=0.7.2
netaddr
-suds==0.4
+suds>=0.4
paramiko
pyasn1
Babel>=0.9.6
diff --git a/tools/run_pep8.sh b/tools/run_pep8.sh
index 80c20a92d..a2a982cdc 100755
--- a/tools/run_pep8.sh
+++ b/tools/run_pep8.sh
@@ -24,4 +24,4 @@ PLUGINS_PATH=${SCRIPT_ROOT}/plugins/xenserver/xenapi/etc/xapi.d/plugins
PYTHONPATH=${PLUGINS_PATH} ${PEP8} ./plugins/xenserver/xenapi \
`find plugins/xenserver/xenapi/etc/xapi.d/plugins -type f -perm +111`
-! pyflakes nova/ | grep "imported but unused"
+! pyflakes nova/ | grep "imported but unused\|redefinition of function"