summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.testr.conf4
-rwxr-xr-xbin/nova-rootwrap67
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json8
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml3
-rw-r--r--doc/api_samples/os-networks-associate/network-associate-host-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-associate-host-req.xml2
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-host-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-host-req.xml1
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-project-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-project-req.xml1
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-req.xml1
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.json4
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.xml2
-rw-r--r--etc/nova/policy.json1
-rw-r--r--etc/nova/rootwrap.conf14
-rw-r--r--nova/api/metadata/base.py34
-rw-r--r--nova/api/metadata/handler.py3
-rw-r--r--nova/api/metadata/password.py44
-rw-r--r--nova/api/openstack/compute/contrib/networks.py40
-rw-r--r--nova/api/openstack/compute/contrib/networks_associate.py69
-rw-r--r--nova/api/openstack/compute/contrib/used_limits.py6
-rw-r--r--nova/compute/api.py2
-rw-r--r--nova/compute/manager.py21
-rw-r--r--nova/compute/resource_tracker.py43
-rw-r--r--nova/conductor/api.py31
-rw-r--r--nova/conductor/manager.py21
-rw-r--r--nova/conductor/rpcapi.py26
-rw-r--r--nova/db/api.py9
-rw-r--r--nova/db/sqlalchemy/api.py11
-rw-r--r--nova/db/sqlalchemy/session.py10
-rw-r--r--nova/locale/nova.pot1423
-rw-r--r--nova/network/api.py12
-rw-r--r--nova/network/manager.py23
-rw-r--r--nova/network/quantum/nova_ipam_lib.py273
-rw-r--r--nova/network/rpcapi.py6
-rw-r--r--nova/rootwrap/filters.py1
-rw-r--r--nova/rootwrap/wrapper.py59
-rw-r--r--nova/test.py24
-rw-r--r--nova/tests/api/ec2/test_cloud.py3
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py55
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_used_limits.py6
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py2
-rw-r--r--nova/tests/compute/test_resource_tracker.py72
-rw-r--r--nova/tests/compute/test_virtapi.py93
-rw-r--r--nova/tests/conductor/test_conductor.py48
-rw-r--r--nova/tests/fake_policy.py1
-rw-r--r--nova/tests/hyperv/basetestcase.py11
-rw-r--r--nova/tests/image/test_s3.py3
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/test_api_samples.py131
-rw-r--r--nova/tests/integrated/test_extensions.py2
-rw-r--r--nova/tests/network/test_rpcapi.py7
-rw-r--r--nova/tests/test_api.py2
-rw-r--r--nova/tests/test_imagebackend.py8
-rw-r--r--nova/tests/test_metadata.py67
-rw-r--r--nova/tests/test_nova_rootwrap.py47
-rw-r--r--nova/tests/test_virt_drivers.py10
-rw-r--r--nova/tests/test_xenapi.py109
-rw-r--r--nova/tests/virt/disk/test_nbd.py115
-rw-r--r--nova/tests/xenapi/test_vm_utils.py52
-rw-r--r--nova/virt/disk/mount/nbd.py27
-rw-r--r--nova/virt/driver.py7
-rw-r--r--nova/virt/fake.py10
-rw-r--r--nova/virt/firewall.py4
-rw-r--r--nova/virt/virtapi.py10
-rw-r--r--nova/virt/xenapi/driver.py8
-rw-r--r--nova/virt/xenapi/fake.py8
-rw-r--r--nova/virt/xenapi/vm_utils.py60
-rw-r--r--nova/virt/xenapi/vmops.py23
-rw-r--r--nova/virt/xenapi/volume_utils.py39
-rw-r--r--nova/virt/xenapi/volumeops.py25
-rwxr-xr-xrun_tests.sh103
-rw-r--r--setup.cfg7
-rw-r--r--tools/install_venv.py3
-rw-r--r--tools/test-requires14
-rw-r--r--tox.ini19
88 files changed, 2176 insertions, 1380 deletions
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 000000000..fd9442349
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./nova/tests $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index 3322bc815..c8e880d79 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -33,7 +33,9 @@
"""
import ConfigParser
+import logging
import os
+import pwd
import signal
import subprocess
import sys
@@ -51,30 +53,22 @@ def _subprocess_setup():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+def _exit_error(execname, message, errorcode, log=True):
+ print "%s: %s" % (execname, message)
+ if log:
+ logging.error(message)
+ sys.exit(errorcode)
+
+
if __name__ == '__main__':
# Split arguments, require at least a command
execname = sys.argv.pop(0)
if len(sys.argv) < 2:
- print "%s: %s" % (execname, "No command specified")
- sys.exit(RC_NOCOMMAND)
+ _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
configfile = sys.argv.pop(0)
userargs = sys.argv[:]
- # Load configuration
- config = ConfigParser.RawConfigParser()
- config.read(configfile)
- try:
- filters_path = config.get("DEFAULT", "filters_path").split(",")
- if config.has_option("DEFAULT", "exec_dirs"):
- exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
- else:
- # Use system PATH if exec_dirs is not specified
- exec_dirs = os.environ["PATH"].split(':')
- except ConfigParser.Error:
- print "%s: Incorrect configuration file: %s" % (execname, configfile)
- sys.exit(RC_BADCONFIG)
-
# Add ../ to sys.path to allow running from branch
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
os.pardir, os.pardir))
@@ -83,14 +77,37 @@ if __name__ == '__main__':
from nova.rootwrap import wrapper
+ # Load configuration
+ try:
+ rawconfig = ConfigParser.RawConfigParser()
+ rawconfig.read(configfile)
+ config = wrapper.RootwrapConfig(rawconfig)
+ except ValueError as exc:
+ msg = "Incorrect value in %s: %s" % (configfile, exc.message)
+ _exit_error(execname, msg, RC_BADCONFIG, log=False)
+ except ConfigParser.Error:
+ _exit_error(execname, "Incorrect configuration file: %s" % configfile,
+ RC_BADCONFIG, log=False)
+
+ if config.use_syslog:
+ wrapper.setup_syslog(execname,
+ config.syslog_log_facility,
+ config.syslog_log_level)
+
# Execute command if it matches any of the loaded filters
- filters = wrapper.load_filters(filters_path)
+ filters = wrapper.load_filters(config.filters_path)
try:
filtermatch = wrapper.match_filter(filters, userargs,
- exec_dirs=exec_dirs)
+ exec_dirs=config.exec_dirs)
if filtermatch:
- obj = subprocess.Popen(filtermatch.get_command(userargs,
- exec_dirs=exec_dirs),
+ command = filtermatch.get_command(userargs,
+ exec_dirs=config.exec_dirs)
+ if config.use_syslog:
+ logging.info("(%s > %s) Executing %s (filter match = %s)" % (
+ os.getlogin(), pwd.getpwuid(os.getuid())[0],
+ command, filtermatch.name))
+
+ obj = subprocess.Popen(command,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
@@ -100,9 +117,11 @@ if __name__ == '__main__':
sys.exit(obj.returncode)
except wrapper.FilterMatchNotExecutable as exc:
- print "Executable not found: %s" % exc.match.exec_path
- sys.exit(RC_NOEXECFOUND)
+ msg = ("Executable not found: %s (filter match = %s)"
+ % (exc.match.exec_path, exc.match.name))
+ _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
except wrapper.NoFilterMatched:
- print "Unauthorized command: %s" % ' '.join(userargs)
- sys.exit(RC_UNAUTHORIZED)
+ msg = ("Unauthorized command: %s (no filter matched)"
+ % ' '.join(userargs))
+ _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 79211b946..b85fae2de 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -297,6 +297,14 @@
"updated": "2011-12-23T00:00:00+00:00"
},
{
+ "alias": "os-networks-associate",
+ "description": "Network association support",
+ "links": [],
+ "name": "NetworkAssociationSupport",
+ "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
+ "updated": "2012-11-19T00:00:00+00:00"
+ },
+ {
"alias": "os-quota-class-sets",
"description": "Quota classes management support",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 6c53c875b..049498fc4 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -125,6 +125,9 @@
<extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
<description>Admin-only Network Management Extension</description>
</extension>
+ <extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
+ <description>Network association support</description>
+ </extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
<description>Quota classes management support</description>
</extension>
diff --git a/doc/api_samples/os-networks-associate/network-associate-host-req.json b/doc/api_samples/os-networks-associate/network-associate-host-req.json
new file mode 100644
index 000000000..a6487211e
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-associate-host-req.json
@@ -0,0 +1,3 @@
+{
+ "associate_host": "testHost"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-associate-host-req.xml b/doc/api_samples/os-networks-associate/network-associate-host-req.xml
new file mode 100644
index 000000000..3221be61d
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-associate-host-req.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<associate_host>testHost</associate_host> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-host-req.json b/doc/api_samples/os-networks-associate/network-disassociate-host-req.json
new file mode 100644
index 000000000..d6c5419fd
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-host-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml
new file mode 100644
index 000000000..3c2cc0d84
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml
@@ -0,0 +1 @@
+<disassociate_host/> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-project-req.json b/doc/api_samples/os-networks-associate/network-disassociate-project-req.json
new file mode 100644
index 000000000..6c0e46730
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-project-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml
new file mode 100644
index 000000000..be94feb9f
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml
@@ -0,0 +1 @@
+<disassociate_project/> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-req.json b/doc/api_samples/os-networks-associate/network-disassociate-req.json
new file mode 100644
index 000000000..66ab7cef0
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-req.xml
new file mode 100644
index 000000000..bcad8e0a8
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-req.xml
@@ -0,0 +1 @@
+<disassociate/> \ No newline at end of file
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.json b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
index 21ed3a082..c5593b7e7 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.json
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
@@ -14,9 +14,9 @@
"maxTotalRAMSize": 51200,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
- "totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
- "totalSecurityGroupsUsed": 0
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0
},
"rate": [
{
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
index 745a4828a..c2b0572e5 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
@@ -26,10 +26,10 @@
<limit name="totalRAMUsed" value="0"/>
<limit name="totalInstancesUsed" value="0"/>
<limit name="maxSecurityGroups" value="10"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="totalKeyPairsUsed" value="0"/>
<limit name="maxTotalInstances" value="10"/>
<limit name="totalCoresUsed" value="0"/>
<limit name="maxTotalRAMSize" value="51200"/>
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index e5eb92c7c..d06430129 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -61,6 +61,7 @@
"compute_extension:multinic": "",
"compute_extension:networks": "rule:admin_api",
"compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "rule:admin_api",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "rule:admin_api",
"compute_extension:quota_classes": "",
diff --git a/etc/nova/rootwrap.conf b/etc/nova/rootwrap.conf
index 5d6034eb9..fb2997abd 100644
--- a/etc/nova/rootwrap.conf
+++ b/etc/nova/rootwrap.conf
@@ -11,3 +11,17 @@ filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, user0, user1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index b271662b8..1c316bf9c 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -24,11 +24,13 @@ import os
import posixpath
from nova.api.ec2 import ec2utils
+from nova.api.metadata import password
from nova import block_device
from nova import context
from nova import db
from nova import network
from nova.openstack.common import cfg
+from nova.openstack.common import timeutils
from nova.virt import netutils
@@ -57,11 +59,17 @@ VERSIONS = [
'2009-04-04',
]
-OPENSTACK_VERSIONS = ["2012-08-10"]
+FOLSOM = '2012-08-10'
+GRIZZLY = '2013-04-04'
+OPENSTACK_VERSIONS = [
+ FOLSOM,
+ GRIZZLY,
+]
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
UD_NAME = "user_data"
+PASS_NAME = "password"
class InvalidMetadataVersion(Exception):
@@ -128,6 +136,13 @@ class InstanceMetadata():
for item in instance.get('metadata', []):
self.launch_metadata[item['key']] = item['value']
+ self.password = ''
+ # get password if set
+ for item in instance.get('system_metadata', []):
+ if item['key'] == 'password':
+ self.password = item['value'] or ''
+ break
+
self.uuid = instance.get('uuid')
self.content = {}
@@ -257,6 +272,8 @@ class InstanceMetadata():
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
+ if self._check_os_version(GRIZZLY, version):
+ ret.append(PASS_NAME)
return ret
if path == UD_NAME:
@@ -264,6 +281,9 @@ class InstanceMetadata():
raise KeyError(path)
return self.userdata_raw
+ if path == PASS_NAME and self._check_os_version(GRIZZLY, version):
+ return password.handle_password
+
if path != MD_JSON_NAME:
raise KeyError(path)
@@ -303,8 +323,11 @@ class InstanceMetadata():
return data[path]
- def _check_version(self, required, requested):
- return VERSIONS.index(requested) >= VERSIONS.index(required)
+ def _check_version(self, required, requested, versions=VERSIONS):
+ return versions.index(requested) >= versions.index(required)
+
+ def _check_os_version(self, required, requested):
+ return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance['hostname'],
@@ -332,7 +355,10 @@ class InstanceMetadata():
# specifically handle the top level request
if len(path_tokens) == 1:
if path_tokens[0] == "openstack":
- versions = OPENSTACK_VERSIONS + ["latest"]
+ # NOTE(vish): don't show versions that are in the future
+ today = timeutils.utcnow().strftime("%Y-%m-%d")
+ versions = [v for v in OPENSTACK_VERSIONS if v <= today]
+ versions += ["latest"]
else:
versions = VERSIONS + ["latest"]
return versions
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 06fdce30e..b164c5fea 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -120,6 +120,9 @@ class MetadataRequestHandler(wsgi.Application):
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
+ if callable(data):
+ return data(req, meta_data)
+
return base.ec2_md_print(data)
def _handle_remote_ip_request(self, req):
diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py
new file mode 100644
index 000000000..3cda67eee
--- /dev/null
+++ b/nova/api/metadata/password.py
@@ -0,0 +1,44 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import context
+from nova import db
+
+
+MAX_SIZE = 256
+
+
+def handle_password(req, meta_data):
+ ctxt = context.get_admin_context()
+ password = meta_data.password
+ if req.method == 'GET':
+ return meta_data.password
+ elif req.method == 'POST':
+ # NOTE(vish): The conflict will only happen once the metadata cache
+ # updates, but it isn't a huge issue if it can be set for
+ # a short window.
+ if meta_data.password:
+ raise exc.HTTPConflict()
+ if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
+ msg = _("Request is too large.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ db.instance_system_metadata_update(ctxt,
+ meta_data.uuid,
+ {'password': req.body},
+ False)
+ else:
+ raise exc.HTTPBadRequest()
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py
index 0a494ea88..a45de72fe 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/networks.py
@@ -21,6 +21,8 @@ import webob
from webob import exc
from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import db
from nova import exception
from nova import network
from nova.openstack.common import log as logging
@@ -52,35 +54,11 @@ def network_dict(context, network):
return {}
-class NetworkController(object):
+class NetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = network_api or network.API()
- def action(self, req, id, body):
- _actions = {
- 'disassociate': self._disassociate,
- }
-
- for action, data in body.iteritems():
- try:
- return _actions[action](req, id, body)
- except KeyError:
- msg = _("Network does not have %s action") % action
- raise exc.HTTPBadRequest(explanation=msg)
-
- raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
-
- def _disassociate(self, request, network_id, body):
- context = request.environ['nova.context']
- authorize(context)
- LOG.debug(_("Disassociating network with id %s"), network_id)
- try:
- self.network_api.disassociate(context, network_id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
def index(self, req):
context = req.environ['nova.context']
authorize_view(context)
@@ -88,6 +66,18 @@ class NetworkController(object):
result = [network_dict(context, net_ref) for net_ref in networks]
return {'networks': result}
+ @wsgi.action("disassociate")
+ def _disassociate_host_and_project(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating network with id %s"), id)
+
+ try:
+ self.network_api.associate(context, id, host=None, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
def show(self, req, id):
context = req.environ['nova.context']
authorize_view(context)
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
new file mode 100644
index 000000000..a923c769d
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -0,0 +1,69 @@
+import netaddr
+import webob
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import network
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'networks_associate')
+
+
+class NetworkAssociateActionController(wsgi.Controller):
+ """Network Association API Controller."""
+
+ def __init__(self, network_api=None):
+ self.network_api = network_api or network.API()
+
+ @wsgi.action("disassociate_host")
+ def _disassociate_host_only(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating host with network with id %s"), id)
+ try:
+ self.network_api.associate(context, id, host=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ @wsgi.action("disassociate_project")
+ def _disassociate_project_only(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating project with network with id %s"), id)
+ try:
+ self.network_api.associate(context, id, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ @wsgi.action("associate_host")
+ def _associate_host(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ self.network_api.associate(context, id,
+ host=body['associate_host'])
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+
+class Networks_associate(extensions.ExtensionDescriptor):
+ """Network association support"""
+
+ name = "NetworkAssociationSupport"
+ alias = "os-networks-associate"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "networks_associate/api/v2")
+ updated = "2012-11-19T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ extension = extensions.ControllerExtension(
+ self, 'os-networks', NetworkAssociateActionController())
+
+ return [extension]
diff --git a/nova/api/openstack/compute/contrib/used_limits.py b/nova/api/openstack/compute/contrib/used_limits.py
index a7ac33ae9..a5e0b378b 100644
--- a/nova/api/openstack/compute/contrib/used_limits.py
+++ b/nova/api/openstack/compute/contrib/used_limits.py
@@ -56,10 +56,8 @@ class UsedLimitsController(wsgi.Controller):
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
- 'totalVolumesUsed': 'volumes',
- 'totalVolumeGigabytesUsed': 'gigabytes',
- 'totalSecurityGroupsUsed': 'floating_ips',
- 'totalKeyPairsUsed': 'key_pairs',
+ 'totalFloatingIpsUsed': 'floating_ips',
+ 'totalSecurityGroupsUsed': 'security_groups',
}
used_limits = {}
for display_name, quota in quota_map.iteritems():
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8747f0d99..19f5a3a26 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -462,7 +462,7 @@ class API(base.Base):
# Handle config_drive
config_drive_id = None
- if config_drive and config_drive is not True:
+ if config_drive and not utils.is_valid_boolstr(config_drive):
# config_drive is volume id
config_drive_id = config_drive
config_drive = None
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 979f7c53a..6efc83fb9 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -278,23 +278,22 @@ class ComputeVirtAPI(virtapi.VirtAPI):
return self._compute.conductor_api.aggregate_metadata_delete(
context, aggregate, key)
- def security_group_get_by_instance(self, context, instance_uuid):
- return self._compute.db.security_group_get_by_instance(context,
- instance_uuid)
+ def security_group_get_by_instance(self, context, instance):
+ return self._compute.conductor_api.security_group_get_by_instance(
+ context, instance)
def security_group_rule_get_by_security_group(self, context,
- security_group_id):
- return self._compute.db.security_group_rule_get_by_security_group(
- context, security_group_id)
+ security_group):
+ return (self._compute.conductor_api.
+ security_group_rule_get_by_security_group(context,
+ security_group))
def provider_fw_rule_get_all(self, context):
- return self._compute.db.provider_fw_rule_get_all(context)
+ return self._compute.conductor_api.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
- return self._compute.db.agent_build_get_by_triple(context,
- hypervisor,
- os,
- architecture)
+ return self._compute.conductor_api.agent_build_get_by_triple(
+ context, hypervisor, os, architecture)
class ComputeManager(manager.SchedulerDependentManager):
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 7306b5940..82f8ec461 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -268,6 +268,11 @@ class ResourceTracker(object):
self._update_usage_from_migrations(resources, migrations)
+ # Detect and account for orphaned instances that may exist on the
+ # hypervisor, but are not in the DB:
+ orphans = self._find_orphaned_instances()
+ self._update_usage_from_orphans(resources, orphans)
+
self._report_final_resource_view(resources)
self._sync_compute_node(context, resources)
@@ -364,8 +369,8 @@ class ResourceTracker(object):
def _update_usage(self, resources, usage, sign=1):
resources['memory_mb_used'] += sign * usage['memory_mb']
- resources['local_gb_used'] += sign * usage['root_gb']
- resources['local_gb_used'] += sign * usage['ephemeral_gb']
+ resources['local_gb_used'] += sign * usage.get('root_gb', 0)
+ resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
# free ram and disk may be negative, depending on policy:
resources['free_ram_mb'] = (resources['memory_mb'] -
@@ -501,6 +506,40 @@ class ResourceTracker(object):
for instance in instances:
self._update_usage_from_instance(resources, instance)
+ def _find_orphaned_instances(self):
+ """Given the set of instances and migrations already account for
+ by resource tracker, sanity check the hypervisor to determine
+ if there are any "orphaned" instances left hanging around.
+
+ Orphans could be consuming memory and should be accounted for in
+ usage calculations to guard against potential out of memory
+ errors.
+ """
+ uuids1 = frozenset(self.tracked_instances.keys())
+ uuids2 = frozenset(self.tracked_migrations.keys())
+ uuids = uuids1 | uuids2
+
+ usage = self.driver.get_per_instance_usage()
+ vuuids = frozenset(usage.keys())
+
+ orphan_uuids = vuuids - uuids
+ orphans = [usage[uuid] for uuid in orphan_uuids]
+
+ return orphans
+
+ def _update_usage_from_orphans(self, resources, orphans):
+ """Include orphaned instances in usage."""
+ for orphan in orphans:
+ uuid = orphan['uuid']
+ memory_mb = orphan['memory_mb']
+
+ LOG.warn(_("Detected running orphan instance: %(uuid)s (consuming "
+ "%(memory_mb)s MB memory") % locals())
+
+ # just record memory usage for the orphan
+ usage = {'memory_mb': orphan['memory_mb']}
+ self._update_usage(resources, usage)
+
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used"]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 501ccb6de..4c2f031e6 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -118,6 +118,20 @@ class LocalAPI(object):
def get_backdoor_port(self, context, host):
raise exc.InvalidRequest
+ def security_group_get_by_instance(self, context, instance):
+ return self._manager.security_group_get_by_instance(context, instance)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ return self._manager.security_group_rule_get_by_security_group(
+ context, secgroup)
+
+ def provider_fw_rule_get_all(self, context):
+ return self._manager.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self._manager.agent_build_get_by_triple(context, hypervisor,
+ os, architecture)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager"""
@@ -183,3 +197,20 @@ class API(object):
# currently.
def get_backdoor_port(self, context, host):
return self.conductor_rpcapi.get_backdoor_port(context)
+
+ def security_group_get_by_instance(self, context, instance):
+ return self.conductor_rpcapi.security_group_get_by_instance(context,
+ instance)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ return self.conductor_rpcapi.security_group_rule_get_by_security_group(
+ context, secgroup)
+
+ def provider_fw_rule_get_all(self, context):
+ return self.conductor_rpcapi.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self.conductor_rpcapi.agent_build_get_by_triple(context,
+ hypervisor,
+ os,
+ architecture)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 42e27e038..712644738 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD"""
- RPC_API_VERSION = '1.7'
+ RPC_API_VERSION = '1.10'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -130,3 +130,22 @@ class ConductorManager(manager.SchedulerDependentManager):
def get_backdoor_port(self, context):
return self.backdoor_port
+
+ def security_group_get_by_instance(self, context, instance):
+ group = self.db.security_group_get_by_instance(context,
+ instance['id'])
+ return jsonutils.to_primitive(group)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ rule = self.db.security_group_rule_get_by_security_group(
+ context, secgroup['id'])
+ return jsonutils.to_primitive(rule)
+
+ def provider_fw_rule_get_all(self, context):
+ rules = self.db.provider_fw_rule_get_all(context)
+ return jsonutils.to_primitive(rules)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ info = self.db.agent_build_get_by_triple(context, hypervisor, os,
+ architecture)
+ return jsonutils.to_primitive(info)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 04bff83c7..0beeb3d2f 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -35,6 +35,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
+ 1.8 - Added security_group_get_by_instance and
+ security_group_rule_get_by_security_group
+ 1.9 - Added provider_fw_rule_get_all
+ 1.10 - Added agent_build_get_by_triple
"""
BASE_RPC_API_VERSION = '1.0'
@@ -114,3 +118,25 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def get_backdoor_port(self, context):
msg = self.make_msg('get_backdoor_port')
return self.call(context, msg, version='1.6')
+
+ def security_group_get_by_instance(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('security_group_get_by_instance',
+ instance=instance_p)
+ return self.call(context, msg, version='1.8')
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ secgroup_p = jsonutils.to_primitive(secgroup)
+ msg = self.make_msg('security_group_rule_get_by_security_group',
+ secgroup=secgroup_p)
+ return self.call(context, msg, version='1.8')
+
+ def provider_fw_rule_get_all(self, context):
+ msg = self.make_msg('provider_fw_rule_get_all')
+ return self.call(context, msg, version='1.9')
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ msg = self.make_msg('agent_build_get_by_triple',
+ hypervisor=hypervisor, os=os,
+ architecture=architecture)
+ return self.call(context, msg, version='1.10')
diff --git a/nova/db/api.py b/nova/db/api.py
index 67d8e7618..4acff8a99 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -785,9 +785,12 @@ def network_delete_safe(context, network_id):
return IMPL.network_delete_safe(context, network_id)
-def network_disassociate(context, network_id):
- """Disassociate the network from project or raise if it does not exist."""
- return IMPL.network_disassociate(context, network_id)
+def network_disassociate(context, network_id, disassociate_host=True,
+ disassociate_project=True):
+ """Disassociate the network from project or host and raise if it does
+ not exist."""
+ return IMPL.network_disassociate(context, network_id, disassociate_host,
+ disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 29c40bb69..ec85ddcef 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2135,9 +2135,14 @@ def network_delete_safe(context, network_id):
@require_admin_context
-def network_disassociate(context, network_id):
- network_update(context, network_id, {'project_id': None,
- 'host': None})
+def network_disassociate(context, network_id, disassociate_host,
+ disassociate_project):
+ net_update = {}
+ if disassociate_project:
+ net_update['project_id'] = None
+ if disassociate_host:
+ net_update['host'] = None
+ network_update(context, network_id, net_update)
@require_context
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 66ff0f916..cb05cc444 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -216,11 +216,11 @@ sql_opts = [
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('sql_connection_trace',
- default=False,
- help='Add python stack traces to SQL as comment strings'),
- cfg.IntOpt('sql_dbpool_enable',
- default=False,
- help="enable the use of eventlet's db_pool for MySQL"),
+ default=False,
+ help='Add python stack traces to SQL as comment strings'),
+ cfg.BoolOpt('sql_dbpool_enable',
+ default=False,
+ help="enable the use of eventlet's db_pool for MySQL"),
]
CONF = cfg.CONF
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 3c387b1d8..29c38b3f1 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2012-12-09 00:02+0000\n"
+"POT-Creation-Date: 2012-12-14 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -145,8 +145,8 @@ msgstr ""
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:225 nova/api/ec2/cloud.py:417 nova/api/ec2/cloud.py:442
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2305
+#: nova/exception.py:225 nova/api/ec2/cloud.py:436 nova/api/ec2/cloud.py:461
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2307
msgid "Keypair data is invalid"
msgstr ""
@@ -162,7 +162,7 @@ msgstr ""
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:241 nova/api/openstack/compute/servers.py:1303
+#: nova/exception.py:241 nova/api/openstack/compute/servers.py:1309
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
@@ -176,7 +176,7 @@ msgstr ""
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:253 nova/api/ec2/cloud.py:599
+#: nova/exception.py:253 nova/api/ec2/cloud.py:618
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
@@ -764,7 +764,7 @@ msgstr ""
#: nova/exception.py:756
#, python-format
-msgid "Flavor access not found for %(flavor_id) / %(project_id) combination."
+msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
#: nova/exception.py:761
@@ -1097,6 +1097,16 @@ msgstr ""
msgid "The CRL file for %(project)s could not be found"
msgstr ""
+#: nova/hooks.py:62
+#, python-format
+msgid "Running %(name)s pre-hook: %(obj)s"
+msgstr ""
+
+#: nova/hooks.py:70
+#, python-format
+msgid "Running %(name)s post-hook: %(obj)s"
+msgstr ""
+
#: nova/manager.py:166
#, python-format
msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
@@ -1231,120 +1241,120 @@ msgstr ""
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:174
+#: nova/utils.py:197
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:185
+#: nova/utils.py:208
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:209 nova/utils.py:287 nova/virt/powervm/common.py:82
+#: nova/utils.py:232 nova/utils.py:310 nova/virt/powervm/common.py:82
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:222
+#: nova/utils.py:245
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:262
+#: nova/utils.py:285
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:264
+#: nova/utils.py:287
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:268
+#: nova/utils.py:291
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:303
+#: nova/utils.py:326
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:462
+#: nova/utils.py:488
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:465
+#: nova/utils.py:491
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:500
+#: nova/utils.py:526
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:561
+#: nova/utils.py:587
msgid "in looping call"
msgstr ""
-#: nova/utils.py:621
+#: nova/utils.py:647
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:750
+#: nova/utils.py:776
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:779
+#: nova/utils.py:805
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:903
+#: nova/utils.py:929
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:980
+#: nova/utils.py:1006
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1090 nova/virt/configdrive.py:154
+#: nova/utils.py:1116 nova/virt/configdrive.py:160
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/wsgi.py:85
+#: nova/wsgi.py:87
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:109
+#: nova/wsgi.py:111
msgid "Stopping WSGI server."
msgstr ""
-#: nova/wsgi.py:127
+#: nova/wsgi.py:129
msgid "WSGI server has stopped."
msgstr ""
-#: nova/wsgi.py:196
+#: nova/wsgi.py:198
msgid "You must implement __call__"
msgstr ""
-#: nova/wsgi.py:382
+#: nova/wsgi.py:384
#, python-format
msgid "Loading app %(name)s from %(path)s"
msgstr ""
-#: nova/api/auth.py:109
+#: nova/api/auth.py:116
msgid "Invalid service catalog json."
msgstr ""
-#: nova/api/auth.py:132
+#: nova/api/auth.py:139
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
@@ -1494,241 +1504,241 @@ msgstr ""
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:365
+#: nova/api/ec2/cloud.py:384
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:391
+#: nova/api/ec2/cloud.py:410
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:407
+#: nova/api/ec2/cloud.py:426
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:414 nova/api/ec2/cloud.py:439
+#: nova/api/ec2/cloud.py:433 nova/api/ec2/cloud.py:458
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:420 nova/api/ec2/cloud.py:445
+#: nova/api/ec2/cloud.py:439 nova/api/ec2/cloud.py:464
#: nova/api/openstack/compute/contrib/keypairs.py:101
#, python-format
msgid "Key pair '%s' already exists."
msgstr ""
-#: nova/api/ec2/cloud.py:429
+#: nova/api/ec2/cloud.py:448
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:452
+#: nova/api/ec2/cloud.py:471
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:586 nova/api/ec2/cloud.py:707
+#: nova/api/ec2/cloud.py:605 nova/api/ec2/cloud.py:726
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:591
+#: nova/api/ec2/cloud.py:610
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:629 nova/api/ec2/cloud.py:661
+#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:680
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:652
+#: nova/api/ec2/cloud.py:671
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:718
+#: nova/api/ec2/cloud.py:737
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:794
+#: nova/api/ec2/cloud.py:813
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:798 nova/api/openstack/compute/contrib/volumes.py:240
+#: nova/api/ec2/cloud.py:817 nova/api/openstack/compute/contrib/volumes.py:240
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:826
+#: nova/api/ec2/cloud.py:845
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:839
+#: nova/api/ec2/cloud.py:858
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:847
+#: nova/api/ec2/cloud.py:866
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:860 nova/api/openstack/compute/contrib/volumes.py:419
+#: nova/api/ec2/cloud.py:879 nova/api/openstack/compute/contrib/volumes.py:419
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:866
+#: nova/api/ec2/cloud.py:885
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:892 nova/api/ec2/cloud.py:949
-#: nova/api/ec2/cloud.py:1485 nova/api/ec2/cloud.py:1500
+#: nova/api/ec2/cloud.py:911 nova/api/ec2/cloud.py:968
+#: nova/api/ec2/cloud.py:1504 nova/api/ec2/cloud.py:1519
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1014
+#: nova/api/ec2/cloud.py:1033
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1165
+#: nova/api/ec2/cloud.py:1184
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1169
+#: nova/api/ec2/cloud.py:1188
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1173
+#: nova/api/ec2/cloud.py:1192
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1178
+#: nova/api/ec2/cloud.py:1197
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1181
+#: nova/api/ec2/cloud.py:1200
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1189
+#: nova/api/ec2/cloud.py:1208
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1197
+#: nova/api/ec2/cloud.py:1216
#: nova/api/openstack/compute/contrib/floating_ips.py:257
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1206
+#: nova/api/ec2/cloud.py:1225
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1209
+#: nova/api/ec2/cloud.py:1228
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1212
+#: nova/api/ec2/cloud.py:1231
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1220
+#: nova/api/ec2/cloud.py:1239
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1225
+#: nova/api/ec2/cloud.py:1244
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1228
+#: nova/api/ec2/cloud.py:1247
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1255
+#: nova/api/ec2/cloud.py:1274
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1287
+#: nova/api/ec2/cloud.py:1306
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1297
+#: nova/api/ec2/cloud.py:1316
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1306
+#: nova/api/ec2/cloud.py:1325
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1315
+#: nova/api/ec2/cloud.py:1334
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1406
+#: nova/api/ec2/cloud.py:1425
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1422
+#: nova/api/ec2/cloud.py:1441
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1441
+#: nova/api/ec2/cloud.py:1460
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1503
+#: nova/api/ec2/cloud.py:1522
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1505
+#: nova/api/ec2/cloud.py:1524
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1507
+#: nova/api/ec2/cloud.py:1526
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1509
+#: nova/api/ec2/cloud.py:1528
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1522
+#: nova/api/ec2/cloud.py:1541
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1551
+#: nova/api/ec2/cloud.py:1570
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1581
+#: nova/api/ec2/cloud.py:1600
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1599
+#: nova/api/ec2/cloud.py:1618
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1632
+#: nova/api/ec2/cloud.py:1651
msgid "Invalid CIDR"
msgstr ""
@@ -1808,70 +1818,70 @@ msgstr ""
msgid "Extension %(ext_name)s extending resource: %(collection)s"
msgstr ""
-#: nova/api/openstack/common.py:101
+#: nova/api/openstack/common.py:114
#, python-format
msgid ""
"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
"Bad upgrade or db corrupted?"
msgstr ""
-#: nova/api/openstack/common.py:140 nova/api/openstack/common.py:174
+#: nova/api/openstack/common.py:153 nova/api/openstack/common.py:187
msgid "limit param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:143 nova/api/openstack/common.py:178
+#: nova/api/openstack/common.py:156 nova/api/openstack/common.py:191
msgid "limit param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:168
+#: nova/api/openstack/common.py:181
msgid "offset param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:182
+#: nova/api/openstack/common.py:195
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:217 nova/api/openstack/compute/servers.py:536
+#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:542
#, python-format
msgid "marker [%s] not found"
msgstr ""
-#: nova/api/openstack/common.py:257
+#: nova/api/openstack/common.py:270
#, python-format
msgid "href %s does not contain version"
msgstr ""
-#: nova/api/openstack/common.py:272
+#: nova/api/openstack/common.py:285
msgid "Image metadata limit exceeded"
msgstr ""
-#: nova/api/openstack/common.py:280
+#: nova/api/openstack/common.py:293
msgid "Image metadata key cannot be blank"
msgstr ""
-#: nova/api/openstack/common.py:283
+#: nova/api/openstack/common.py:296
msgid "Image metadata key too long"
msgstr ""
-#: nova/api/openstack/common.py:286
+#: nova/api/openstack/common.py:299
msgid "Invalid image metadata"
msgstr ""
-#: nova/api/openstack/common.py:337
+#: nova/api/openstack/common.py:350
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr ""
-#: nova/api/openstack/common.py:340
+#: nova/api/openstack/common.py:353
#, python-format
msgid "Instance is in an invalid state for '%(action)s'"
msgstr ""
-#: nova/api/openstack/common.py:420
+#: nova/api/openstack/common.py:433
msgid "Rejecting snapshot request, snapshots currently disabled"
msgstr ""
-#: nova/api/openstack/common.py:422
+#: nova/api/openstack/common.py:435
msgid "Instance snapshots are not permitted at this time."
msgstr ""
@@ -2023,7 +2033,7 @@ msgstr ""
msgid "subclasses must implement construct()!"
msgstr ""
-#: nova/api/openstack/compute/extensions.py:31
+#: nova/api/openstack/compute/extensions.py:38
msgid "Initializing extension manager."
msgstr ""
@@ -2098,229 +2108,230 @@ msgstr ""
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:450
-#: nova/api/openstack/compute/servers.py:462
-#: nova/api/openstack/compute/servers.py:555
-#: nova/api/openstack/compute/servers.py:723
-#: nova/api/openstack/compute/servers.py:983
-#: nova/api/openstack/compute/servers.py:1086
-#: nova/api/openstack/compute/servers.py:1254
+#: nova/api/openstack/compute/servers.py:456
+#: nova/api/openstack/compute/servers.py:468
+#: nova/api/openstack/compute/servers.py:561
+#: nova/api/openstack/compute/servers.py:729
+#: nova/api/openstack/compute/servers.py:989
+#: nova/api/openstack/compute/servers.py:1092
+#: nova/api/openstack/compute/servers.py:1260
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:501
+#: nova/api/openstack/compute/servers.py:507
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:520
+#: nova/api/openstack/compute/servers.py:526
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:539
+#: nova/api/openstack/compute/servers.py:545
msgid "Flavor could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:562
+#: nova/api/openstack/compute/servers.py:568
msgid "Server name is not a string or unicode"
msgstr ""
-#: nova/api/openstack/compute/servers.py:566
+#: nova/api/openstack/compute/servers.py:572
msgid "Server name is an empty string"
msgstr ""
-#: nova/api/openstack/compute/servers.py:570
+#: nova/api/openstack/compute/servers.py:576
msgid "Server name must be less than 256 characters."
msgstr ""
-#: nova/api/openstack/compute/servers.py:587
+#: nova/api/openstack/compute/servers.py:593
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:590
+#: nova/api/openstack/compute/servers.py:596
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:594
+#: nova/api/openstack/compute/servers.py:600
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:625
+#: nova/api/openstack/compute/servers.py:631
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:628
+#: nova/api/openstack/compute/servers.py:634
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:638
+#: nova/api/openstack/compute/servers.py:644
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:648
+#: nova/api/openstack/compute/servers.py:654
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:661
+#: nova/api/openstack/compute/servers.py:667
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:667
+#: nova/api/openstack/compute/servers.py:673
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:670
+#: nova/api/openstack/compute/servers.py:676
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:696
+#: nova/api/openstack/compute/servers.py:702
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:703
+#: nova/api/openstack/compute/servers.py:709
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:710
+#: nova/api/openstack/compute/servers.py:716
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:739
+#: nova/api/openstack/compute/servers.py:745
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:787
-#: nova/api/openstack/compute/servers.py:893
+#: nova/api/openstack/compute/servers.py:793
+#: nova/api/openstack/compute/servers.py:899
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:827
+#: nova/api/openstack/compute/servers.py:833
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:830
+#: nova/api/openstack/compute/servers.py:836
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:835
+#: nova/api/openstack/compute/servers.py:841
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:838
+#: nova/api/openstack/compute/servers.py:844
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:841
+#: nova/api/openstack/compute/servers.py:847
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:890
+#: nova/api/openstack/compute/servers.py:896
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:896
+#: nova/api/openstack/compute/servers.py:902
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:975
+#: nova/api/openstack/compute/servers.py:981
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1001
-#: nova/api/openstack/compute/servers.py:1021
+#: nova/api/openstack/compute/servers.py:1007
+#: nova/api/openstack/compute/servers.py:1027
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1007
+#: nova/api/openstack/compute/servers.py:1013
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1027
+#: nova/api/openstack/compute/servers.py:1033
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1040
+#: nova/api/openstack/compute/servers.py:1046
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1044
+#: nova/api/openstack/compute/servers.py:1050
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1057
+#: nova/api/openstack/compute/servers.py:1063
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1069
+#: nova/api/openstack/compute/servers.py:1075
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1072
+#: nova/api/openstack/compute/servers.py:1078
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1096
+#: nova/api/openstack/compute/servers.py:1102
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1105
+#: nova/api/openstack/compute/servers.py:1111
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1132
+#: nova/api/openstack/compute/servers.py:1138
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1145
+#: nova/api/openstack/compute/servers.py:1151
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1149
-#: nova/api/openstack/compute/servers.py:1351
+#: nova/api/openstack/compute/servers.py:1155
+#: nova/api/openstack/compute/servers.py:1357
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1160
+#: nova/api/openstack/compute/servers.py:1166
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1173
+#: nova/api/openstack/compute/servers.py:1179
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1176
+#: nova/api/openstack/compute/servers.py:1182
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1194
+#: nova/api/openstack/compute/servers.py:1200
#: nova/api/openstack/compute/contrib/aggregates.py:143
+#: nova/api/openstack/compute/contrib/coverage_ext.py:218
#: nova/api/openstack/compute/contrib/keypairs.py:78
#: nova/api/openstack/compute/contrib/networks.py:72
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1199
+#: nova/api/openstack/compute/servers.py:1205
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1261
+#: nova/api/openstack/compute/servers.py:1267
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1294
+#: nova/api/openstack/compute/servers.py:1300
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1378
+#: nova/api/openstack/compute/servers.py:1384
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
@@ -2360,7 +2371,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/admin_actions.py:186
#: nova/api/openstack/compute/contrib/admin_actions.py:314
#: nova/api/openstack/compute/contrib/multinic.py:41
-#: nova/api/openstack/compute/contrib/rescue.py:45
+#: nova/api/openstack/compute/contrib/rescue.py:44
msgid "Server not found"
msgstr ""
@@ -2486,6 +2497,16 @@ msgstr ""
msgid "Unable to get console"
msgstr ""
+#: nova/api/openstack/compute/contrib/coverage_ext.py:85
+#, python-format
+msgid "No backdoor API command for service: %s\n"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:216
+#, python-format
+msgid "Coverage doesn't have %s action"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/disk_config.py:43
#, python-format
msgid "%s must be either 'MANUAL' or 'AUTO'."
@@ -2607,16 +2628,28 @@ msgstr ""
msgid "Putting host %(host)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:188
+#: nova/api/openstack/compute/contrib/hosts.py:182
+msgid "Virt driver does not implement host maintenance mode."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Setting host %(host)s to %(state)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:235
+#: nova/api/openstack/compute/contrib/hosts.py:195
+msgid "Virt driver does not implement host disabled status."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:207
+msgid "Virt driver does not implement host power management."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:239
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:243
+#: nova/api/openstack/compute/contrib/hosts.py:247
msgid "Host not found"
msgstr ""
@@ -2813,200 +2846,200 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/compute/api.py:251
+#: nova/compute/api.py:252
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:258
+#: nova/compute/api.py:259
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:267
+#: nova/compute/api.py:268
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:287
+#: nova/compute/api.py:288
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:297
+#: nova/compute/api.py:298
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:301
+#: nova/compute/api.py:302
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:305
+#: nova/compute/api.py:306
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:537
+#: nova/compute/api.py:538
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:609
+#: nova/compute/api.py:610
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:636
+#: nova/compute/api.py:637
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:871
+#: nova/compute/api.py:873
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:966
+#: nova/compute/api.py:968
msgid "host for instance is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1010
+#: nova/compute/api.py:1012
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:1032
+#: nova/compute/api.py:1034
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1079
+#: nova/compute/api.py:1081
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1093
+#: nova/compute/api.py:1095
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1160
+#: nova/compute/api.py:1162
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1292
+#: nova/compute/api.py:1294
#, python-format
msgid "Image type not recognized %s"
msgstr ""
-#: nova/compute/api.py:1401
+#: nova/compute/api.py:1403
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1733
+#: nova/compute/api.py:1735
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1742
+#: nova/compute/api.py:1744
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1784
+#: nova/compute/api.py:1786
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:1960
+#: nova/compute/api.py:1962
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1968
+#: nova/compute/api.py:1970
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2036
+#: nova/compute/api.py:2038
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2121
+#: nova/compute/api.py:2123
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2278
+#: nova/compute/api.py:2280
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2282
+#: nova/compute/api.py:2284
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2383
+#: nova/compute/api.py:2385
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2386
+#: nova/compute/api.py:2388
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2394
+#: nova/compute/api.py:2396
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2400
+#: nova/compute/api.py:2402
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2420
+#: nova/compute/api.py:2422
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2423
+#: nova/compute/api.py:2425
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2430
+#: nova/compute/api.py:2432
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2495
+#: nova/compute/api.py:2497
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2503
+#: nova/compute/api.py:2505
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2506
+#: nova/compute/api.py:2508
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2763
+#: nova/compute/api.py:2765
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2772
+#: nova/compute/api.py:2774
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2775
+#: nova/compute/api.py:2777
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2786
+#: nova/compute/api.py:2788
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3101,504 +3134,504 @@ msgstr ""
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:334
+#: nova/compute/manager.py:331
#, python-format
msgid "%(nodename)s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:363
+#: nova/compute/manager.py:360
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:382
+#: nova/compute/manager.py:379
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:394
+#: nova/compute/manager.py:391
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:408
+#: nova/compute/manager.py:405
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:418
+#: nova/compute/manager.py:415
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:449
+#: nova/compute/manager.py:446
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:525
+#: nova/compute/manager.py:522
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:597 nova/compute/manager.py:1832
+#: nova/compute/manager.py:594 nova/compute/manager.py:1827
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:630
+#: nova/compute/manager.py:627
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:653
+#: nova/compute/manager.py:650
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:686 nova/compute/manager.py:1883
+#: nova/compute/manager.py:683 nova/compute/manager.py:1878
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:703
+#: nova/compute/manager.py:700
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:708
+#: nova/compute/manager.py:705
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:714
+#: nova/compute/manager.py:711
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:737
+#: nova/compute/manager.py:734
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:767
+#: nova/compute/manager.py:764
msgid "Instance has already been created"
msgstr ""
-#: nova/compute/manager.py:813
+#: nova/compute/manager.py:810
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:819
+#: nova/compute/manager.py:816
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:829
+#: nova/compute/manager.py:826
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:850
+#: nova/compute/manager.py:847
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:854
+#: nova/compute/manager.py:851
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:867
+#: nova/compute/manager.py:864
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:885
+#: nova/compute/manager.py:882
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:909
+#: nova/compute/manager.py:906
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:981
+#: nova/compute/manager.py:978
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1012
+#: nova/compute/manager.py:1009
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1015
+#: nova/compute/manager.py:1012
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1022
+#: nova/compute/manager.py:1019
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1046
+#: nova/compute/manager.py:1044
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1086 nova/compute/manager.py:2052
-#: nova/compute/manager.py:3334
+#: nova/compute/manager.py:1084 nova/compute/manager.py:2046
+#: nova/compute/manager.py:3342
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1220
+#: nova/compute/manager.py:1218
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1306
+#: nova/compute/manager.py:1304
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1330
+#: nova/compute/manager.py:1328
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1339
+#: nova/compute/manager.py:1337
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1376
+#: nova/compute/manager.py:1374
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1382
+#: nova/compute/manager.py:1380
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1435
+#: nova/compute/manager.py:1433
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1442
+#: nova/compute/manager.py:1440
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1447
+#: nova/compute/manager.py:1445
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1478
+#: nova/compute/manager.py:1476
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1485
+#: nova/compute/manager.py:1483
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1495
+#: nova/compute/manager.py:1493
msgid "set_admin_password is not implemented by this driver."
msgstr ""
-#: nova/compute/manager.py:1511
+#: nova/compute/manager.py:1509
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1519
+#: nova/compute/manager.py:1517
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1534
+#: nova/compute/manager.py:1532
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1538
+#: nova/compute/manager.py:1536
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1559
+#: nova/compute/manager.py:1557
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1572
+#: nova/compute/manager.py:1570
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1605
+#: nova/compute/manager.py:1604
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1626
+#: nova/compute/manager.py:1625
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1799
+#: nova/compute/manager.py:1794
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:1805
+#: nova/compute/manager.py:1800
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1814
+#: nova/compute/manager.py:1809
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2049
+#: nova/compute/manager.py:2043
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2105
+#: nova/compute/manager.py:2099
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2122
+#: nova/compute/manager.py:2116
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2160
+#: nova/compute/manager.py:2154
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2190
+#: nova/compute/manager.py:2184
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2212
+#: nova/compute/manager.py:2206
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2217
+#: nova/compute/manager.py:2211
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2220
+#: nova/compute/manager.py:2214
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2237
+#: nova/compute/manager.py:2231
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2262
+#: nova/compute/manager.py:2256
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2290
+#: nova/compute/manager.py:2284
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2334
+#: nova/compute/manager.py:2328
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2343
+#: nova/compute/manager.py:2337
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2358
+#: nova/compute/manager.py:2352
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2387
+#: nova/compute/manager.py:2381
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2391
+#: nova/compute/manager.py:2385
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2404
+#: nova/compute/manager.py:2398
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2429
+#: nova/compute/manager.py:2423
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2466
+#: nova/compute/manager.py:2460
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2526
+#: nova/compute/manager.py:2520
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2586
+#: nova/compute/manager.py:2580
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2612
+#: nova/compute/manager.py:2606
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2661
+#: nova/compute/manager.py:2655
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2663
+#: nova/compute/manager.py:2657
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2677
+#: nova/compute/manager.py:2671
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2811
+#: nova/compute/manager.py:2805
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2840
+#: nova/compute/manager.py:2849
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2846
+#: nova/compute/manager.py:2855
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2855
+#: nova/compute/manager.py:2864
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2862
+#: nova/compute/manager.py:2871
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2866
+#: nova/compute/manager.py:2875
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2873
+#: nova/compute/manager.py:2882
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2881
+#: nova/compute/manager.py:2890
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2898
+#: nova/compute/manager.py:2907
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2916
+#: nova/compute/manager.py:2925
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2939
+#: nova/compute/manager.py:2948
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3053
+#: nova/compute/manager.py:3061
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3071
+#: nova/compute/manager.py:3079
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3099
+#: nova/compute/manager.py:3107
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3105 nova/compute/manager.py:3143
+#: nova/compute/manager.py:3113 nova/compute/manager.py:3151
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3130
+#: nova/compute/manager.py:3138
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3166
+#: nova/compute/manager.py:3174
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3178 nova/compute/manager.py:3189
-#: nova/compute/manager.py:3203
+#: nova/compute/manager.py:3186 nova/compute/manager.py:3197
+#: nova/compute/manager.py:3211
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3183
+#: nova/compute/manager.py:3191
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3196
+#: nova/compute/manager.py:3204
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3212
+#: nova/compute/manager.py:3220
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3220
+#: nova/compute/manager.py:3228
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3233
+#: nova/compute/manager.py:3241
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3288
+#: nova/compute/manager.py:3296
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3295
+#: nova/compute/manager.py:3303
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3302
+#: nova/compute/manager.py:3310
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
@@ -3615,79 +3648,79 @@ msgid ""
" claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:244
+#: nova/compute/resource_tracker.py:245
msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:248
+#: nova/compute/resource_tracker.py:249
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:294
+#: nova/compute/resource_tracker.py:295
#, python-format
msgid "Compute_service record created for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:299
+#: nova/compute/resource_tracker.py:300
#, python-format
msgid "Compute_service record updated for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:312
+#: nova/compute/resource_tracker.py:313
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/resource_tracker.py:322
+#: nova/compute/resource_tracker.py:323
#, python-format
msgid "Hypervisor: free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:323
+#: nova/compute/resource_tracker.py:324
#, python-format
msgid "Hypervisor: free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:328
+#: nova/compute/resource_tracker.py:329
#, python-format
msgid "Hypervisor: free VCPUs: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:330
+#: nova/compute/resource_tracker.py:331
msgid "Hypervisor: VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:337
+#: nova/compute/resource_tracker.py:338
#, python-format
msgid "Free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:338
+#: nova/compute/resource_tracker.py:339
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:343
+#: nova/compute/resource_tracker.py:344
#, python-format
msgid "Free VCPUS: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:345
+#: nova/compute/resource_tracker.py:346
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:383
+#: nova/compute/resource_tracker.py:384
#, python-format
msgid "Updating from migration %s"
msgstr ""
-#: nova/compute/resource_tracker.py:441
+#: nova/compute/resource_tracker.py:442
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:509
+#: nova/compute/resource_tracker.py:510
#, python-format
msgid "Missing keys: %s"
msgstr ""
@@ -3706,7 +3739,7 @@ msgstr ""
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/manager.py:53
+#: nova/conductor/manager.py:59
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
@@ -3777,7 +3810,7 @@ msgid ""
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:2730
+#: nova/db/sqlalchemy/api.py:2740
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
@@ -3822,7 +3855,7 @@ msgstr ""
msgid "volume_usage_cache table not dropped"
msgstr ""
-#: nova/image/glance.py:147
+#: nova/image/glance.py:159
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
@@ -3906,185 +3939,205 @@ msgstr ""
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
-#: nova/network/ldapdns.py:97
+#: nova/network/ldapdns.py:98
#, python-format
msgid ""
"Found multiple matches for domain %(domain)s.\n"
"%(entry)s"
msgstr ""
-#: nova/network/ldapdns.py:124
+#: nova/network/ldapdns.py:125
#, python-format
msgid "Unable to dequalify. %(name)s is not in %(domain)s.\n"
msgstr ""
-#: nova/network/ldapdns.py:319
+#: nova/network/ldapdns.py:320
msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/ldapdns.py:362 nova/network/minidns.py:167
+#: nova/network/ldapdns.py:363 nova/network/minidns.py:169
msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:186
+#: nova/network/linux_net.py:190
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:221
+#: nova/network/linux_net.py:225
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:246
+#: nova/network/linux_net.py:250
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:381
+#: nova/network/linux_net.py:385
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:587
+#: nova/network/linux_net.py:591
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:813
+#: nova/network/linux_net.py:829
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:853
+#: nova/network/linux_net.py:870
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:855
+#: nova/network/linux_net.py:872
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:920
+#: nova/network/linux_net.py:937
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:922
+#: nova/network/linux_net.py:939
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1059
+#: nova/network/linux_net.py:1076
#, python-format
msgid "Error clearing stale veth %s"
msgstr ""
-#: nova/network/linux_net.py:1162
+#: nova/network/linux_net.py:1199
#, python-format
msgid "Starting VLAN inteface %s"
msgstr ""
-#: nova/network/linux_net.py:1201
+#: nova/network/linux_net.py:1230
+#, python-format
+msgid "Failed unplugging VLAN interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1233
+#, python-format
+msgid "Unplugged VLAN interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1255
#, python-format
msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1213
+#: nova/network/linux_net.py:1267
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/linux_net.py:1246
+#: nova/network/linux_net.py:1300
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1383
+#: nova/network/linux_net.py:1340
+#, python-format
+msgid "Failed unplugging bridge interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1343
+#, python-format
+msgid "Unplugged bridge interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1498
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1391
+#: nova/network/linux_net.py:1506
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1410
+#: nova/network/linux_net.py:1525
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1412
+#: nova/network/linux_net.py:1527
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:307
+#: nova/network/manager.py:314
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:316 nova/network/manager.py:576
+#: nova/network/manager.py:323 nova/network/manager.py:585
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:331
+#: nova/network/manager.py:338
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:395
+#: nova/network/manager.py:402
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:413
+#: nova/network/manager.py:420
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:417
+#: nova/network/manager.py:424
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:438
+#: nova/network/manager.py:445
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:499
+#: nova/network/manager.py:507
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:705
+#: nova/network/manager.py:716
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:712
+#: nova/network/manager.py:723
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:742
+#: nova/network/manager.py:753
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:750
+#: nova/network/manager.py:761
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:797
+#: nova/network/manager.py:808
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4092,39 +4145,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:843
+#: nova/network/manager.py:854
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:853
+#: nova/network/manager.py:864
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:971
+#: nova/network/manager.py:982
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:975
+#: nova/network/manager.py:986
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1102
+#: nova/network/manager.py:1113
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1107
+#: nova/network/manager.py:1118
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1143
+#: nova/network/manager.py:1154
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1373
+#: nova/network/manager.py:1384
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4132,119 +4185,119 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1462
+#: nova/network/manager.py:1472
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1479
+#: nova/network/manager.py:1491
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1483
+#: nova/network/manager.py:1495
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1491
+#: nova/network/manager.py:1503
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1496
+#: nova/network/manager.py:1508
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1500
+#: nova/network/manager.py:1512
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1503
+#: nova/network/manager.py:1515
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1522
+#: nova/network/manager.py:1534
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1546
+#: nova/network/manager.py:1558
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1566
+#: nova/network/manager.py:1578
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1647
+#: nova/network/manager.py:1659
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1650
+#: nova/network/manager.py:1662
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1661
+#: nova/network/manager.py:1673
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1718
+#: nova/network/manager.py:1730
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1738
+#: nova/network/manager.py:1750
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2231
+#: nova/network/manager.py:2243
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2238
+#: nova/network/manager.py:2250
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
" is %(network_size)s"
msgstr ""
-#: nova/network/minidns.py:44
+#: nova/network/minidns.py:46
#, python-format
msgid "minidns file is |%s|"
msgstr ""
-#: nova/network/minidns.py:71 nova/network/minidns.py:102
+#: nova/network/minidns.py:73 nova/network/minidns.py:104
msgid "Invalid name"
msgstr ""
-#: nova/network/minidns.py:74
+#: nova/network/minidns.py:76
msgid "This driver only supports type 'a'"
msgstr ""
-#: nova/network/minidns.py:118
+#: nova/network/minidns.py:120
#, python-format
msgid "Cannot delete entry |%s|"
msgstr ""
-#: nova/network/minidns.py:204
+#: nova/network/minidns.py:206
#, python-format
msgid "Cannot delete domain |%s|"
msgstr ""
-#: nova/network/model.py:337
+#: nova/network/model.py:339
msgid "v4 subnets are required for legacy nw_info"
msgstr ""
@@ -4276,6 +4329,10 @@ msgstr ""
msgid "empty project id for instance %s"
msgstr ""
+#: nova/network/quantumv2/api.py:150
+msgid "Port not found"
+msgstr ""
+
#: nova/network/quantumv2/api.py:158
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
@@ -4326,17 +4383,17 @@ msgstr ""
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:177
+#: nova/openstack/common/log.py:168
#, python-format
msgid "Deprecated Config: %s"
msgstr ""
-#: nova/openstack/common/log.py:309
+#: nova/openstack/common/log.py:300
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
-#: nova/openstack/common/log.py:467
+#: nova/openstack/common/log.py:458
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
@@ -4728,7 +4785,7 @@ msgstr ""
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:67 nova/scheduler/manager.py:183
+#: nova/scheduler/driver.py:67 nova/scheduler/manager.py:186
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
@@ -4806,7 +4863,7 @@ msgstr ""
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:167
+#: nova/scheduler/manager.py:170
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
@@ -5191,16 +5248,16 @@ msgstr ""
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:2982
+#: nova/tests/compute/test_compute.py:3016
msgid "wrong host/node"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:555
+#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:559
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:490
+#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:494
#, python-format
msgid "Failed to destroy vm %s"
msgstr ""
@@ -5232,12 +5289,12 @@ msgid ""
"arguments \"%(params)s\""
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:144
+#: nova/tests/integrated/test_api_samples.py:146
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:148
+#: nova/tests/integrated/test_api_samples.py:150
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5245,25 +5302,25 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:156
+#: nova/tests/integrated/test_api_samples.py:158
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:159
+#: nova/tests/integrated/test_api_samples.py:161
#, python-format
msgid ""
"Length mismatch: %(result)s\n"
"%(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:170
+#: nova/tests/integrated/test_api_samples.py:172
#, python-format
msgid "Result: %(res_obj)s not in %(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:188
-#: nova/tests/integrated/test_api_samples.py:201
+#: nova/tests/integrated/test_api_samples.py:190
+#: nova/tests/integrated/test_api_samples.py:203
#, python-format
msgid ""
"Values do not match:\n"
@@ -5330,21 +5387,21 @@ msgstr ""
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/driver.py:794
+#: nova/virt/driver.py:789
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:797
+#: nova/virt/driver.py:792
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/driver.py:804
+#: nova/virt/driver.py:799
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
-#: nova/virt/fake.py:198
+#: nova/virt/fake.py:195
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
@@ -5555,30 +5612,34 @@ msgstr ""
msgid "Release loop device %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:59
-msgid "nbd unavailable: module not loaded"
+#: nova/virt/disk/mount/nbd.py:57 nova/virt/disk/mount/nbd.py:71
+msgid "No free nbd devices"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:64
-msgid "No free nbd devices"
+#: nova/virt/disk/mount/nbd.py:62
+msgid "ndb module not loaded"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:83
+#: nova/virt/disk/mount/nbd.py:63
+msgid "nbd unavailable: module not loaded"
+msgstr ""
+
+#: nova/virt/disk/mount/nbd.py:88
#, python-format
msgid "Get nbd device %(dev)s for %(imgfile)s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:88
+#: nova/virt/disk/mount/nbd.py:93
#, python-format
msgid "qemu-nbd error: %s"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:100
+#: nova/virt/disk/mount/nbd.py:105
#, python-format
msgid "nbd device %s did not show up"
msgstr ""
-#: nova/virt/disk/mount/nbd.py:110
+#: nova/virt/disk/mount/nbd.py:114
#, python-format
msgid "Release nbd device %s"
msgstr ""
@@ -5745,37 +5806,37 @@ msgstr ""
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1491
-#: nova/virt/xenapi/vm_utils.py:505
+#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1503
+#: nova/virt/xenapi/vm_utils.py:510
#, python-format
msgid "block_device_list %s"
msgstr ""
-#: nova/virt/hyperv/driver.py:190 nova/virt/hyperv/driver.py:193
+#: nova/virt/hyperv/driver.py:183 nova/virt/hyperv/driver.py:186
msgid "plug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:196
+#: nova/virt/hyperv/driver.py:189
msgid "ensure_filtering_rules_for_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:201
+#: nova/virt/hyperv/driver.py:194
msgid "unfilter_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:205
+#: nova/virt/hyperv/driver.py:198
msgid "confirm_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:210
+#: nova/virt/hyperv/driver.py:203
msgid "finish_revert_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:216
+#: nova/virt/hyperv/driver.py:209
msgid "finish_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:219
+#: nova/virt/hyperv/driver.py:212
msgid "get_console_output called"
msgstr ""
@@ -5978,161 +6039,166 @@ msgstr ""
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:183 nova/virt/libvirt/driver.py:1430
+#: nova/virt/hyperv/vmops.py:183 nova/virt/libvirt/driver.py:1436
msgid "Using config drive"
msgstr ""
-#: nova/virt/hyperv/vmops.py:194 nova/virt/libvirt/driver.py:1440
+#: nova/virt/hyperv/vmops.py:194 nova/virt/libvirt/driver.py:1446
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:239
+#: nova/virt/hyperv/vmops.py:201 nova/virt/libvirt/driver.py:1451
+#, python-format
+msgid "Creating config drive failed with error: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:243
#, python-format
msgid "Failed to create VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:242
+#: nova/virt/hyperv/vmops.py:246
#, python-format
msgid "Created VM %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:259
+#: nova/virt/hyperv/vmops.py:263
#, python-format
msgid "Set memory for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:272
+#: nova/virt/hyperv/vmops.py:276
#, python-format
msgid "Set vcpus for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:276
+#: nova/virt/hyperv/vmops.py:280
#, python-format
msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
msgstr ""
-#: nova/virt/hyperv/vmops.py:285
+#: nova/virt/hyperv/vmops.py:289
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmops.py:293
+#: nova/virt/hyperv/vmops.py:297
#, python-format
msgid "Failed to add scsi controller to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:310
+#: nova/virt/hyperv/vmops.py:314
#, python-format
msgid "Creating disk for %(vm_name)s by attaching disk file %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:338
+#: nova/virt/hyperv/vmops.py:342
#, python-format
msgid "Failed to add drive to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:341
+#: nova/virt/hyperv/vmops.py:345
#, python-format
msgid "New %(drive_type)s drive path is %(drive_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:366
+#: nova/virt/hyperv/vmops.py:370
#, python-format
msgid "Failed to add %(drive_type)s image to VM %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:368
+#: nova/virt/hyperv/vmops.py:372
#, python-format
msgid "Created drive type %(drive_type)s for %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:373
+#: nova/virt/hyperv/vmops.py:377
#, python-format
msgid "Creating nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:378
+#: nova/virt/hyperv/vmops.py:382
msgid "Cannot find vSwitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:398
+#: nova/virt/hyperv/vmops.py:402
msgid "Failed creating a port on the external vswitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:399
+#: nova/virt/hyperv/vmops.py:403
#, python-format
msgid "Failed creating port for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:402
+#: nova/virt/hyperv/vmops.py:406
#, python-format
msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:414
+#: nova/virt/hyperv/vmops.py:418
#, python-format
msgid "Failed to add nic to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:416
+#: nova/virt/hyperv/vmops.py:420
#, python-format
msgid "Created nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:423 nova/virt/hyperv/vmops.py:426
+#: nova/virt/hyperv/vmops.py:427 nova/virt/hyperv/vmops.py:430
#, python-format
msgid "Attempting to bind NIC to %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:431
+#: nova/virt/hyperv/vmops.py:435
msgid "No vSwitch specified, attaching to default"
msgstr ""
-#: nova/virt/hyperv/vmops.py:456
+#: nova/virt/hyperv/vmops.py:460
#, python-format
msgid "Got request to destroy vm %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:500
+#: nova/virt/hyperv/vmops.py:504
#, python-format
msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:506
+#: nova/virt/hyperv/vmops.py:510
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:511
+#: nova/virt/hyperv/vmops.py:515
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:517
+#: nova/virt/hyperv/vmops.py:521
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:522
+#: nova/virt/hyperv/vmops.py:526
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:527
+#: nova/virt/hyperv/vmops.py:531
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:532
+#: nova/virt/hyperv/vmops.py:536
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:552
+#: nova/virt/hyperv/vmops.py:556
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:581
+#: nova/virt/hyperv/vmops.py:585
#, python-format
msgid "use_cow_image:%s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:601
+#: nova/virt/hyperv/vmops.py:605
#, python-format
msgid "Failed to create Difference Disk from %(base)s to %(target)s"
msgstr ""
@@ -6174,7 +6240,7 @@ msgstr ""
msgid "Removing existing folder %s "
msgstr ""
-#: nova/virt/hyperv/volumeops.py:90 nova/virt/xenapi/vm_utils.py:512
+#: nova/virt/hyperv/volumeops.py:90 nova/virt/xenapi/vm_utils.py:517
#, python-format
msgid "block device info: %s"
msgstr ""
@@ -6234,7 +6300,7 @@ msgstr ""
msgid "Failed to remove volume from VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:228 nova/virt/libvirt/driver.py:624
+#: nova/virt/hyperv/volumeops.py:228 nova/virt/libvirt/driver.py:623
msgid "Could not determine iscsi initiator name"
msgstr ""
@@ -6348,246 +6414,239 @@ msgstr ""
msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:341
+#: nova/virt/libvirt/driver.py:340
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:347
+#: nova/virt/libvirt/driver.py:346
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:368
+#: nova/virt/libvirt/driver.py:367
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:390 nova/virt/libvirt/driver.py:393
+#: nova/virt/libvirt/driver.py:389 nova/virt/libvirt/driver.py:392
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:411
+#: nova/virt/libvirt/driver.py:410
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:487
+#: nova/virt/libvirt/driver.py:486
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:501
+#: nova/virt/libvirt/driver.py:500
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:506
+#: nova/virt/libvirt/driver.py:505
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:528
+#: nova/virt/libvirt/driver.py:527
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:543
+#: nova/virt/libvirt/driver.py:542
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:556
+#: nova/virt/libvirt/driver.py:555
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:573
+#: nova/virt/libvirt/driver.py:572
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:587
+#: nova/virt/libvirt/driver.py:586
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:747
+#: nova/virt/libvirt/driver.py:746
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:757
+#: nova/virt/libvirt/driver.py:756
msgid "attaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:777
+#: nova/virt/libvirt/driver.py:776
msgid "detaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:909
+#: nova/virt/libvirt/driver.py:908
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:913
+#: nova/virt/libvirt/driver.py:912
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:945
+#: nova/virt/libvirt/driver.py:944
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:981
+#: nova/virt/libvirt/driver.py:980
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1113
+#: nova/virt/libvirt/driver.py:1108
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1120 nova/virt/powervm/operator.py:255
+#: nova/virt/libvirt/driver.py:1115 nova/virt/powervm/operator.py:255
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1136
+#: nova/virt/libvirt/driver.py:1131
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1174 nova/virt/libvirt/driver.py:1200
+#: nova/virt/libvirt/driver.py:1169 nova/virt/libvirt/driver.py:1195
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1189
+#: nova/virt/libvirt/driver.py:1184
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1238
+#: nova/virt/libvirt/driver.py:1233
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1242
+#: nova/virt/libvirt/driver.py:1237
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1246 nova/virt/libvirt/driver.py:1250
+#: nova/virt/libvirt/driver.py:1241 nova/virt/libvirt/driver.py:1245
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1278
-#, python-format
-msgid ""
-"Creating ephemeral disk named %(disk_name)s of size %(size_in_gb)s gb at "
-"%(fname)s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1322
+#: nova/virt/libvirt/driver.py:1311
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1454
+#: nova/virt/libvirt/driver.py:1466
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1464
+#: nova/virt/libvirt/driver.py:1476
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1538
+#: nova/virt/libvirt/driver.py:1550
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1544
+#: nova/virt/libvirt/driver.py:1556
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1548
+#: nova/virt/libvirt/driver.py:1560
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1552
+#: nova/virt/libvirt/driver.py:1564
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1568
+#: nova/virt/libvirt/driver.py:1580
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1858
+#: nova/virt/libvirt/driver.py:1871
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1862
+#: nova/virt/libvirt/driver.py:1875
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1879
+#: nova/virt/libvirt/driver.py:1892
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2011
+#: nova/virt/libvirt/driver.py:2024
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2128
+#: nova/virt/libvirt/driver.py:2141
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2211
+#: nova/virt/libvirt/driver.py:2224
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2235
+#: nova/virt/libvirt/driver.py:2248
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2239
+#: nova/virt/libvirt/driver.py:2252
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2354
+#: nova/virt/libvirt/driver.py:2367
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2362
+#: nova/virt/libvirt/driver.py:2375
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2397
+#: nova/virt/libvirt/driver.py:2410
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2422
+#: nova/virt/libvirt/driver.py:2435
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2434
+#: nova/virt/libvirt/driver.py:2447
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6597,34 +6656,34 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2451
+#: nova/virt/libvirt/driver.py:2464
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2499
+#: nova/virt/libvirt/driver.py:2512
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2569
+#: nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2613
+#: nova/virt/libvirt/driver.py:2626
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2740
+#: nova/virt/libvirt/driver.py:2753
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2789
+#: nova/virt/libvirt/driver.py:2802
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
@@ -6673,11 +6732,11 @@ msgstr ""
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:215
+#: nova/virt/libvirt/imagebackend.py:213
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:278
+#: nova/virt/libvirt/imagebackend.py:276
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
@@ -6848,7 +6907,7 @@ msgstr ""
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:389
+#: nova/virt/libvirt/utils.py:436
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
@@ -7077,27 +7136,27 @@ msgid ""
"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:274
+#: nova/virt/vmwareapi/driver.py:261
#, python-format
msgid "In vmwareapi:_create_session, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:357
+#: nova/virt/vmwareapi/driver.py:344
#, python-format
msgid "In vmwareapi:_call_method, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:392
+#: nova/virt/vmwareapi/driver.py:379
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: success"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:397
+#: nova/virt/vmwareapi/driver.py:384
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:401
+#: nova/virt/vmwareapi/driver.py:388
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
@@ -7476,19 +7535,19 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1495
+#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1455
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1499
+#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1459
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1504
+#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1464
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -7588,24 +7647,24 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:359
+#: nova/virt/xenapi/driver.py:355
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:571
+#: nova/virt/xenapi/driver.py:567
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:623
+#: nova/virt/xenapi/driver.py:619
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:663
+#: nova/virt/xenapi/driver.py:659
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:747 nova/virt/xenapi/driver.py:761
+#: nova/virt/xenapi/driver.py:743 nova/virt/xenapi/driver.py:757
#, python-format
msgid "Got exception: %s"
msgstr ""
@@ -7693,44 +7752,44 @@ msgstr ""
msgid "Found no network for bridge %s"
msgstr ""
-#: nova/virt/xenapi/pool.py:78
+#: nova/virt/xenapi/pool.py:71
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
"%(host)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:173
+#: nova/virt/xenapi/pool.py:158
#, python-format
msgid "Unable to eject %(host)s from the pool; pool not empty"
msgstr ""
-#: nova/virt/xenapi/pool.py:190
+#: nova/virt/xenapi/pool.py:175
#, python-format
msgid "Unable to eject %(host)s from the pool; No master found"
msgstr ""
-#: nova/virt/xenapi/pool.py:207
+#: nova/virt/xenapi/pool.py:192
#, python-format
msgid "Pool-Join failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:210
+#: nova/virt/xenapi/pool.py:195
#, python-format
msgid "Unable to join %(host)s in the pool"
msgstr ""
-#: nova/virt/xenapi/pool.py:226
+#: nova/virt/xenapi/pool.py:211
#, python-format
msgid "Pool-eject failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:238
+#: nova/virt/xenapi/pool.py:223
#, python-format
msgid "Unable to set up pool: %(e)s."
msgstr ""
-#: nova/virt/xenapi/pool.py:249
+#: nova/virt/xenapi/pool.py:234
#, python-format
msgid "Pool-set_name_label failed: %(e)s"
msgstr ""
@@ -7747,609 +7806,609 @@ msgid ""
"Expected %(vlan_num)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:266
+#: nova/virt/xenapi/vm_utils.py:271
msgid "Created VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:278
+#: nova/virt/xenapi/vm_utils.py:283
msgid "VM destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:283 nova/virt/xenapi/vm_utils.py:298
+#: nova/virt/xenapi/vm_utils.py:288 nova/virt/xenapi/vm_utils.py:303
msgid "VM already halted, skipping shutdown..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:287
+#: nova/virt/xenapi/vm_utils.py:292
msgid "Shutting down VM (cleanly)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:302
+#: nova/virt/xenapi/vm_utils.py:307
msgid "Shutting down VM (hard)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:341
+#: nova/virt/xenapi/vm_utils.py:346
#, python-format
msgid "VBD not found in instance %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:358
+#: nova/virt/xenapi/vm_utils.py:363
#, python-format
msgid "VBD %s already detached"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:361
+#: nova/virt/xenapi/vm_utils.py:366
#, python-format
msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:366
+#: nova/virt/xenapi/vm_utils.py:371
#, python-format
msgid "Unable to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:371
+#: nova/virt/xenapi/vm_utils.py:376
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:382
+#: nova/virt/xenapi/vm_utils.py:387
#, python-format
msgid "Unable to destroy VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:401
+#: nova/virt/xenapi/vm_utils.py:406
#, python-format
msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:404
+#: nova/virt/xenapi/vm_utils.py:409
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:420
+#: nova/virt/xenapi/vm_utils.py:425
#, python-format
msgid "Unable to destroy VDI %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:452
+#: nova/virt/xenapi/vm_utils.py:457
#, python-format
msgid ""
"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
" on %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:483
+#: nova/virt/xenapi/vm_utils.py:488
msgid "SR not present and could not be introduced"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:584
+#: nova/virt/xenapi/vm_utils.py:589
#, python-format
msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:604
+#: nova/virt/xenapi/vm_utils.py:609
#, python-format
msgid "No primary VDI found for %(vm_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:613
+#: nova/virt/xenapi/vm_utils.py:618
msgid "Starting snapshot for VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:661
+#: nova/virt/xenapi/vm_utils.py:666
#, python-format
msgid "Destroying cached VDI '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:719
+#: nova/virt/xenapi/vm_utils.py:724
#, python-format
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:905
+#: nova/virt/xenapi/vm_utils.py:910
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:961
+#: nova/virt/xenapi/vm_utils.py:966
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:995
+#: nova/virt/xenapi/vm_utils.py:1000
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1007
+#: nova/virt/xenapi/vm_utils.py:1012
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1020
+#: nova/virt/xenapi/vm_utils.py:1025
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1054
+#: nova/virt/xenapi/vm_utils.py:1059
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1065
+#: nova/virt/xenapi/vm_utils.py:1070
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1129
+#: nova/virt/xenapi/vm_utils.py:1134
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1144
+#: nova/virt/xenapi/vm_utils.py:1149
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1148
+#: nova/virt/xenapi/vm_utils.py:1153
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1170
+#: nova/virt/xenapi/vm_utils.py:1175
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1183
+#: nova/virt/xenapi/vm_utils.py:1188
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1192
+#: nova/virt/xenapi/vm_utils.py:1197
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1211
+#: nova/virt/xenapi/vm_utils.py:1216
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1225
+#: nova/virt/xenapi/vm_utils.py:1230
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1234
+#: nova/virt/xenapi/vm_utils.py:1239
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1272
+#: nova/virt/xenapi/vm_utils.py:1277
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1293
+#: nova/virt/xenapi/vm_utils.py:1298
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1311
+#: nova/virt/xenapi/vm_utils.py:1316
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1342
+#: nova/virt/xenapi/vm_utils.py:1347
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1438
+#: nova/virt/xenapi/vm_utils.py:1443
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1465
+#: nova/virt/xenapi/vm_utils.py:1470
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1493
+#: nova/virt/xenapi/vm_utils.py:1498
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1511
+#: nova/virt/xenapi/vm_utils.py:1516
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1524
+#: nova/virt/xenapi/vm_utils.py:1529
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1532
+#: nova/virt/xenapi/vm_utils.py:1537
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1534
+#: nova/virt/xenapi/vm_utils.py:1539
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1537
+#: nova/virt/xenapi/vm_utils.py:1542
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1540
+#: nova/virt/xenapi/vm_utils.py:1545
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1544
+#: nova/virt/xenapi/vm_utils.py:1549
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1546
+#: nova/virt/xenapi/vm_utils.py:1551
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1549
+#: nova/virt/xenapi/vm_utils.py:1554
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1552
+#: nova/virt/xenapi/vm_utils.py:1557
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1555
+#: nova/virt/xenapi/vm_utils.py:1560
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1577
+#: nova/virt/xenapi/vm_utils.py:1582
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1593
+#: nova/virt/xenapi/vm_utils.py:1598
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1647
+#: nova/virt/xenapi/vm_utils.py:1652
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1707
+#: nova/virt/xenapi/vm_utils.py:1712
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1794
+#: nova/virt/xenapi/vm_utils.py:1799
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1804
+#: nova/virt/xenapi/vm_utils.py:1809
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1839
+#: nova/virt/xenapi/vm_utils.py:1844
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1859
+#: nova/virt/xenapi/vm_utils.py:1864
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1872
+#: nova/virt/xenapi/vm_utils.py:1877
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1875
+#: nova/virt/xenapi/vm_utils.py:1880
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1877
+#: nova/virt/xenapi/vm_utils.py:1882
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1880
+#: nova/virt/xenapi/vm_utils.py:1885
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1885
+#: nova/virt/xenapi/vm_utils.py:1890
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1893
+#: nova/virt/xenapi/vm_utils.py:1898
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1906
+#: nova/virt/xenapi/vm_utils.py:1911
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1913
+#: nova/virt/xenapi/vm_utils.py:1918
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1915
+#: nova/virt/xenapi/vm_utils.py:1920
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1928
+#: nova/virt/xenapi/vm_utils.py:1933
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1934
+#: nova/virt/xenapi/vm_utils.py:1939
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1959
+#: nova/virt/xenapi/vm_utils.py:1964
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1972
+#: nova/virt/xenapi/vm_utils.py:1977
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2026
+#: nova/virt/xenapi/vm_utils.py:2031
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2058
+#: nova/virt/xenapi/vm_utils.py:2063
#, python-format
msgid ""
"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
"reduction in size"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2110
+#: nova/virt/xenapi/vm_utils.py:2115
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2119
+#: nova/virt/xenapi/vm_utils.py:2124
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2231
+#: nova/virt/xenapi/vm_utils.py:2236
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:124 nova/virt/xenapi/vmops.py:672
+#: nova/virt/xenapi/vmops.py:124 nova/virt/xenapi/vmops.py:671
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:166
+#: nova/virt/xenapi/vmops.py:165
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:234
+#: nova/virt/xenapi/vmops.py:233
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:303
+#: nova/virt/xenapi/vmops.py:302
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:375
+#: nova/virt/xenapi/vmops.py:374
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:406
+#: nova/virt/xenapi/vmops.py:405
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:479
+#: nova/virt/xenapi/vmops.py:478
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:496
+#: nova/virt/xenapi/vmops.py:495
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:522
+#: nova/virt/xenapi/vmops.py:521
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:528
+#: nova/virt/xenapi/vmops.py:527
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:542
+#: nova/virt/xenapi/vmops.py:541
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:545
+#: nova/virt/xenapi/vmops.py:544
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:556
+#: nova/virt/xenapi/vmops.py:555
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:583
+#: nova/virt/xenapi/vmops.py:582
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:591
+#: nova/virt/xenapi/vmops.py:590
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:641
+#: nova/virt/xenapi/vmops.py:640
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:645
+#: nova/virt/xenapi/vmops.py:644
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:653
+#: nova/virt/xenapi/vmops.py:652
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:690
+#: nova/virt/xenapi/vmops.py:689
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:696 nova/virt/xenapi/vmops.py:746
+#: nova/virt/xenapi/vmops.py:695 nova/virt/xenapi/vmops.py:745
msgid "Clean shutdown did not complete successfully, trying hard shutdown."
msgstr ""
-#: nova/virt/xenapi/vmops.py:775
+#: nova/virt/xenapi/vmops.py:774
msgid "Resize down not allowed without auto_disk_config"
msgstr ""
-#: nova/virt/xenapi/vmops.py:820
+#: nova/virt/xenapi/vmops.py:819
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:825
+#: nova/virt/xenapi/vmops.py:824
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:869
+#: nova/virt/xenapi/vmops.py:868
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:960
+#: nova/virt/xenapi/vmops.py:959
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:986
+#: nova/virt/xenapi/vmops.py:985
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1013
+#: nova/virt/xenapi/vmops.py:1012
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1020
+#: nova/virt/xenapi/vmops.py:1019
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1027
+#: nova/virt/xenapi/vmops.py:1026
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1053
+#: nova/virt/xenapi/vmops.py:1052
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1079
+#: nova/virt/xenapi/vmops.py:1078
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1130
+#: nova/virt/xenapi/vmops.py:1129
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1164
+#: nova/virt/xenapi/vmops.py:1163
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1213
+#: nova/virt/xenapi/vmops.py:1212
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1217
+#: nova/virt/xenapi/vmops.py:1216
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1316
+#: nova/virt/xenapi/vmops.py:1276
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1399
+#: nova/virt/xenapi/vmops.py:1359
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1418
+#: nova/virt/xenapi/vmops.py:1378
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1427
+#: nova/virt/xenapi/vmops.py:1387
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1430
+#: nova/virt/xenapi/vmops.py:1390
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1458
+#: nova/virt/xenapi/vmops.py:1418
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1554
+#: nova/virt/xenapi/vmops.py:1514
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1586
+#: nova/virt/xenapi/vmops.py:1546
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1634
+#: nova/virt/xenapi/vmops.py:1594
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1670
+#: nova/virt/xenapi/vmops.py:1630
msgid "Migrate Send failed"
msgstr ""
diff --git a/nova/network/api.py b/nova/network/api.py
index db8b87268..beee802c1 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -82,6 +82,8 @@ def update_instance_cache_with_nw_info(api, context, instance,
class API(base.Base):
"""API for interacting with the network manager."""
+ _sentinel = object()
+
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
super(API, self).__init__(**kwargs)
@@ -232,6 +234,16 @@ class API(base.Base):
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ """Associate or disassociate host or project to network."""
+ associations = {}
+ if host is not API._sentinel:
+ associations['host'] = host
+ if project is not API._sentinel:
+ associations['project'] = project
+ self.network_rpcapi.associate(context, network_uuid, associations)
+
@refresh_cache
def get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
diff --git a/nova/network/manager.py b/nova/network/manager.py
index d916d3fb7..cea7f6dc3 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -887,7 +887,7 @@ class NetworkManager(manager.SchedulerDependentManager):
The one at a time part is to flatten the layout to help scale
"""
- RPC_API_VERSION = '1.4'
+ RPC_API_VERSION = '1.5'
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
@@ -2210,6 +2210,27 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
network_id = None
self.db.network_associate(context, project_id, network_id, force=True)
+ @wrap_check_policy
+ def associate(self, context, network_uuid, associations):
+ """Associate or disassociate host or project to network."""
+ network_id = self.get_network(context, network_uuid)['id']
+ if 'host' in associations:
+ host = associations['host']
+ if host is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=True,
+ disassociate_project=False)
+ else:
+ self.db.network_set_host(context, network_id, host)
+ if 'project' in associations:
+ project = associations['project']
+ if project is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=False,
+ disassociate_project=True)
+ else:
+ self.db.network_associate(context, project, network_id, True)
+
def _get_network_by_id(self, context, network_id):
# NOTE(vish): Don't allow access to networks with project_id=None as
# these are networksa that haven't been allocated to a
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
deleted file mode 100644
index 5c2adf7f1..000000000
--- a/nova/network/quantum/nova_ipam_lib.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Nicira Networks, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-
-from nova import db
-from nova import exception
-from nova import ipv6
-from nova.network import manager
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def get_ipam_lib(net_man):
- return QuantumNovaIPAMLib(net_man)
-
-
-class QuantumNovaIPAMLib(object):
- """Implements Quantum IP Address Management (IPAM) interface
- using the local Nova database. This implementation is inline
- with how IPAM is used by other NetworkManagers.
- """
-
- def __init__(self, net_manager):
- """Holds a reference to the "parent" network manager, used
- to take advantage of various FlatManager methods to avoid
- code duplication.
- """
- self.net_manager = net_manager
-
- def create_subnet(self, context, label, tenant_id,
- quantum_net_id, priority, cidr=None,
- gateway=None, gateway_v6=None, cidr_v6=None,
- dns1=None, dns2=None):
- """Re-use the basic FlatManager create_networks method to
- initialize the networks and fixed_ips tables in Nova DB.
-
- Also stores a few more fields in the networks table that
- are needed by Quantum but not the FlatManager.
- """
- admin_context = context.elevated()
- subnet_size = len(netaddr.IPNetwork(cidr))
- networks = manager.FlatManager._do_create_networks(self.net_manager,
- admin_context, label, cidr,
- False, 1, subnet_size, cidr_v6, gateway,
- gateway_v6, quantum_net_id, None, dns1, dns2,
- ipam=True)
- #TODO(tr3buchet): refactor passing in the ipam key so that
- # it's no longer required. The reason it exists now is because
- # nova insists on carving up IP blocks. What ends up happening is
- # we create a v4 and an identically sized v6 block. The reason
- # the quantum tests passed previosly is nothing prevented an
- # incorrect v6 address from being assigned to the wrong subnet
-
- if len(networks) != 1:
- raise Exception(_("Error creating network entry"))
-
- network = networks[0]
- net = {"project_id": tenant_id,
- "priority": priority,
- "uuid": quantum_net_id}
- db.network_update(admin_context, network['id'], net)
-
- def delete_subnets_by_net_id(self, context, net_id, project_id):
- """Deletes a network based on Quantum UUID. Uses FlatManager
- delete_network to avoid duplication.
- """
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, net_id)
- if not network:
- raise Exception(_("No network with net_id = %s") % net_id)
- manager.FlatManager.delete_network(self.net_manager,
- admin_context, None,
- network['uuid'],
- require_disassociated=False)
-
- def get_global_networks(self, admin_context):
- return db.project_get_networks(admin_context, None, False)
-
- def get_project_networks(self, admin_context):
- try:
- nets = db.network_get_all(admin_context.elevated())
- except exception.NoNetworksFound:
- return []
- # only return networks with a project_id set
- return [net for net in nets if net['project_id']]
-
- def get_project_and_global_net_ids(self, context, project_id):
- """Fetches all networks associated with this project, or
- that are "global" (i.e., have no project set).
- Returns list sorted by 'priority'.
- """
- admin_context = context.elevated()
- networks = db.project_get_networks(admin_context, project_id, False)
- networks.extend(self.get_global_networks(admin_context))
- id_priority_map = {}
- net_list = []
- for n in networks:
- net_id = n['uuid']
- net_list.append((net_id, n["project_id"]))
- id_priority_map[net_id] = n['priority']
- return sorted(net_list, key=lambda x: id_priority_map[x[0]])
-
- def allocate_fixed_ips(self, context, tenant_id, quantum_net_id,
- network_tenant_id, vif_rec):
- """Allocates a single fixed IPv4 address for a virtual interface."""
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, quantum_net_id)
- address = None
- if network['cidr']:
- instance = db.instance_get_by_uuid(context,
- vif_rec['instance_uuid'])
- address = db.fixed_ip_associate_pool(admin_context,
- network['id'],
- instance['uuid'])
- values = {'allocated': True,
- 'virtual_interface_id': vif_rec['id']}
- db.fixed_ip_update(admin_context, address, values)
- return [address]
-
- def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
- """Returns tenant_id for this network. This is only necessary
- in the melange IPAM case.
- """
- return project_id
-
- def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
- """Returns information about the IPv4 and IPv6 subnets
- associated with a Quantum Network UUID.
- """
- n = db.network_get_by_uuid(context.elevated(), net_id)
- subnet_v4 = {
- 'network_id': n['uuid'],
- 'cidr': n['cidr'],
- 'gateway': n['gateway'],
- 'broadcast': n['broadcast'],
- 'netmask': n['netmask'],
- 'version': 4,
- 'dns1': n['dns1'],
- 'dns2': n['dns2']}
- #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
- # this is probably bad as there is no way to add v6
- # dns to nova
- subnet_v6 = {
- 'network_id': n['uuid'],
- 'cidr': n['cidr_v6'],
- 'gateway': n['gateway_v6'],
- 'broadcast': None,
- 'netmask': n['netmask_v6'],
- 'version': 6,
- 'dns1': None,
- 'dns2': None}
- return [subnet_v4, subnet_v6]
-
- def get_routes_by_ip_block(self, context, block_id, project_id):
- """Returns the list of routes for the IP block"""
- return []
-
- def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
- """Returns a list of IPv4 address strings associated with
- the specified virtual interface, based on the fixed_ips table.
- """
- # TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
- vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
- fixed_ips = db.fixed_ips_by_virtual_interface(context,
- vif_rec['id'])
- return [fixed_ip['address'] for fixed_ip in fixed_ips]
-
- def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
- """Returns a list containing a single IPv6 address strings
- associated with the specified virtual interface.
- """
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, net_id)
- vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
- if network['cidr_v6']:
- ip = ipv6.to_global(network['cidr_v6'],
- vif_rec['address'],
- project_id)
- return [ip]
- return []
-
- def verify_subnet_exists(self, context, tenant_id, quantum_net_id):
- """Confirms that a subnet exists that is associated with the
- specified Quantum Network UUID. Raises an exception if no
- such subnet exists.
- """
- admin_context = context.elevated()
- net = db.network_get_by_uuid(admin_context, quantum_net_id)
- return net is not None
-
- def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
- """Deallocate all fixed IPs associated with the specified
- virtual interface.
- """
- admin_context = context.elevated()
- fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
- vif_ref['id'])
- # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
- # associated with the instance-id. This prevents us from handing it
- # out again immediately, as allocating it to a new instance before
- # a DHCP lease has timed-out is bad. Instead, the fixed-ip will
- # be disassociated with the instance-id by a call to one of two
- # methods inherited from FlatManager:
- # - if DHCP is in use, a lease expiring in dnsmasq triggers
- # a call to release_fixed_ip in the network manager, or it will
- # be timed out periodically if the lease fails.
- # - otherwise, we release the ip immediately
-
- read_deleted_context = admin_context.elevated(read_deleted='yes')
- for fixed_ip in fixed_ips:
- fixed_id = fixed_ip['id']
- floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
- admin_context,
- fixed_id)
- # disassociate floating ips related to fixed_ip
- for floating_ip in floating_ips:
- address = floating_ip['address']
- manager.FloatingIP.disassociate_floating_ip(
- self.net_manager,
- read_deleted_context,
- address,
- affect_auto_assigned=True)
- # deallocate if auto_assigned
- if floating_ip['auto_assigned']:
- manager.FloatingIP.deallocate_floating_ip(
- read_deleted_context,
- address,
- affect_auto_assigned=True)
- db.fixed_ip_update(admin_context, fixed_ip['address'],
- {'allocated': False,
- 'virtual_interface_id': None})
- if not self.net_manager.DHCP:
- db.fixed_ip_disassociate(admin_context, fixed_ip['address'])
-
- if len(fixed_ips) == 0:
- LOG.error(_('No fixed IPs to deallocate for vif %s'),
- vif_ref['id'])
-
- def get_allocated_ips(self, context, subnet_id, project_id):
- """Returns a list of (ip, vif_id) pairs"""
- admin_context = context.elevated()
- ips = db.fixed_ip_get_all(admin_context)
- allocated_ips = []
- # Get all allocated IPs that are part of this subnet
- network = db.network_get_by_uuid(admin_context, subnet_id)
- for ip in ips:
- # Skip unallocated IPs
- if not ip['allocated'] == 1:
- continue
- if ip['network_id'] == network['id']:
- vif = db.virtual_interface_get(admin_context,
- ip['virtual_interface_id'])
- allocated_ips.append((ip['address'], vif['uuid']))
- return allocated_ips
-
- def get_floating_ips_by_fixed_address(self, context, fixed_address):
- return db.floating_ip_get_by_fixed_address(context, fixed_address)
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index f7bc02d84..8ee1ce443 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -37,6 +37,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
1.2 - Make migrate_instance_[start|finish] a little more flexible
1.3 - Adds fanout cast update_dns for multi_host networks
1.4 - Add get_backdoor_port()
+ 1.5 - Adds associate
'''
#
@@ -163,6 +164,11 @@ class NetworkAPI(rpc_proxy.RpcProxy):
return self.call(ctxt, self.make_msg('add_network_to_project',
project_id=project_id, network_uuid=network_uuid))
+ def associate(self, ctxt, network_uuid, associations):
+ return self.call(ctxt, self.make_msg('associate',
+ network_uuid=network_uuid, associations=associations),
+ self.topic, version="1.5")
+
def get_instance_nw_info(self, ctxt, instance_id, instance_uuid,
rxtx_factor, host, project_id):
return self.call(ctxt, self.make_msg('get_instance_nw_info',
diff --git a/nova/rootwrap/filters.py b/nova/rootwrap/filters.py
index a3e5f1c3c..632e8d5bc 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/rootwrap/filters.py
@@ -23,6 +23,7 @@ class CommandFilter(object):
"""Command filter only checking that the 1st argument matches exec_path"""
def __init__(self, exec_path, run_as, *args):
+ self.name = ''
self.exec_path = exec_path
self.run_as = run_as
self.args = args
diff --git a/nova/rootwrap/wrapper.py b/nova/rootwrap/wrapper.py
index 742f23b14..848538234 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/rootwrap/wrapper.py
@@ -17,6 +17,8 @@
import ConfigParser
+import logging
+import logging.handlers
import os
import string
@@ -37,10 +39,64 @@ class FilterMatchNotExecutable(Exception):
self.match = match
+class RootwrapConfig(object):
+
+ def __init__(self, config):
+ # filters_path
+ self.filters_path = config.get("DEFAULT", "filters_path").split(",")
+
+ # exec_dirs
+ if config.has_option("DEFAULT", "exec_dirs"):
+ self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
+ else:
+ # Use system PATH if exec_dirs is not specified
+ self.exec_dirs = os.environ["PATH"].split(':')
+
+ # syslog_log_facility
+ if config.has_option("DEFAULT", "syslog_log_facility"):
+ v = config.get("DEFAULT", "syslog_log_facility")
+ facility_names = logging.handlers.SysLogHandler.facility_names
+ self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
+ v, None)
+ if self.syslog_log_facility is None and v in facility_names:
+ self.syslog_log_facility = facility_names.get(v)
+ if self.syslog_log_facility is None:
+ raise ValueError('Unexpected syslog_log_facility: %s' % v)
+ else:
+ default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
+ self.syslog_log_facility = default_facility
+
+ # syslog_log_level
+ if config.has_option("DEFAULT", "syslog_log_level"):
+ v = config.get("DEFAULT", "syslog_log_level")
+ self.syslog_log_level = logging.getLevelName(v.upper())
+ if (self.syslog_log_level == "Level %s" % v.upper()):
+ raise ValueError('Unexpected syslog_log_level: %s' % v)
+ else:
+ self.syslog_log_level = logging.ERROR
+
+ # use_syslog
+ if config.has_option("DEFAULT", "use_syslog"):
+ self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
+ else:
+ self.use_syslog = False
+
+
+def setup_syslog(execname, facility, level):
+ rootwrap_logger = logging.getLogger()
+ rootwrap_logger.setLevel(level)
+ handler = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
+ handler.setFormatter(logging.Formatter(
+ os.path.basename(execname) + ': %(message)s'))
+ rootwrap_logger.addHandler(handler)
+
+
def build_filter(class_name, *args):
"""Returns a filter object of class class_name"""
if not hasattr(filters, class_name):
- # TODO(ttx): Log the error (whenever nova-rootwrap has a log file)
+ logging.warning("Skipping unknown filter class (%s) specified "
+ "in filter definitions" % class_name)
return None
filterclass = getattr(filters, class_name)
return filterclass(*args)
@@ -60,6 +116,7 @@ def load_filters(filters_path):
newfilter = build_filter(*filterdefinition)
if newfilter is None:
continue
+ newfilter.name = name
filterlist.append(newfilter)
return filterlist
diff --git a/nova/test.py b/nova/test.py
index 4379b6de4..baeef49c2 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -191,6 +191,15 @@ class TestCase(testtools.TestCase):
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
+ if (os.environ.get('OS_STDOUT_NOCAPTURE') != 'True' and
+ os.environ.get('OS_STDOUT_NOCAPTURE') != '1'):
+ stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+ if (os.environ.get('OS_STDERR_NOCAPTURE') != 'True' and
+ os.environ.get('OS_STDERR_NOCAPTURE') != '1'):
+ stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
self.log_fixture = self.useFixture(fixtures.FakeLogger('nova'))
self.useFixture(conf_fixture.ConfFixture(CONF))
@@ -222,3 +231,18 @@ class TestCase(testtools.TestCase):
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
+
+
+class APICoverage(object):
+
+ cover_api = None
+
+ def test_api_methods(self):
+ self.assertTrue(self.cover_api is not None)
+ api_methods = [x for x in dir(self.cover_api)
+ if not x.startswith('_')]
+ test_methods = [x[5:] for x in dir(self)
+ if x.startswith('test_')]
+ self.assertThat(
+ test_methods,
+ testtools.matchers.ContainsAll(api_methods))
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 0a694bbb7..831143326 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -25,6 +25,8 @@ import os
import string
import tempfile
+import fixtures
+
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
@@ -101,6 +103,7 @@ class CloudTestCase(test.TestCase):
super(CloudTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
+ self.useFixture(fixtures.FakeLogger('boto'))
def fake_show(meh, context, id):
return {'id': id,
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index a50e8d89e..367e61910 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -23,6 +23,9 @@ import uuid
import webob
from nova.api.openstack.compute.contrib import networks
+from nova.api.openstack.compute.contrib import networks_associate
+from nova import config
+from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova import test
@@ -93,6 +96,8 @@ NEW_NETWORK = {
class FakeNetworkAPI(object):
+ _sentinel = object()
+
def __init__(self):
self.networks = copy.deepcopy(FAKE_NETWORKS)
@@ -110,6 +115,17 @@ class FakeNetworkAPI(object):
return True
raise exception.NetworkNotFound()
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ if host is not FakeNetworkAPI._sentinel:
+ network['host'] = host
+ if project is not FakeNetworkAPI._sentinel:
+ network['project_id'] = project
+ return True
+ raise exception.NetworkNotFound()
+
def add_network_to_project(self, context,
project_id, network_uuid=None):
if network_uuid:
@@ -165,6 +181,8 @@ class NetworksTest(test.TestCase):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
self.controller = networks.NetworkController(self.fake_network_api)
+ self.associate_controller = networks_associate\
+ .NetworkAssociateActionController(self.fake_network_api)
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -194,13 +212,35 @@ class NetworksTest(test.TestCase):
def test_network_disassociate(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- res = self.controller.action(req, uuid, {'disassociate': None})
+ res = self.controller._disassociate_host_and_project(
+ req, uuid, {'disassociate': None})
self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
+ self.assertEqual(self.fake_network_api.networks[0]['host'], None)
+
+ def test_network_disassociate_host_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_host_only(
+ req, uuid, {'disassociate_host': None})
+ self.assertEqual(res.status_int, 202)
+ self.assertNotEqual(self.fake_network_api.networks[0]['project_id'],
+ None)
+ self.assertEqual(self.fake_network_api.networks[0]['host'], None)
+
+ def test_network_disassociate_project_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_project_only(
+ req, uuid, {'disassociate_project': None})
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
+ self.assertNotEqual(self.fake_network_api.networks[0]['host'], None)
def test_network_disassociate_not_found(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100/action')
self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.action,
+ self.controller._disassociate_host_and_project,
req, 100, {'disassociate': None})
def test_network_get_as_user(self):
@@ -246,6 +286,17 @@ class NetworksTest(test.TestCase):
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['network']['project_id'], 'fake')
+ def test_network_associate_with_host(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._associate_host(
+ req, uuid, {'associate_host': "TestHost"})
+ self.assertEqual(res.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['host'], 'TestHost')
+
def test_network_create(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
res_dict = self.controller.create(req, NEW_NETWORK)
diff --git a/nova/tests/api/openstack/compute/contrib/test_used_limits.py b/nova/tests/api/openstack/compute/contrib/test_used_limits.py
index 4a75961ed..ce2322bfe 100644
--- a/nova/tests/api/openstack/compute/contrib/test_used_limits.py
+++ b/nova/tests/api/openstack/compute/contrib/test_used_limits.py
@@ -52,10 +52,8 @@ class UsedLimitsTestCase(test.TestCase):
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
- 'totalVolumesUsed': 'volumes',
- 'totalVolumeGigabytesUsed': 'gigabytes',
- 'totalSecurityGroupsUsed': 'floating_ips',
- 'totalKeyPairsUsed': 'key_pairs',
+ 'totalFloatingIpsUsed': 'floating_ips',
+ 'totalSecurityGroupsUsed': 'security_groups',
}
limits = {}
for display_name, q in quota_map.iteritems():
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 0afbecb22..15be74d15 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -2761,7 +2761,7 @@ class ServersControllerCreateTest(test.TestCase):
'open': 'stack',
},
'personality': {},
- 'config_drive': True,
+ 'config_drive': "true",
},
}
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 9bad14275..9cc235579 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -193,17 +193,17 @@ class BaseTestCase(test.TestCase):
# only used in the subsequent notification:
return (instance, instance)
- def _tracker(self, host=None, unsupported=False):
+ def _driver(self):
+ return FakeVirtDriver()
+
+ def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
- if unsupported:
- driver = UnsupportedVirtDriver()
- else:
- driver = FakeVirtDriver()
+ driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
@@ -215,10 +215,13 @@ class UnsupportedDriverTestCase(BaseTestCase):
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
- self.tracker = self._tracker(unsupported=True)
+ self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
+ def _driver(self):
+ return UnsupportedVirtDriver()
+
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
@@ -248,7 +251,7 @@ class UnsupportedDriverTestCase(BaseTestCase):
root_gb=10)
self.tracker.update_usage(self.context, instance)
- def testDisabledResizeClaim(self):
+ def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_instance_type_create()
claim = self.tracker.resize_claim(self.context, instance,
@@ -258,7 +261,7 @@ class UnsupportedDriverTestCase(BaseTestCase):
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
- def testDisabledResizeContextClaim(self):
+ def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_instance_type_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
@@ -327,18 +330,6 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
- self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus')
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'vcpus_used')
- self._assert(0, 'running_vms')
- self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
- self.assertFalse(self.tracker.disabled)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
-
def _fake_service_get_all_compute_by_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
@@ -412,6 +403,19 @@ class TrackerTestCase(BaseTrackerTestCase):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
+ def test_init(self):
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self._assert(0, 'running_vms')
+ self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+
class InstanceClaimTestCase(BaseTrackerTestCase):
@@ -817,3 +821,31 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
+
+
+class OrphanTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(OrphanTestCase, self).setUp()
+
+ def _driver(self):
+ class OrphanVirtDriver(FakeVirtDriver):
+ def get_per_instance_usage(self):
+ return {
+ '1-2-3-4-5': {'memory_mb': 4, 'uuid': '1-2-3-4-5'},
+ '2-3-4-5-6': {'memory_mb': 4, 'uuid': '2-3-4-5-6'},
+
+ }
+
+ return OrphanVirtDriver()
+
+ def test_usage(self):
+ # 2 instances, 4 mb each
+ self.assertEqual(8, self.tracker.compute_node['memory_mb_used'])
+
+ def test_find(self):
+ # create one legit instance and verify the 2 orphans remain
+ self._fake_instance()
+ orphans = self.tracker._find_orphaned_instances()
+
+ self.assertEqual(2, len(orphans))
diff --git a/nova/tests/compute/test_virtapi.py b/nova/tests/compute/test_virtapi.py
index ac4f75344..568bf456d 100644
--- a/nova/tests/compute/test_virtapi.py
+++ b/nova/tests/compute/test_virtapi.py
@@ -24,38 +24,22 @@ from nova.virt import fake
from nova.virt import virtapi
-class VirtAPIBaseTest(test.TestCase):
+class VirtAPIBaseTest(test.TestCase, test.APICoverage):
+
+ cover_api = virtapi.VirtAPI
+
def setUp(self):
super(VirtAPIBaseTest, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
+ self.set_up_virtapi()
- @classmethod
- def set_up_virtapi(cls):
- cls.virtapi = virtapi.VirtAPI()
-
- @classmethod
- def setUpClass(cls):
- super(VirtAPIBaseTest, cls).setUpClass()
- cls.set_up_virtapi()
- cls._totest_methods = [x for x in dir(cls.virtapi)
- if not x.startswith('_')]
- cls._tested_methods = [x for x in dir(cls)
- if x.startswith('test_')]
-
- def _tested_method(self, method):
- self._tested_methods.remove('test_' + method)
- self._totest_methods.remove(method)
-
- def run(self, result):
- super(VirtAPIBaseTest, self).run(result)
- if not self._tested_methods:
- self.assertEqual(self._totest_methods, [])
+ def set_up_virtapi(self):
+ self.virtapi = virtapi.VirtAPI()
def assertExpected(self, method, *args, **kwargs):
self.assertRaises(NotImplementedError,
getattr(self.virtapi, method), self.context,
*args, **kwargs)
- self._tested_method(method)
def test_instance_update(self):
self.assertExpected('instance_update', 'fake-uuid',
@@ -79,11 +63,12 @@ class VirtAPIBaseTest(test.TestCase):
'foo')
def test_security_group_get_by_instance(self):
- self.assertExpected('security_group_get_by_instance', 'fake-uuid')
+ self.assertExpected('security_group_get_by_instance',
+ {'id': 'fake-id'})
def test_security_group_rule_get_by_security_group(self):
self.assertExpected('security_group_rule_get_by_security_group',
- 'fake-id')
+ {'id': 'fake-id'})
def test_provider_fw_rule_get_all(self):
self.assertExpected('provider_fw_rule_get_all')
@@ -94,9 +79,11 @@ class VirtAPIBaseTest(test.TestCase):
class FakeVirtAPITest(VirtAPIBaseTest):
- @classmethod
- def set_up_virtapi(cls):
- cls.virtapi = fake.FakeVirtAPI()
+
+ cover_api = fake.FakeVirtAPI
+
+ def set_up_virtapi(self):
+ self.virtapi = fake.FakeVirtAPI()
def assertExpected(self, method, *args, **kwargs):
if method == 'instance_update':
@@ -107,9 +94,11 @@ class FakeVirtAPITest(VirtAPIBaseTest):
db_method = method
self.mox.StubOutWithMock(db, db_method)
- if method in ('aggregate_metadata_add', 'aggregate_metadata_delete'):
- # NOTE(danms): FakeVirtAPI will convert the aggregate to
- # aggregate['id'], so expect that in the actual db call
+ if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
+ 'security_group_rule_get_by_security_group',
+ 'security_group_get_by_instance'):
+ # NOTE(danms): FakeVirtAPI will convert the first argument to
+ # argument['id'], so expect that in the actual db call
e_args = tuple([args[0]['id']] + list(args[1:]))
else:
e_args = args
@@ -119,7 +108,6 @@ class FakeVirtAPITest(VirtAPIBaseTest):
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
- self._tested_method(method)
class FakeCompute(object):
@@ -134,40 +122,17 @@ class FakeCompute(object):
class ComputeVirtAPITest(VirtAPIBaseTest):
- @classmethod
- def set_up_virtapi(cls):
- cls.compute = FakeCompute()
- cls.virtapi = compute_manager.ComputeVirtAPI(cls.compute)
-
- @classmethod
- def setUpClass(cls):
- super(ComputeVirtAPITest, cls).setUpClass()
- # NOTE(danms): Eventually these should all be migrated to the
- # conductor, but until then, dispatch appropriately.
- cls.conductor_methods = ['instance_update', 'instance_get_by_uuid',
- 'instance_get_all_by_host',
- 'aggregate_get_by_host',
- 'aggregate_metadata_add',
- 'aggregate_metadata_delete',
- ]
- cls.db_methods = ['security_group_get_by_instance',
- 'security_group_rule_get_by_security_group',
- 'provider_fw_rule_get_all',
- 'agent_build_get_by_triple',
- ]
- def assertExpected(self, method, *args, **kwargs):
- if method in self.conductor_methods:
- target = self.compute.conductor_api
- elif method in self.db_methods:
- target = self.compute.db
- else:
- raise Exception('Method "%s" not known to this test!')
+ cover_api = compute_manager.ComputeVirtAPI
- self.mox.StubOutWithMock(target, method)
- getattr(target, method)(self.context, *args, **kwargs).AndReturn(
- 'it worked')
+ def set_up_virtapi(self):
+ self.compute = FakeCompute()
+ self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
+
+ def assertExpected(self, method, *args, **kwargs):
+ self.mox.StubOutWithMock(self.compute.conductor_api, method)
+ getattr(self.compute.conductor_api, method)(
+ self.context, *args, **kwargs).AndReturn('it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
- self._tested_method(method)
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 4aec358b8..734dba9ed 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -36,7 +36,7 @@ from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
-class _BaseTestCase(test.TestCase):
+class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
@@ -222,8 +222,48 @@ class _BaseTestCase(test.TestCase):
self.assertEqual(port, backdoor_port)
+ def test_security_group_get_by_instance(self):
+ fake_instance = {'id': 'fake-instance'}
+ self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
+ db.security_group_get_by_instance(
+ self.context, fake_instance['id']).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.security_group_get_by_instance(self.context,
+ fake_instance)
+ self.assertEqual(result, 'it worked')
+
+ def test_security_group_rule_get_by_security_group(self):
+ fake_secgroup = {'id': 'fake-secgroup'}
+ self.mox.StubOutWithMock(db,
+ 'security_group_rule_get_by_security_group')
+ db.security_group_rule_get_by_security_group(
+ self.context, fake_secgroup['id']).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.security_group_rule_get_by_security_group(
+ self.context, fake_secgroup)
+ self.assertEqual(result, 'it worked')
+
+ def test_provider_fw_rule_get_all(self):
+ fake_rules = ['a', 'b', 'c']
+ self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
+ db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
+ self.mox.ReplayAll()
+ result = self.conductor.provider_fw_rule_get_all(self.context)
+ self.assertEqual(result, fake_rules)
+
+ def test_agent_build_get_by_triple(self):
+ self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
+ db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
+ 'fake-arch').AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.agent_build_get_by_triple(self.context,
+ 'fake-hv',
+ 'fake-os',
+ 'fake-arch')
+ self.assertEqual(result, 'it worked')
+
-class ConductorTestCase(_BaseTestCase):
+class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests"""
def setUp(self):
super(ConductorTestCase, self).setUp()
@@ -231,7 +271,7 @@ class ConductorTestCase(_BaseTestCase):
self.stub_out_client_exceptions()
-class ConductorRPCAPITestCase(_BaseTestCase):
+class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests"""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
@@ -240,7 +280,7 @@ class ConductorRPCAPITestCase(_BaseTestCase):
self.conductor = conductor_rpcapi.ConductorAPI()
-class ConductorAPITestCase(_BaseTestCase):
+class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests"""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 3823a77b0..9c1140922 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -137,6 +137,7 @@ policy_data = """
"compute_extension:multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
diff --git a/nova/tests/hyperv/basetestcase.py b/nova/tests/hyperv/basetestcase.py
index 4458dbd9d..c4f6cf95f 100644
--- a/nova/tests/hyperv/basetestcase.py
+++ b/nova/tests/hyperv/basetestcase.py
@@ -43,9 +43,16 @@ class BaseTestCase(test.TestCase):
def tearDown(self):
super(BaseTestCase, self).tearDown()
- has_errors = len([test for (test, msgs) in self._currentResult.errors
+ # python-subunit will wrap test results with a decorator.
+ # Need to access the decorated member of results to get the
+ # actual test result when using python-subunit.
+ if hasattr(self._currentResult, 'decorated'):
+ result = self._currentResult.decorated
+ else:
+ result = self._currentResult
+ has_errors = len([test for (test, msgs) in result.errors
if test.id() == self.id()]) > 0
- failed = len([test for (test, msgs) in self._currentResult.failures
+ failed = len([test for (test, msgs) in result.failures
if test.id() == self.id()]) > 0
if not has_errors and not failed:
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
index 3c92ffb2e..4f8790cc7 100644
--- a/nova/tests/image/test_s3.py
+++ b/nova/tests/image/test_s3.py
@@ -21,6 +21,8 @@ import mox
import os
import tempfile
+import fixtures
+
from nova import context
import nova.db.api
from nova import exception
@@ -83,6 +85,7 @@ class TestS3ImageService(test.TestCase):
def setUp(self):
super(TestS3ImageService, self).setUp()
self.context = context.RequestContext(None, None)
+ self.useFixture(fixtures.FakeLogger('boto'))
# set up one fixture to test shows, should have id '1'
nova.db.api.s3_image_create(self.context,
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index efd6893bb..4a8d96844 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -305,6 +305,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-networks-associate",
+ "description": "%(text)s",
+ "links": [],
+ "name": "NetworkAssociationSupport",
+ "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-quota-class-sets",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index ee957be90..7d4683986 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -114,6 +114,9 @@
<extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
<description>%(text)s</description>
</extension>
+ <extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-quota-class-sets" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
new file mode 100644
index 000000000..762e88175
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "associate_host": "%(host)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
new file mode 100644
index 000000000..7c96c96a1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<associate_host>%(host)s</associate_host>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
new file mode 100644
index 000000000..46f69b3e8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
new file mode 100644
index 000000000..910504a44
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_host/>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
new file mode 100644
index 000000000..63b6eb683
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
new file mode 100644
index 000000000..d4162c19e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_project/>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
new file mode 100644
index 000000000..2e09d15a6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
new file mode 100644
index 000000000..c26f7b61a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate/>
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
index 78ddbb5af..d83dd87c3 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
@@ -14,9 +14,9 @@
"maxTotalRAMSize": 51200,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
- "totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
- "totalSecurityGroupsUsed": 0
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0
},
"rate": [
{
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
index 75526473a..c1b907670 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
@@ -31,7 +31,7 @@
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="totalKeyPairsUsed" value="0"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 0e2cba7a9..49ff77306 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -15,6 +15,7 @@
import base64
import datetime
+import inspect
import os
import re
import urllib
@@ -22,11 +23,14 @@ import uuid
from lxml import etree
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.api.openstack.compute import extensions
from nova.cloudpipe.pipelib import CloudPipe
from nova.compute import api
from nova import context
from nova import db
from nova.db.sqlalchemy import models
+from nova.network import api
from nova.network.manager import NetworkManager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -41,7 +45,8 @@ from nova.tests.integrated import integrated_helpers
CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
-CONF.import_opt('osapi_compute_extension', 'nova.config')
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
CONF.import_opt('vpn_image_id', 'nova.config')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
@@ -311,6 +316,77 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
return self._get_response(url, 'DELETE')
+class ApiSamplesTrap(ApiSampleTestBase):
+ """Make sure extensions don't get added without tests"""
+
+ all_extensions = True
+
+ def _get_extensions_tested(self):
+ tests = []
+ for attr in globals().values():
+ if not inspect.isclass(attr):
+ continue # Skip non-class objects
+ if not issubclass(attr, integrated_helpers._IntegratedTestBase):
+ continue # Skip non-test classes
+ if attr.extension_name is None:
+ continue # Skip base tests
+ cls = importutils.import_class(attr.extension_name)
+ tests.append(cls.alias)
+ return tests
+
+ def _get_extensions(self):
+ extensions = []
+ response = self._do_get('extensions')
+ for extension in jsonutils.loads(response.read())['extensions']:
+ extensions.append(str(extension['alias']))
+ return extensions
+
+ def test_all_extensions_have_samples(self):
+ # NOTE(danms): This is a list of extensions which are currently
+ # in the tree but that don't (yet) have tests. This list should
+ # NOT be allowed to grow, and should shrink to zero (and be
+ # removed) soon.
+ do_not_approve_additions = []
+ do_not_approve_additions.append('NMN')
+ do_not_approve_additions.append('OS-FLV-DISABLED')
+ do_not_approve_additions.append('os-config-drive')
+ do_not_approve_additions.append('os-coverage')
+ do_not_approve_additions.append('os-create-server-ext')
+ do_not_approve_additions.append('os-fixed-ips')
+ do_not_approve_additions.append('os-flavor-access')
+ do_not_approve_additions.append('os-flavor-extra-specs')
+ do_not_approve_additions.append('os-flavor-rxtx')
+ do_not_approve_additions.append('os-flavor-swap')
+ do_not_approve_additions.append('os-floating-ip-dns')
+ do_not_approve_additions.append('os-floating-ip-pools')
+ do_not_approve_additions.append('os-fping')
+ do_not_approve_additions.append('os-hypervisors')
+ do_not_approve_additions.append('os-instance_usage_audit_log')
+ do_not_approve_additions.append('os-networks')
+ do_not_approve_additions.append('os-quota-class-sets')
+ do_not_approve_additions.append('os-services')
+ do_not_approve_additions.append('os-volumes')
+
+ tests = self._get_extensions_tested()
+ extensions = self._get_extensions()
+ missing_tests = []
+ for extension in extensions:
+ # NOTE(danms): if you add tests, remove it from the
+ # exclusions list
+ self.assertFalse(extension in do_not_approve_additions and
+ extension in tests)
+
+ # NOTE(danms): if you add an extension, it must come with
+ # api_samples tests!
+ if (extension not in tests and
+ extension not in do_not_approve_additions):
+ missing_tests.append(extension)
+
+ if missing_tests:
+ LOG.error("Extensions are missing tests: %s" % missing_tests)
+ self.assertEqual(missing_tests, [])
+
+
class VersionsSampleJsonTest(ApiSampleTestBase):
def test_versions_get(self):
response = self._do_get('', strip_version=True)
@@ -1973,3 +2049,56 @@ class DiskConfigJsonTest(ServersSampleBase):
class DiskConfigXmlTest(DiskConfigJsonTest):
ctype = 'xml'
+
+
+class NetworksAssociateJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".networks_associate.Networks_associate")
+
+ _sentinel = object()
+
+ def _get_flags(self):
+ f = super(NetworksAssociateJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # Networks_associate requires the Networks extension to be enabled
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.networks.Networks')
+ return f
+
+ def setUp(self):
+ super(NetworksAssociateJsonTests, self).setUp()
+
+ def fake_associate(self, context, network_id,
+ host=NetworksAssociateJsonTests._sentinel,
+ project=NetworksAssociateJsonTests._sentinel):
+ return True
+
+ self.stubs.Set(api.API, "associate", fake_associate)
+
+ def test_disassociate(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-req',
+ {})
+ self.assertEqual(response.status, 202)
+
+ def test_disassociate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-host-req',
+ {})
+ self.assertEqual(response.status, 202)
+
+ def test_disassociate_project(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-project-req',
+ {})
+ self.assertEqual(response.status, 202)
+
+ def test_associate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-associate-host-req',
+ {"host": "testHost"})
+ self.assertEqual(response.status, 202)
+
+
+class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
+ ctype = 'xml'
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index 61e4e32d0..968379a6c 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.api.openstack.compute import extensions
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index dd6cccf0f..2ded5230d 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -92,6 +92,13 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('disassociate_network', rpc_method='call',
network_uuid='fake_uuid')
+ def test_associate_host_and_project(self):
+ self._test_network_api('associate', rpc_method='call',
+ network_uuid='fake_uuid',
+ associations={'host': "testHost",
+ 'project': 'testProject'},
+ version="1.5")
+
def test_get_fixed_ip(self):
self._test_network_api('get_fixed_ip', rpc_method='call', id='id')
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index cf6e1de90..163afda7d 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -29,6 +29,7 @@ try:
from boto.connection import HTTPResponse
except ImportError:
from httplib import HTTPResponse
+import fixtures
import webob
from nova.api import auth
@@ -221,6 +222,7 @@ class ApiEc2TestCase(test.TestCase):
self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
), 'nova.api.ec2.cloud.CloudController'))))
+ self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index eca2267a6..c15259066 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -27,7 +27,7 @@ from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
-class _ImageTestCase(test.TestCase):
+class _ImageTestCase(object):
INSTANCES_PATH = '/fake'
def mock_create_image(self, image):
@@ -111,7 +111,7 @@ class _ImageTestCase(test.TestCase):
self.mox.VerifyAll()
-class RawTestCase(_ImageTestCase):
+class RawTestCase(_ImageTestCase, test.TestCase):
SIZE = 1024
@@ -161,7 +161,7 @@ class RawTestCase(_ImageTestCase):
self.mox.VerifyAll()
-class Qcow2TestCase(_ImageTestCase):
+class Qcow2TestCase(_ImageTestCase, test.TestCase):
SIZE = 1024 * 1024 * 1024
def setUp(self):
@@ -224,7 +224,7 @@ class Qcow2TestCase(_ImageTestCase):
self.mox.VerifyAll()
-class LvmTestCase(_ImageTestCase):
+class LvmTestCase(_ImageTestCase, test.TestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index d72bf57f5..39c84c1d8 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -27,6 +27,7 @@ import webob
from nova.api.metadata import base
from nova.api.metadata import handler
+from nova.api.metadata import password
from nova import block_device
from nova import db
from nova.db.sqlalchemy import api
@@ -319,6 +320,14 @@ class OpenStackMetadataTestCase(test.TestCase):
for key, val in extra.iteritems():
self.assertEqual(mddict[key], val)
+ def test_password(self):
+ # make sure the password route resolves to the handle_password callable
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack/latest/password")
+ self.assertEqual(result, password.handle_password)
+
def test_userdata(self):
inst = copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
@@ -351,6 +360,20 @@ class MetadataHandlerTestCase(test.TestCase):
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
+ def test_callable(self):
+
+ def verify(req, meta_data):
+ self.assertTrue(isinstance(meta_data, CallableMD))
+ return "foo"
+
+ class CallableMD(object):
+ def lookup(self, path_info):
+ return verify
+
+ response = fake_request(self.stubs, CallableMD(), "/bar")
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, "foo")
+
def test_root(self):
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self.stubs, self.mdinst, "/")
@@ -469,3 +492,47 @@ class MetadataHandlerTestCase(test.TestCase):
'8387b96cbc5bd2474665192d2ec28'
'8ffb67'})
self.assertEqual(response.status_int, 500)
+
+
+class MetadataPasswordTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataPasswordTestCase, self).setUp()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
+ self.instance = copy(INSTANCES[0])
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+
+ def test_get_password(self):
+ request = webob.Request.blank('')
+ self.mdinst.password = 'foo'
+ result = password.handle_password(request, self.mdinst)
+ self.assertEqual(result, 'foo')
+
+ def test_bad_method(self):
+ request = webob.Request.blank('')
+ request.method = 'PUT'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ password.handle_password, request, self.mdinst)
+
+ def _try_set_password(self, val='bar'):
+ request = webob.Request.blank('')
+ request.method = 'POST'
+ request.body = val
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ lambda *a, **kw: None)
+ password.handle_password(request, self.mdinst)
+
+ def test_set_password(self):
+ self.mdinst.password = ''
+ self._try_set_password()
+
+ def test_conflict(self):
+ self.mdinst.password = 'foo'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._try_set_password)
+
+ def test_too_large(self):
+ self.mdinst.password = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._try_set_password, 'a' * 257)
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index 1dfd57a72..df7b88f2c 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -14,6 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ConfigParser
+import logging
+import logging.handlers
import os
import subprocess
@@ -149,3 +152,47 @@ class RootwrapTestCase(test.TestCase):
usercmd = ["cat", "/"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertTrue(filtermatch is self.filters[-1])
+
+ def test_RootwrapConfig(self):
+ raw = ConfigParser.RawConfigParser()
+
+ # Empty config should raise ConfigParser.Error
+ self.assertRaises(ConfigParser.Error, wrapper.RootwrapConfig, raw)
+
+ # Check default values
+ raw.set('DEFAULT', 'filters_path', '/a,/b')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.filters_path, ['/a', '/b'])
+ self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':'))
+ self.assertFalse(config.use_syslog)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_SYSLOG)
+ self.assertEqual(config.syslog_log_level, logging.ERROR)
+
+ # Check general values
+ raw.set('DEFAULT', 'exec_dirs', '/a,/x')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.exec_dirs, ['/a', '/x'])
+
+ raw.set('DEFAULT', 'use_syslog', 'oui')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'use_syslog', 'true')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertTrue(config.use_syslog)
+
+ raw.set('DEFAULT', 'syslog_log_facility', 'moo')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'syslog_log_facility', 'local0')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_LOCAL0)
+ raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_AUTH)
+
+ raw.set('DEFAULT', 'syslog_log_level', 'bar')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'syslog_log_level', 'INFO')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_level, logging.INFO)
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 563b3a44b..cd525d2a1 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -57,7 +57,7 @@ def catch_notimplementederror(f):
return wrapped_func
-class _FakeDriverBackendTestCase(test.TestCase):
+class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
# So that the _supports_direct_io does the test based
# on the current working directory, instead of the
@@ -142,7 +142,7 @@ class _FakeDriverBackendTestCase(test.TestCase):
super(_FakeDriverBackendTestCase, self).tearDown()
-class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
+class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class"""
@@ -532,19 +532,19 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
-class AbstractDriverTestCase(_VirtDriverTestCase):
+class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
-class FakeConnectionTestCase(_VirtDriverTestCase):
+class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
super(FakeConnectionTestCase, self).setUp()
-class LibvirtConnTestCase(_VirtDriverTestCase):
+class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b226b34df..f2799b8f3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -113,6 +113,33 @@ def set_image_fixtures():
image_service.create(None, image_meta)
+def get_fake_device_info():
+ # FIXME: 'sr_uuid', 'introduce_sr_keys', 'sr_type' and 'vdi_uuid'
+ # can be removed from the dict when LP bug #1087308 is fixed
+ fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+ fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+ fake = {'block_device_mapping':
+ [{'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'sr_uuid': 'falseSR',
+ 'introduce_sr_keys': ['sr_type'],
+ 'sr_type': 'iscsi',
+ 'vdi_uuid': fake_vdi_uuid,
+ 'target_discovered': False,
+ 'target_iqn': 'foo_iqn:foo_volid',
+ 'target_portal': 'localhost:3260',
+ 'volume_id': 'foo_volid',
+ 'target_lun': 1,
+ 'auth_password': 'my-p@55w0rd',
+ 'auth_username': 'johndoe',
+ 'auth_method': u'CHAP'}, },
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}, ],
+ 'root_device_name': '/dev/sda',
+ 'ephemerals': [],
+ 'swap': None, }
+ return fake
+
+
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
@@ -312,8 +339,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
- super(XenAPIVMTestCase, self).tearDown()
fake_image.FakeImageService_reset()
+ super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -550,7 +577,10 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
- create_record=True, empty_dns=False):
+ create_record=True, empty_dns=False,
+ image_meta={'id': IMAGE_VHD,
+ 'disk_format': 'vhd'},
+ block_device_info=None):
if injected_files is None:
injected_files = []
@@ -582,10 +612,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
self.conn.spawn(self.context, instance, image_meta, injected_files,
- 'herp', network_info)
+ 'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance['os_type'])
@@ -681,6 +709,16 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
+ def test_spawn_boot_from_volume_no_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ image_meta={}, block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_with_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ block_device_info=dev_info)
+
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
@@ -978,6 +1016,33 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
pass
self.assertTrue(was['called'])
+ def test_per_instance_usage_running(self):
+ instance = self._create_instance(spawn=True)
+ instance_type = instance_types.get_instance_type(3)
+
+ expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'],
+ 'uuid': instance['uuid']}}
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ # Paused instances still consume resources:
+ self.conn.pause(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
@@ -1321,6 +1386,40 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
+ def test_none(self):
+ image_meta = None
+ self.assert_disk_type(image_meta, None)
+
+
+class XenAPIDetermineIsPVTestCase(test.TestCase):
+ """Unit tests for code that detects the PV status based on ImageType."""
+ def assert_pv_status(self, disk_image_type, os_type, expected_pv_status):
+ session = None
+ vdi_ref = None
+ actual = vm_utils.determine_is_pv(session, vdi_ref,
+ disk_image_type, os_type)
+ self.assertEqual(expected_pv_status, actual)
+
+ def test_windows_vhd(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False)
+
+ def test_linux_vhd(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_raw(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', True)
+
+ def test_disk(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK, None, True)
+
+ def test_iso(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_none(self):
+ self.assert_pv_status(None, None, True)
+
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
diff --git a/nova/tests/virt/disk/test_nbd.py b/nova/tests/virt/disk/test_nbd.py
index 5e08215a5..4b067d405 100644
--- a/nova/tests/virt/disk/test_nbd.py
+++ b/nova/tests/virt/disk/test_nbd.py
@@ -73,13 +73,16 @@ def _fake_noop(*args, **kwargs):
class NbdTestCase(test.TestCase):
def setUp(self):
super(NbdTestCase, self).setUp()
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices)
self.useFixture(fixtures.MonkeyPatch('os.listdir',
_fake_listdir_nbd_devices))
def test_nbd_no_devices(self):
tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
n = nbd.NbdMount(None, tempdir)
- n.detect_nbd_device = _fake_detect_nbd_devices_none
self.assertEquals(None, n._allocate_nbd())
def test_nbd_no_free_devices(self):
@@ -109,7 +112,6 @@ class NbdTestCase(test.TestCase):
def test_nbd_allocation(self):
tempdir = self.useFixture(fixtures.TempDir()).path
n = nbd.NbdMount(None, tempdir)
- n.detect_nbd_device = _fake_detect_nbd_devices
self.useFixture(fixtures.MonkeyPatch('os.path.exists',
_fake_exists_no_users))
self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
@@ -139,13 +141,12 @@ class NbdTestCase(test.TestCase):
# re-added. I will fix this in a later patch.
self.assertEquals('/dev/nbd1', n._allocate_nbd())
- def test_get_dev_no_devices(self):
+ def test_inner_get_dev_no_devices(self):
tempdir = self.useFixture(fixtures.TempDir()).path
n = nbd.NbdMount(None, tempdir)
- n.detect_nbd_device = _fake_detect_nbd_devices
- self.assertFalse(n.get_dev())
+ self.assertFalse(n._inner_get_dev())
- def test_get_dev_qemu_fails(self):
+ def test_inner_get_dev_qemu_fails(self):
tempdir = self.useFixture(fixtures.TempDir()).path
n = nbd.NbdMount(None, tempdir)
self.useFixture(fixtures.MonkeyPatch('os.path.exists',
@@ -157,13 +158,12 @@ class NbdTestCase(test.TestCase):
self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
# Error logged, no device consumed
- self.assertFalse(n.get_dev())
+ self.assertFalse(n._inner_get_dev())
self.assertTrue(n.error.startswith('qemu-nbd error'))
- def test_get_dev_qemu_timeout(self):
+ def test_inner_get_dev_qemu_timeout(self):
tempdir = self.useFixture(fixtures.TempDir()).path
n = nbd.NbdMount(None, tempdir)
- n.detect_nbd_device = _fake_detect_nbd_devices
self.useFixture(fixtures.MonkeyPatch('os.path.exists',
_fake_exists_no_users))
@@ -174,46 +174,45 @@ class NbdTestCase(test.TestCase):
self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
# Error logged, no device consumed
- self.assertFalse(n.get_dev())
+ self.assertFalse(n._inner_get_dev())
self.assertTrue(n.error.endswith('did not show up'))
- def test_get_dev_works(self):
- tempdir = self.useFixture(fixtures.TempDir()).path
- n = nbd.NbdMount(None, tempdir)
- n.detect_nbd_device = _fake_detect_nbd_devices
- self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
- self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
-
+ def fake_exists_one(self, path):
# We need the pid file for the device which is allocated to exist, but
# only once it is allocated to us
- def fake_exists_one(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd1/pid':
+ return False
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+ def fake_trycmd_creates_pid(self, *args, **kwargs):
+ def fake_exists_two(path):
if path.startswith('/sys/block/nbd'):
- if path == '/sys/block/nbd1/pid':
- return False
+ if path == '/sys/block/nbd0/pid':
+ return True
if path.endswith('pid'):
return False
return True
return ORIG_EXISTS(path)
self.useFixture(fixtures.MonkeyPatch('os.path.exists',
- fake_exists_one))
+ fake_exists_two))
+ return '', ''
- # We have a trycmd that always passed
- def fake_trycmd(*args, **kwargs):
- def fake_exists_two(path):
- if path.startswith('/sys/block/nbd'):
- if path == '/sys/block/nbd0/pid':
- return True
- if path.endswith('pid'):
- return False
- return True
- return ORIG_EXISTS(path)
- self.useFixture(fixtures.MonkeyPatch('os.path.exists',
- fake_exists_two))
- return '', ''
- self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+ def test_inner_get_dev_works(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
# No error logged, device consumed
- self.assertTrue(n.get_dev())
+ self.assertTrue(n._inner_get_dev())
self.assertTrue(n.linked)
self.assertEquals('', n.error)
self.assertEquals('/dev/nbd0', n.device)
@@ -229,4 +228,48 @@ class NbdTestCase(test.TestCase):
# something we don't have
tempdir = self.useFixture(fixtures.TempDir()).path
n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ n.unget_dev()
+
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+
+ # No error logged, device consumed
+ self.assertTrue(n.get_dev())
+ self.assertTrue(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals('/dev/nbd0', n.device)
+
+ # Free
n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals(None, n.device)
+
+ def test_get_dev_timeout(self):
+ # Always fail to get a device
+ def fake_get_dev_fails(self):
+ return False
+ self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.nbd.'
+ 'MAX_NBD_WAIT'), -10))
+
+ # Timeout warning logged, no device allocated
+ self.assertFalse(n.get_dev())
diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py
index 2d3e30e92..6d7f9a624 100644
--- a/nova/tests/xenapi/test_vm_utils.py
+++ b/nova/tests/xenapi/test_vm_utils.py
@@ -10,6 +10,29 @@ from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
+def get_fake_dev_params(sr_type):
+ fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
+ 'name_label': 'fake_storage',
+ 'name_description': 'test purposes',
+ 'server': 'myserver',
+ 'serverpath': '/local/scratch/myname',
+ 'sr_type': 'nfs',
+ 'introduce_sr_keys': ['server',
+ 'serverpath',
+ 'sr_type'],
+ 'vdi_uuid': 'falseVDI'},
+ ISCSI_TYPE: {'volume_id': 'fake_volume_id',
+ 'target_lun': 1,
+ 'target_iqn': 'fake_iqn:volume-fake_volume_id',
+ 'target_portal': u'localhost:3260',
+ 'target_discovered': False}, }
+ return fakes[sr_type]
+
+
class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
@@ -50,15 +73,8 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
self.assertEquals([], result)
- def test_get_vdis_for_boot_from_vol(self):
- dev_params = {'sr_uuid': 'falseSR',
- 'name_label': 'fake_storage',
- 'name_description': 'test purposes',
- 'server': 'myserver',
- 'serverpath': '/local/scratch/myname',
- 'sr_type': 'nfs',
- 'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
- 'vdi_uuid': 'falseVDI'}
+ def test_get_vdis_for_boot_from_vol_with_sr_uuid(self):
+ dev_params = get_fake_dev_params(XENSM_TYPE)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
@@ -74,18 +90,20 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
return None
self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
- dev_params = {'sr_uuid': 'falseSR',
- 'name_label': 'fake_storage',
- 'name_description': 'test purposes',
- 'server': 'myserver',
- 'serverpath': '/local/scratch/myname',
- 'sr_type': 'nfs',
- 'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
- 'vdi_uuid': 'falseVDI'}
+ dev_params = get_fake_dev_params(XENSM_TYPE)
self.assertRaises(exception.NovaException,
vm_utils.get_vdis_for_boot_from_vol,
driver._session, dev_params)
+ def test_get_vdis_for_boot_from_iscsi_vol_missing_sr_uuid(self):
+ dev_params = get_fake_dev_params(ISCSI_TYPE)
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
+ dev_params)
+ self.assertNotEquals(result['root']['uuid'], None)
+
class VMRefOrRaiseVMFoundTestCase(test.TestCase):
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 90d858a4b..1b6cc0778 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -37,15 +37,13 @@ CONF = cfg.CONF
CONF.register_opts(nbd_opts)
NBD_DEVICE_RE = re.compile('nbd[0-9]+')
+MAX_NBD_WAIT = 30
class NbdMount(api.Mount):
"""qemu-nbd support disk images."""
mode = 'nbd'
- # NOTE(padraig): The remaining issue with this code is that multiple
- # workers on a system can race against each other.
-
def _detect_nbd_devices(self):
"""Detect nbd device files."""
return filter(NBD_DEVICE_RE.match, os.listdir('/sys/block/'))
@@ -78,7 +76,7 @@ class NbdMount(api.Mount):
pid = int(f.readline())
return pid
- def get_dev(self):
+ def _inner_get_dev(self):
device = self._allocate_nbd()
if not device:
return False
@@ -102,12 +100,33 @@ class NbdMount(api.Mount):
break
time.sleep(1)
else:
+ _out, err = utils.trycmd('qemu-nbd', '-d', device,
+ run_as_root=True)
+ if err:
+ LOG.warn(_('Detaching from erroneous nbd device returned '
+ 'error: %s'), err)
self.error = _('nbd device %s did not show up') % device
return False
+ self.error = ''
self.linked = True
return True
+ def get_dev(self):
+ """Retry requests for NBD devices."""
+ start_time = time.time()
+ device = self._inner_get_dev()
+ while not device:
+ LOG.info(_('nbd device allocation failed. Will retry in 2 '
+ 'seconds.'))
+ time.sleep(2)
+ if time.time() - start_time > MAX_NBD_WAIT:
+ LOG.warn(_('nbd device allocation failed after repeated '
+ 'retries.'))
+ return False
+ device = self._inner_get_dev()
+ return True
+
def unget_dev(self):
if not self.linked:
return
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 991a0f6ce..cb72aa2dc 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -767,6 +767,13 @@ class ComputeDriver(object):
stats = [stats]
return [s['hypervisor_hostname'] for s in stats]
+ def get_per_instance_usage(self):
+ """Get information about instance resource usage.
+
+ :returns: dict of nova uuid => dict of usage info
+ """
+ return {}
+
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index cc2b1294c..28d0fd95d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -417,13 +417,13 @@ class FakeVirtAPI(virtapi.VirtAPI):
def aggregate_metadata_delete(self, context, aggregate, key):
return db.aggregate_metadata_delete(context, aggregate['id'], key)
- def security_group_get_by_instance(self, context, instance_uuid):
- return db.security_group_get_by_instance(context, instance_uuid)
+ def security_group_get_by_instance(self, context, instance):
+ return db.security_group_get_by_instance(context, instance['id'])
def security_group_rule_get_by_security_group(self, context,
- security_group_id):
- return db.security_group_rule_get_by_security_group(context,
- security_group_id)
+ security_group):
+ return db.security_group_rule_get_by_security_group(
+ context, security_group['id'])
def provider_fw_rule_get_all(self, context):
return db.provider_fw_rule_get_all(context)
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 5b4024ab5..8776e59f8 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -349,12 +349,12 @@ class IptablesFirewallDriver(FirewallDriver):
self._do_ra_rules(ipv6_rules, network_info)
security_groups = self._virtapi.security_group_get_by_instance(
- ctxt, instance['id'])
+ ctxt, instance)
# then, security group chains and rules
for security_group in security_groups:
rules = self._virtapi.security_group_rule_get_by_security_group(
- ctxt, security_group['id'])
+ ctxt, security_group)
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 24a66f53b..53cbabc30 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -66,19 +66,19 @@ class VirtAPI(object):
"""
raise NotImplementedError()
- def security_group_get_by_instance(self, context, instance_uuid):
+ def security_group_get_by_instance(self, context, instance):
"""Get the security group for a specified instance
:param context: security context
- :param instance_uuid: instance defining the security group we want
+ :param instance: instance defining the security group we want
"""
raise NotImplementedError()
def security_group_rule_get_by_security_group(self, context,
- security_group_id):
+ security_group):
"""Get the rules associated with a specified security group
:param context: security context
- :param security_group_id: the security group for which the rules
- should be returned
+ :param security_group: the security group for which the rules
+ should be returned
"""
raise NotImplementedError()
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 1649ffb47..8e9e74d02 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -607,6 +607,14 @@ class XenAPIDriver(driver.ComputeDriver):
"""resume guest state when a host is booted"""
self._vmops.power_on(instance)
+ def get_per_instance_usage(self):
+ """Get information about instance resource usage.
+
+ :returns: dict of nova uuid => dict of usage
+ info
+ """
+ return self._vmops.get_per_instance_usage()
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index db4f5d03e..9af8a9f41 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -635,6 +635,14 @@ class SessionBase(object):
db_ref['power_state'] = 'Halted'
VM_clean_shutdown = VM_hard_shutdown
+ def VM_suspend(self, session, vm_ref):
+ db_ref = _db_content['VM'][vm_ref]
+ db_ref['power_state'] = 'Suspended'
+
+ def VM_pause(self, session, vm_ref):
+ db_ref = _db_content['VM'][vm_ref]
+ db_ref['power_state'] = 'Paused'
+
def pool_eject(self, session, host_ref):
pass
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index be1a0f4ef..d15d89515 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -462,35 +462,29 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
def get_vdis_for_boot_from_vol(session, dev_params):
vdis = {}
- sr_uuid = dev_params['sr_uuid']
- sr_ref = volume_utils.find_sr_by_uuid(session,
- sr_uuid)
+ sr_uuid, label, sr_params = volume_utils.parse_sr_info(dev_params)
+ sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
# Try introducing SR if it is not present
if not sr_ref:
- if 'name_label' not in dev_params:
- label = 'tempSR-%s' % dev_params['volume_id']
- else:
- label = dev_params['name_label']
-
- if 'name_description' not in dev_params:
- desc = ''
- else:
- desc = dev_params.get('name_description')
- sr_params = {}
- for k in dev_params['introduce_sr_keys']:
- sr_params[k] = dev_params[k]
-
- sr_params['name_description'] = desc
- sr_ref = volume_utils.introduce_sr(session, sr_uuid, label,
- sr_params)
+ sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
if sr_ref is None:
raise exception.NovaException(_('SR not present and could not be '
'introduced'))
else:
- session.call_xenapi("SR.scan", sr_ref)
- return {'root': dict(uuid=dev_params['vdi_uuid'],
- file=None, osvol=True)}
+ if 'vdi_uuid' in dev_params:
+ session.call_xenapi("SR.scan", sr_ref)
+ vdis = {'root': dict(uuid=dev_params['vdi_uuid'],
+ file=None, osvol=True)}
+ else:
+ try:
+ vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ vdis = {'root': dict(uuid=vdi_rec['uuid'],
+ file=None, osvol=True)}
+ except volume_utils.StorageError, exc:
+ LOG.exception(exc)
+ volume_utils.forget_sr(session, sr_uuid)
return vdis
@@ -523,8 +517,7 @@ def get_vdis_for_instance(context, session, instance, name_label, image,
bdm_root_dev = block_device_info['block_device_mapping'][0]
dev_params = bdm_root_dev['connection_info']['data']
LOG.debug(dev_params)
- return get_vdis_for_boot_from_vol(session,
- dev_params)
+ return get_vdis_for_boot_from_vol(session, dev_params)
return _create_image(context, session, instance, name_label, image,
image_type)
@@ -1255,6 +1248,9 @@ def determine_disk_image_type(image_meta):
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
+ if not image_meta:
+ return None
+
disk_format = image_meta['disk_format']
disk_format_map = {
@@ -1293,6 +1289,16 @@ def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
3. Glance (DISK): pv is assumed
4. Glance (DISK_ISO): no pv is assumed
+
+ 5. Boot From Volume - without image metadata (None): attempt to
+ use Pygrub to figure out if the volume stores a PV VM or a
+ HVM one. Log a warning, because there may be cases where the
+ volume is RAW (in which case using pygrub is fine) and cases
+ where the content of the volume is VHD, and pygrub might not
+ work as expected.
+ NOTE: if disk_image_type is not specified, instances launched
+ from remote volumes will have to include kernel and ramdisk
+ because external kernel and ramdisk will not be fetched.
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
@@ -1312,6 +1318,12 @@ def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
elif disk_image_type == ImageType.DISK_ISO:
# 4. ISO
is_pv = False
+ elif not disk_image_type:
+ LOG.warning(_("Image format is None: trying to determine PV status "
+ "using pygrub; if instance with vdi %s does not boot "
+ "correctly, try with image metadata.") % vdi_ref)
+ with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
+ is_pv = _is_vdi_pv(dev)
else:
msg = _("Unknown image format %(disk_image_type)s") % locals()
raise exception.NovaException(msg)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 588ae1604..8d4687fe8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -239,7 +239,7 @@ class VMOps(object):
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
- image_meta['id'],
+ image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
@@ -1639,3 +1639,24 @@ class VMOps(object):
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
+
+ def get_per_instance_usage(self):
+ """Get usage info about each active instance."""
+ usage = {}
+
+ def _is_active(vm_rec):
+ power_state = vm_rec['power_state'].lower()
+ return power_state in ['running', 'paused']
+
+ def _get_uuid(vm_rec):
+ other_config = vm_rec['other_config']
+ return other_config.get('nova_uuid', None)
+
+ for vm_ref, vm_rec in vm_utils.list_vms(self._session):
+ uuid = _get_uuid(vm_rec)
+
+ if _is_active(vm_rec) and uuid is not None:
+ memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
+ usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
+
+ return usage
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 17c4c3300..b632401ac 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -67,8 +67,9 @@ def create_sr(session, label, params):
def introduce_sr(session, sr_uuid, label, params):
LOG.debug(_("introducing sr within volume_utils"))
- type = params['sr_type']
- del params['sr_type']
+ # If the sr_type is missing, we assume we are
+ # using the default iscsi back-end
+ type = params.pop('sr_type', 'iscsi')
LOG.debug(_('type is = %s') % type)
if 'name_description' in params:
desc = params['name_description']
@@ -283,18 +284,29 @@ def get_device_number(mountpoint):
return device_number
+def parse_sr_info(connection_data, description=''):
+ label = connection_data.pop('name_label',
+ 'tempSR-%s' % connection_data.get('volume_id'))
+ params = {}
+ if 'sr_uuid' not in connection_data:
+ params = parse_volume_info(connection_data)
+ # This magic label sounds a lot like 'False Disc' in leet-speak
+ uuid = "FA15E-D15C-" + str(params['id'])
+ else:
+ uuid = connection_data['sr_uuid']
+ for k in connection_data.get('introduce_sr_keys', {}):
+ params[k] = connection_data[k]
+ params['name_description'] = connection_data.get('name_description',
+ description)
+
+ return (uuid, label, params)
+
+
def parse_volume_info(connection_data):
"""
Parse device_path and mountpoint as they can be used by XenAPI.
In particular, the mountpoint (e.g. /dev/sdc) must be translated
into a numeric literal.
- FIXME(armando):
- As for device_path, currently cannot be used as it is,
- because it does not contain target information. As for interim
- solution, target details are passed either via Flags or obtained
- by iscsiadm. Long-term solution is to add a few more fields to the
- db in the iscsi_target table with the necessary info and modify
- the iscsi driver to set them.
"""
volume_id = connection_data['volume_id']
target_portal = connection_data['target_portal']
@@ -369,12 +381,3 @@ def _get_target_port(iscsi_string):
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
-
-
-def _get_iqn(iscsi_string, id):
- """Retrieve target IQN"""
- if iscsi_string:
- return iscsi_string
- elif iscsi_string is None or CONF.iqn_prefix:
- volume_id = _get_volume_id(id)
- return '%s:%s' % (CONF.iqn_prefix, volume_id)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index ae21518d8..d17adeba6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -130,28 +130,9 @@ class VolumeOps(object):
def connect_volume(self, connection_data, dev_number, instance_name,
vm_ref):
- if 'name_label' not in connection_data:
- label = 'tempSR-%s' % connection_data['volume_id']
- else:
- label = connection_data['name_label']
- del connection_data['name_label']
-
- if 'name_description' not in connection_data:
- desc = 'Disk-for:%s' % instance_name
- else:
- desc = connection_data['name_description']
-
- sr_params = {}
- if u'sr_uuid' not in connection_data:
- sr_params = volume_utils.parse_volume_info(connection_data)
- uuid = "FA15E-D15C-" + str(sr_params['id'])
- sr_params['sr_type'] = 'iscsi'
- else:
- uuid = connection_data['sr_uuid']
- for k in connection_data['introduce_sr_keys']:
- sr_params[k] = connection_data[k]
-
- sr_params['name_description'] = desc
+ description = 'Disk-for:%s' % instance_name
+ uuid, label, sr_params = volume_utils.parse_sr_info(connection_data,
+ description)
# Introduce SR
try:
diff --git a/run_tests.sh b/run_tests.sh
index 30279eae6..014837e32 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,14 +11,11 @@ function usage {
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
- echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
- echo " -v, --verbose Display nosetests in the console"
- echo " -d, --debug Enable pdb's prompt to be displayed during tests. This will run nosetests with --pdb option"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
@@ -39,10 +36,8 @@ function process_option {
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
- -d|--debug) debug=1;;
- -v|--verbose) verbose=1;;
- -*) noseopts="$noseopts $1";;
- *) noseargs="$noseargs $1"
+ -*) testropts="$testropts $1";;
+ *) testrargs="$testrargs $1"
esac
}
@@ -53,81 +48,61 @@ never_venv=0
force=0
no_site_packages=0
installvenvopts=
-noseargs=
-noseopts=
+testrargs=
+testropts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
recreate_db=1
-verbose=0
-debug=0
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=0.05
-export NOSE_OPENSTACK_YELLOW=0.025
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
-export LANG=en_US.UTF-8
-export LANGUAGE=en_US:en
-export LC_ALL=C
+LANG=en_US.UTF-8
+LANGUAGE=en_US:en
+LC_ALL=C
+OS_STDOUT_NOCAPTURE=False
+OS_STDERR_NOCAPTURE=False
for arg in "$@"; do
process_option $arg
done
-# If enabled, tell nose to collect coverage data
-if [ $coverage -eq 1 ]; then
- noseopts="$noseopts --with-coverage --cover-package=nova"
-fi
-
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
+function init_testr {
+ if [ ! -d .testrepository ]; then
+ ${wrapper} testr init
+ fi
+}
+
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
- if [ "$debug" -eq 0 ];
- then
- # Just run the test suites in current environment
- if [ "$verbose" -eq 1 ];
- then
- ${wrapper} $NOSETESTS 2>&1 | tee nosetests.log
- else
- ${wrapper} $NOSETESTS | tee nosetests.log
- fi
- # If we get some short import error right away, print the error log directly
- RESULT=$?
- if [ "$RESULT" -ne "0" ];
- then
- ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
- if [ "$ERRSIZE" -lt "40" ];
- then
- cat run_tests.log
- fi
- else
- tests_run=$(awk '/^Ran/ {print $2}' nosetests.log)
- if [ -z "$tests_run" ] || [ "$tests_run" -eq 0 ];
- then
- echo "ERROR: Zero tests ran, something is wrong!"
- echo "This is usually caused by a parse error in some python"
- echo "file or a failure to set up the environment (i.e. during"
- echo "temporary database preparation). Running nosetests directly"
- echo "may offer more clues."
- return 1
- fi
- fi
- else
- ${wrapper} $NOSETESTS --pdb
- RESULT=$?
+ if [ $coverage -eq 1 ]; then
+ # Do not test test_coverage_ext when gathering coverage.
+ TESTRTESTS="$TESTRTESTS ^(?!.*test_coverage_ext).*$"
+ export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
fi
+ # Just run the test suites in current environment
+ set +e
+ echo "Running \`${wrapper} $TESTRTESTS\`"
+ ${wrapper} $TESTRTESTS
+ RESULT=$?
+ set -e
+
+ copy_subunit_log
+
return $RESULT
}
+function copy_subunit_log {
+ LOGNAME=`cat .testrepository/next-stream`
+ LOGNAME=$(($LOGNAME - 1))
+ LOGNAME=".testrepository/${LOGNAME}"
+ cp $LOGNAME subunit.log
+}
function run_pep8 {
echo "Running PEP8 and HACKING compliance check..."
@@ -155,7 +130,7 @@ function run_pep8 {
}
-NOSETESTS="nosetests $noseopts $noseargs"
+TESTRTESTS="testr run --parallel $testropts $testrargs"
if [ $never_venv -eq 0 ]
then
@@ -197,13 +172,14 @@ if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
+init_testr
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
-# distinguish between options (noseopts), which begin with a '-', and
-# arguments (noseargs).
-if [ -z "$noseargs" ]; then
+# distinguish between options (testropts), which begin with a '-', and
+# arguments (testrargs).
+if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
@@ -212,5 +188,6 @@ fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
+ ${wrapper} coverage combine
${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
fi
diff --git a/setup.cfg b/setup.cfg
index 07a80bb68..a4932f63b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,10 +21,3 @@ input_file = nova/locale/nova.pot
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = nova/locale/nova.pot
-
-[nosetests]
-verbosity=2
-cover-package = nova
-cover-html = true
-cover-erase = true
-where=nova/tests
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 19b8f3f1e..b1ceb74f0 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -196,9 +196,6 @@ def install_dependencies(venv=VENV):
pip_install('-r', PIP_REQUIRES)
pip_install('-r', TEST_REQUIRES)
- # Install nova into the virtual_env. No more path munging!
- run_command([os.path.join(venv, 'bin/python'), 'setup.py', 'develop'])
-
def post_process():
get_distro().post_process()
diff --git a/tools/test-requires b/tools/test-requires
index 8a97720fa..6ee42d31c 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -2,14 +2,14 @@
distribute>=0.6.24
coverage
-fixtures
+discover
+feedparser
+fixtures>=0.3.12
mox==0.5.3
-nose
-testtools
-openstack.nose_plugin>=0.7
-nosehtmloutput
+MySQL-python
pep8==1.3.3
pylint==0.25.2
+python-subunit
sphinx>=1.1.2
-feedparser
-MySQL-python
+testrepository>=0.0.8
+testtools>=0.9.22
diff --git a/tox.ini b/tox.ini
index 586013081..4fa567518 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,19 +3,16 @@ envlist = py26,py27,pep8
[testenv]
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=0.05
- NOSE_OPENSTACK_YELLOW=0.025
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
+ OS_STDOUT_NOCAPTURE=False
+ OS_STDERR_NOCAPTURE=False
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
-commands = nosetests {posargs}
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel {posargs} ; RET=$? ; echo "Slowest Tests" ; testr slowest && exit $RET'
[tox:jenkins]
sitepackages = True
@@ -40,7 +37,13 @@ deps = pyflakes
commands = python tools/flakes.py nova
[testenv:cover]
-setenv = NOSE_WITH_COVERAGE=1
+# Need to omit DynamicallyCompiledCheetahTemplate.py from coverage because
+# it ceases to exist post test run. Also do not run test_coverage_ext tests
+# while gathering coverage as those tests conflict with coverage.
+setenv = OMIT=--omit=DynamicallyCompiledCheetahTemplate.py
+ PYTHON=coverage run --source nova --parallel-mode
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel \^\(\?\!\.\*test_coverage_ext\)\.\*\$ ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
[testenv:venv]
commands = {posargs}