summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-all3
-rwxr-xr-xbin/nova-api4
-rwxr-xr-xbin/nova-cert5
-rwxr-xr-xbin/nova-clear-rabbit-queues8
-rwxr-xr-xbin/nova-compute14
-rwxr-xr-xbin/nova-console5
-rwxr-xr-xbin/nova-consoleauth4
-rwxr-xr-xbin/nova-dhcpbridge19
-rwxr-xr-xbin/nova-manage22
-rwxr-xr-xbin/nova-network5
-rwxr-xr-xbin/nova-novncproxy33
-rwxr-xr-xbin/nova-rpc-zmq-receiver14
-rwxr-xr-xbin/nova-scheduler5
-rwxr-xr-xbin/nova-xvpvncproxy2
-rw-r--r--etc/nova/policy.json1
-rw-r--r--nova/api/auth.py13
-rw-r--r--nova/api/ec2/__init__.py28
-rw-r--r--nova/api/ec2/apirequest.py1
-rw-r--r--nova/api/ec2/cloud.py29
-rw-r--r--nova/api/ec2/ec2utils.py10
-rw-r--r--nova/api/ec2/faults.py5
-rw-r--r--nova/api/manager.py5
-rw-r--r--nova/api/metadata/base.py9
-rw-r--r--nova/api/metadata/handler.py10
-rw-r--r--nova/api/openstack/auth.py3
-rw-r--r--nova/api/openstack/common.py17
-rw-r--r--nova/api/openstack/compute/__init__.py5
-rw-r--r--nova/api/openstack/compute/contrib/__init__.py5
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py3
-rw-r--r--nova/api/openstack/compute/contrib/certificates.py2
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe.py8
-rw-r--r--nova/api/openstack/compute/contrib/config_drive.py2
-rw-r--r--nova/api/openstack/compute/contrib/extended_server_attributes.py2
-rw-r--r--nova/api/openstack/compute/contrib/extended_status.py2
-rw-r--r--nova/api/openstack/compute/contrib/floating_ips.py4
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py2
-rw-r--r--nova/api/openstack/compute/contrib/instance_usage_audit_log.py5
-rw-r--r--nova/api/openstack/compute/contrib/networks.py2
-rw-r--r--nova/api/openstack/compute/contrib/rescue.py5
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py2
-rw-r--r--nova/api/openstack/compute/contrib/simple_tenant_usage.py2
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py2
-rw-r--r--nova/api/openstack/compute/extensions.py5
-rw-r--r--nova/api/openstack/compute/image_metadata.py3
-rw-r--r--nova/api/openstack/compute/images.py1
-rw-r--r--nova/api/openstack/compute/ips.py2
-rw-r--r--nova/api/openstack/compute/servers.py24
-rw-r--r--nova/api/openstack/compute/views/addresses.py2
-rw-r--r--nova/api/openstack/compute/views/images.py6
-rw-r--r--nova/api/openstack/compute/views/versions.py5
-rw-r--r--nova/api/openstack/extensions.py2
-rw-r--r--nova/api/sizelimit.py9
-rw-r--r--nova/compute/api.py12
-rw-r--r--nova/compute/instance_types.py5
-rw-r--r--nova/compute/manager.py47
-rw-r--r--nova/compute/rpcapi.py26
-rw-r--r--nova/compute/utils.py7
-rw-r--r--nova/console/api.py4
-rw-r--r--nova/console/vmrc_manager.py12
-rw-r--r--nova/db/sqlalchemy/api.py134
-rw-r--r--nova/db/sqlalchemy/session.py288
-rw-r--r--nova/locale/nova.pot1206
-rw-r--r--nova/network/api.py2
-rw-r--r--nova/network/manager.py5
-rw-r--r--nova/network/quantumv2/api.py22
-rw-r--r--nova/network/rpcapi.py22
-rw-r--r--nova/openstack/common/uuidutils.py35
-rw-r--r--nova/scheduler/chance.py7
-rw-r--r--nova/scheduler/driver.py10
-rw-r--r--nova/scheduler/filter_scheduler.py18
-rw-r--r--nova/scheduler/filters/compute_filter.py5
-rw-r--r--nova/scheduler/filters/core_filter.py9
-rw-r--r--nova/scheduler/filters/disk_filter.py7
-rw-r--r--nova/scheduler/filters/io_ops_filter.py7
-rw-r--r--nova/scheduler/filters/isolated_hosts_filter.py9
-rw-r--r--nova/scheduler/filters/num_instances_filter.py7
-rw-r--r--nova/scheduler/filters/ram_filter.py7
-rw-r--r--nova/scheduler/filters/trusted_filter.py17
-rw-r--r--nova/scheduler/host_manager.py11
-rw-r--r--nova/scheduler/least_cost.py5
-rw-r--r--nova/scheduler/manager.py7
-rw-r--r--nova/scheduler/multi.py9
-rw-r--r--nova/scheduler/rpcapi.py6
-rw-r--r--nova/scheduler/scheduler_options.py7
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py3
-rw-r--r--nova/tests/api/ec2/test_cloud.py3
-rw-r--r--nova/tests/compute/test_compute.py131
-rw-r--r--nova/tests/compute/test_rpcapi.py13
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/test_api_samples.py4
-rw-r--r--nova/tests/network/test_api.py64
-rw-r--r--nova/tests/network/test_quantumv2.py51
-rw-r--r--nova/tests/network/test_rpcapi.py50
-rw-r--r--nova/tests/test_imagecache.py24
-rw-r--r--nova/tests/test_powervm.py3
-rw-r--r--nova/tests/test_utils.py3
-rw-r--r--nova/tests/test_virt_drivers.py3
-rw-r--r--nova/tests/test_xenapi.py43
-rw-r--r--nova/utils.py22
-rw-r--r--nova/virt/baremetal/driver.py2
-rw-r--r--nova/virt/driver.py10
-rw-r--r--nova/virt/fake.py7
-rw-r--r--nova/virt/hyperv/hostops.py1
-rw-r--r--nova/virt/libvirt/driver.py5
-rw-r--r--nova/virt/libvirt/imagecache.py5
-rw-r--r--nova/virt/powervm/operator.py1
-rw-r--r--nova/virt/xenapi/agent.py8
-rw-r--r--nova/virt/xenapi/driver.py15
-rw-r--r--nova/virt/xenapi/fake.py4
-rw-r--r--nova/virt/xenapi/host.py1
-rw-r--r--nova/virt/xenapi/pool.py2
-rw-r--r--nova/virt/xenapi/vmops.py120
-rw-r--r--openstack-common.conf2
-rw-r--r--tools/pip-requires1
-rw-r--r--tools/test-requires1
122 files changed, 1748 insertions, 1231 deletions
diff --git a/bin/nova-all b/bin/nova-all
index ce0a459b4..531116d69 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -49,6 +49,7 @@ from nova import utils
from nova.vnc import xvp_proxy
+CONF = config.CONF
LOG = logging.getLogger('nova.all')
if __name__ == '__main__':
@@ -58,7 +59,7 @@ if __name__ == '__main__':
launcher = service.ProcessLauncher()
# nova-api
- for api in flags.FLAGS.enabled_apis:
+ for api in CONF.enabled_apis:
try:
server = service.WSGIService(api)
launcher.launch_server(server, workers=server.workers or 1)
diff --git a/bin/nova-api b/bin/nova-api
index 4bcfa7f79..776152e43 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -42,12 +42,14 @@ from nova.openstack.common import log as logging
from nova import service
from nova import utils
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
launcher = service.ProcessLauncher()
- for api in flags.FLAGS.enabled_apis:
+ for api in CONF.enabled_apis:
server = service.WSGIService(api)
launcher.launch_server(server, workers=server.workers or 1)
launcher.wait()
diff --git a/bin/nova-cert b/bin/nova-cert
index 317739329..441bda9e5 100755
--- a/bin/nova-cert
+++ b/bin/nova-cert
@@ -38,11 +38,12 @@ from nova.openstack.common import log as logging
from nova import service
from nova import utils
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
- server = service.Service.create(binary='nova-cert', topic=FLAGS.cert_topic)
+ server = service.Service.create(binary='nova-cert', topic=CONF.cert_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index 05531de9b..be1d98e3e 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -53,8 +53,8 @@ delete_exchange_opt = cfg.BoolOpt('delete_exchange',
default=False,
help='delete nova exchange too.')
-FLAGS = flags.FLAGS
-FLAGS.register_cli_opt(delete_exchange_opt)
+CONF = config.CONF
+CONF.register_cli_opt(delete_exchange_opt)
def delete_exchange(exch):
@@ -73,5 +73,5 @@ if __name__ == '__main__':
args = config.parse_args(sys.argv)
logging.setup("nova")
delete_queues(args[1:])
- if FLAGS.delete_exchange:
- delete_exchange(FLAGS.control_exchange)
+ if CONF.delete_exchange:
+ delete_exchange(CONF.control_exchange)
diff --git a/bin/nova-compute b/bin/nova-compute
index 2ff98ccfc..f850e1b8c 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -20,7 +20,14 @@
"""Starter script for Nova Compute."""
import eventlet
-eventlet.monkey_patch()
+import os
+
+if os.name == 'nt':
+ # eventlet monkey patching causes subprocess.Popen to fail on Windows
+ # when using pipes due to missing non blocking I/O support
+ eventlet.monkey_patch(os=False)
+else:
+ eventlet.monkey_patch()
import os
import sys
@@ -40,12 +47,13 @@ from nova.openstack.common import log as logging
from nova import service
from nova import utils
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup('nova')
utils.monkey_patch()
server = service.Service.create(binary='nova-compute',
- topic=FLAGS.compute_topic)
+ topic=CONF.compute_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-console b/bin/nova-console
index 92b99edfb..c75e088c8 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -38,11 +38,12 @@ from nova import flags
from nova.openstack.common import log as logging
from nova import service
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup("nova")
server = service.Service.create(binary='nova-console',
- topic=FLAGS.console_topic)
+ topic=CONF.console_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth
index 14ef701a7..654a3f824 100755
--- a/bin/nova-consoleauth
+++ b/bin/nova-consoleauth
@@ -37,12 +37,12 @@ from nova import flags
from nova.openstack.common import log as logging
from nova import service
+CONF = config.CONF
if __name__ == "__main__":
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup("nova")
server = service.Service.create(binary='nova-consoleauth',
- topic=FLAGS.consoleauth_topic)
+ topic=CONF.consoleauth_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 48639ce87..ed36c47bc 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -46,21 +46,20 @@ from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import utils
-FLAGS = flags.FLAGS
-
+CONF = config.CONF
LOG = logging.getLogger('nova.dhcpbridge')
def add_lease(mac, ip_address):
"""Set the IP that was assigned by the DHCP server."""
- if FLAGS.fake_rabbit:
+ if CONF.fake_rabbit:
LOG.debug(_("leasing ip"))
- network_manager = importutils.import_object(FLAGS.network_manager)
+ network_manager = importutils.import_object(CONF.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
ip_address)
else:
api = network_rpcapi.NetworkAPI()
- api.lease_fixed_ip(context.get_admin_context(), ip_address, FLAGS.host)
+ api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host)
def old_lease(mac, ip_address):
@@ -73,28 +72,28 @@ def old_lease(mac, ip_address):
def del_lease(mac, ip_address):
"""Called when a lease expires."""
- if FLAGS.fake_rabbit:
+ if CONF.fake_rabbit:
LOG.debug(_("releasing ip"))
- network_manager = importutils.import_object(FLAGS.network_manager)
+ network_manager = importutils.import_object(CONF.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
ip_address)
else:
api = network_rpcapi.NetworkAPI()
api.release_fixed_ip(context.get_admin_context(), ip_address,
- FLAGS.host)
+ CONF.host)
def init_leases(network_id):
"""Get the list of hosts for a network."""
ctxt = context.get_admin_context()
network_ref = db.network_get(ctxt, network_id)
- network_manager = importutils.import_object(FLAGS.network_manager)
+ network_manager = importutils.import_object(CONF.network_manager)
return network_manager.get_dhcp_leases(ctxt, network_ref)
def main():
"""Parse environment and arguments and call the approproate action."""
- flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
+ flagfile = os.environ.get('FLAGFILE', CONF.dhcpbridge_flagfile)
argv = config.parse_args(sys.argv, default_config_files=[flagfile])
logging.setup("nova")
diff --git a/bin/nova-manage b/bin/nova-manage
index 9d3b07bae..43ff5ebca 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -89,8 +89,6 @@ from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova import version
-FLAGS = flags.FLAGS
-
CONF = config.CONF
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
@@ -381,9 +379,9 @@ class FloatingIpCommands(object):
"""Creates floating ips for zone by range"""
admin_context = context.get_admin_context()
if not pool:
- pool = FLAGS.default_floating_pool
+ pool = CONF.default_floating_pool
if not interface:
- interface = FLAGS.public_interface
+ interface = CONF.public_interface
ips = ({'address': str(address), 'pool': pool, 'interface': interface}
for address in self.address_to_hosts(ip_range))
@@ -477,7 +475,7 @@ class NetworkCommands(object):
if v and k != "self"))
if multi_host is not None:
kwargs['multi_host'] = multi_host == 'T'
- net_manager = importutils.import_object(FLAGS.network_manager)
+ net_manager = importutils.import_object(CONF.network_manager)
net_manager.create_networks(context.get_admin_context(), **kwargs)
def list(self):
@@ -521,8 +519,8 @@ class NetworkCommands(object):
if fixed_range is None and uuid is None:
raise Exception("Please specify either fixed_range or uuid")
- net_manager = importutils.import_object(FLAGS.network_manager)
- if "QuantumManager" in FLAGS.network_manager:
+ net_manager = importutils.import_object(CONF.network_manager)
+ if "QuantumManager" in CONF.network_manager:
if uuid is None:
raise Exception("UUID is required to delete Quantum Networks")
if fixed_range:
@@ -636,7 +634,7 @@ class ServiceCommands(object):
_('Updated_At'))
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
- alive = abs(utils.total_seconds(delta)) <= FLAGS.service_down_time
+ alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
@@ -1123,10 +1121,10 @@ class GetLogCommands(object):
def errors(self):
"""Get all of the errors from the log files"""
error_found = 0
- if FLAGS.logdir:
- logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
+ if CONF.logdir:
+ logs = [x for x in os.listdir(CONF.logdir) if x.endswith('.log')]
for file in logs:
- log_file = os.path.join(FLAGS.logdir, file)
+ log_file = os.path.join(CONF.logdir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
@@ -1226,7 +1224,7 @@ def main():
argv = config.parse_args(sys.argv)
logging.setup("nova")
except cfg.ConfigFilesNotFoundError:
- cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None
+ cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print _("Could not read %s. Re-running with sudo") % cfgfile
diff --git a/bin/nova-network b/bin/nova-network
index d23d7882c..def7782d7 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -40,12 +40,13 @@ from nova.openstack.common import log as logging
from nova import service
from nova import utils
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-network',
- topic=FLAGS.network_topic)
+ topic=CONF.network_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index d3d9702af..1ba43aa01 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -67,8 +67,9 @@ opts = [
default=6080,
help='Port on which to listen for incoming requests'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_cli_opts(opts)
+
+CONF = config.CONF
+CONF.register_cli_opts(opts)
LOG = logging.getLogger(__name__)
@@ -130,28 +131,28 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
if __name__ == '__main__':
- if FLAGS.ssl_only and not os.path.exists(FLAGS.cert):
- parser.error("SSL only and %s not found" % FLAGS.cert)
+ if CONF.ssl_only and not os.path.exists(CONF.cert):
+ parser.error("SSL only and %s not found" % CONF.cert)
# Setup flags
config.parse_args(sys.argv)
# Check to see if novnc html/js/css files are present
- if not os.path.exists(FLAGS.web):
- print "Can not find novnc html/js/css files at %s." % FLAGS.web
+ if not os.path.exists(CONF.web):
+ print "Can not find novnc html/js/css files at %s." % CONF.web
sys.exit(-1)
# Create and start the NovaWebSockets proxy
- server = NovaWebSocketProxy(listen_host=FLAGS.novncproxy_host,
- listen_port=FLAGS.novncproxy_port,
- source_is_ipv6=FLAGS.source_is_ipv6,
- verbose=FLAGS.verbose,
- cert=FLAGS.cert,
- key=FLAGS.key,
- ssl_only=FLAGS.ssl_only,
- daemon=FLAGS.daemon,
- record=FLAGS.record,
- web=FLAGS.web,
+ server = NovaWebSocketProxy(listen_host=CONF.novncproxy_host,
+ listen_port=CONF.novncproxy_port,
+ source_is_ipv6=CONF.source_is_ipv6,
+ verbose=CONF.verbose,
+ cert=CONF.cert,
+ key=CONF.key,
+ ssl_only=CONF.ssl_only,
+ daemon=CONF.daemon,
+ record=CONF.record,
+ web=CONF.web,
target_host='ignore',
target_port='ignore',
wrap_mode='exit',
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
index d63ea108e..d6849ce9d 100755
--- a/bin/nova-rpc-zmq-receiver
+++ b/bin/nova-rpc-zmq-receiver
@@ -40,9 +40,9 @@ from nova.openstack.common import rpc
from nova.openstack.common.rpc import impl_zmq
from nova import utils
-FLAGS = flags.FLAGS
-FLAGS.register_opts(rpc.rpc_opts)
-FLAGS.register_opts(impl_zmq.zmq_opts)
+CONF = config.CONF
+CONF.register_opts(rpc.rpc_opts)
+CONF.register_opts(impl_zmq.zmq_opts)
def main():
@@ -50,7 +50,7 @@ def main():
logging.setup("nova")
utils.monkey_patch()
- ipc_dir = FLAGS.rpc_zmq_ipc_dir
+ ipc_dir = CONF.rpc_zmq_ipc_dir
# Create the necessary directories/files for this service.
if not os.path.isdir(ipc_dir):
@@ -63,10 +63,10 @@ def main():
logging.error(_("Could not create IPC socket directory."))
return
- with contextlib.closing(impl_zmq.ZmqProxy(FLAGS)) as reactor:
+ with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
consume_in = "tcp://%s:%s" % \
- (FLAGS.rpc_zmq_bind_address,
- FLAGS.rpc_zmq_port)
+ (CONF.rpc_zmq_bind_address,
+ CONF.rpc_zmq_port)
consumption_proxy = impl_zmq.InternalContext(None)
reactor.register(consumption_proxy,
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index fc345808a..73dfab207 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -42,12 +42,13 @@ from nova.openstack.common import log as logging
from nova import service
from nova import utils
+CONF = config.CONF
+
if __name__ == '__main__':
config.parse_args(sys.argv)
- FLAGS = flags.FLAGS
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-scheduler',
- topic=FLAGS.scheduler_topic)
+ topic=CONF.scheduler_topic)
service.serve(server)
service.wait()
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index e884b3f52..b816bf2e9 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -38,8 +38,6 @@ from nova.openstack.common import rpc
from nova import service
from nova.vnc import xvp_proxy
-FLAGS = flags.FLAGS
-
if __name__ == "__main__":
config.parse_args(sys.argv)
logging.setup("nova")
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index f77f733c6..bd015802a 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -7,6 +7,7 @@
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
+ "compute:create:forced_host": "is_admin:True",
"compute:get_all": "",
diff --git a/nova/api/auth.py b/nova/api/auth.py
index be99f7041..1562aeede 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -21,6 +21,7 @@ Common Auth Middleware.
import webob.dec
import webob.exc
+from nova import config
from nova import context
from nova import flags
from nova.openstack.common import cfg
@@ -34,16 +35,16 @@ use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(use_forwarded_for_opt)
+CONF = config.CONF
+CONF.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
- pipeline = local_conf[FLAGS.auth_strategy]
- if not FLAGS.api_rate_limit:
- limit_name = FLAGS.auth_strategy + '_nolimit'
+ pipeline = local_conf[CONF.auth_strategy]
+ if not CONF.api_rate_limit:
+ limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
filters = [loader.get_filter(n) for n in pipeline[:-1]]
@@ -95,7 +96,7 @@ class NovaKeystoneContext(wsgi.Middleware):
# Build a context, including the auth_token...
remote_address = req.remote_addr
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
service_catalog = None
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index b1ec45864..1bf1f9f70 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -72,10 +72,8 @@ ec2_opts = [
help='Time in seconds before ec2 timestamp expires'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(ec2_opts)
-
CONF = config.CONF
+CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
@@ -165,11 +163,11 @@ class Lockout(wsgi.Middleware):
def __init__(self, application):
"""middleware can use fake for testing."""
- if FLAGS.memcached_servers:
+ if CONF.memcached_servers:
import memcache
else:
from nova.common import memorycache as memcache
- self.mc = memcache.Client(FLAGS.memcached_servers,
+ self.mc = memcache.Client(CONF.memcached_servers,
debug=0)
super(Lockout, self).__init__(application)
@@ -178,7 +176,7 @@ class Lockout(wsgi.Middleware):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
- if failures >= FLAGS.lockout_attempts:
+ if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
@@ -186,15 +184,15 @@ class Lockout(wsgi.Middleware):
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
- self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
- elif failures >= FLAGS.lockout_attempts:
- lock_mins = FLAGS.lockout_minutes
+ self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
+ elif failures >= CONF.lockout_attempts:
+ lock_mins = CONF.lockout_minutes
msg = _('Access key %(access_key)s has had %(failures)d'
' failed authentications and will be locked out'
' for %(lock_mins)d minutes.') % locals()
LOG.warn(msg)
self.mc.set(failures_key, str(failures),
- time=FLAGS.lockout_minutes * 60)
+ time=CONF.lockout_minutes * 60)
return res
@@ -226,14 +224,14 @@ class EC2KeystoneAuth(wsgi.Middleware):
'path': req.path,
'params': auth_params,
}
- if "ec2" in FLAGS.keystone_ec2_url:
+ if "ec2" in CONF.keystone_ec2_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
creds_json = jsonutils.dumps(creds)
headers = {'Content-Type': 'application/json'}
- o = urlparse.urlparse(FLAGS.keystone_ec2_url)
+ o = urlparse.urlparse(CONF.keystone_ec2_url)
if o.scheme == "http":
conn = httplib.HTTPConnection(o.netloc)
else:
@@ -264,7 +262,7 @@ class EC2KeystoneAuth(wsgi.Middleware):
return ec2_error(req, request_id, "Unauthorized", msg)
remote_address = req.remote_addr
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
@@ -293,7 +291,7 @@ class NoAuth(wsgi.Middleware):
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = req.remote_addr
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
@@ -317,7 +315,7 @@ class Requestify(wsgi.Middleware):
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(req.params,
- expires=FLAGS.ec2_timestamp_expiry)
+ expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 70b1e3b80..6cd7c4431 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -30,7 +30,6 @@ from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def _underscore_to_camelcase(str):
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 3446b5a8f..8a7471951 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -33,6 +33,7 @@ from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -45,7 +46,7 @@ from nova import utils
from nova import volume
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -283,22 +284,22 @@ class CloudController(object):
return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
- if FLAGS.region_list:
+ if CONF.region_list:
regions = []
- for region in FLAGS.region_list:
+ for region in CONF.region_list:
name, _sep, host = region.partition('=')
- endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
+ endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
- FLAGS.ec2_port,
- FLAGS.ec2_path)
+ CONF.ec2_port,
+ CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
- 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
- FLAGS.ec2_host,
- FLAGS.ec2_port,
- FLAGS.ec2_path)}]
+ 'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
+ CONF.ec2_host,
+ CONF.ec2_port,
+ CONF.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
@@ -366,7 +367,7 @@ class CloudController(object):
result = []
for key_pair in key_pairs:
# filter out the vpn keys
- suffix = FLAGS.vpn_key_suffix
+ suffix = CONF.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
@@ -652,7 +653,7 @@ class CloudController(object):
def create_security_group(self, context, group_name, group_description):
if isinstance(group_name, unicode):
group_name = group_name.encode('utf-8')
- if FLAGS.ec2_strict_validation:
+ if CONF.ec2_strict_validation:
# EC2 specification gives constraints for name and description:
# Accepts alphanumeric characters, spaces, dashes, and underscores
allowed = '^[a-zA-Z0-9_\- ]+$'
@@ -1048,7 +1049,7 @@ class CloudController(object):
instances = []
for instance in instances:
if not context.is_admin:
- if instance['image_ref'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(CONF.vpn_image_id):
continue
i = {}
instance_uuid = instance['uuid']
@@ -1070,7 +1071,7 @@ class CloudController(object):
floating_ip = ip_info['floating_ips'][0]
if ip_info['fixed_ip6s']:
i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
- if FLAGS.ec2_private_dns_show_ip:
+ if CONF.ec2_private_dns_show_ip:
i['privateDnsName'] = fixed_ip
else:
i['privateDnsName'] = instance['hostname']
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 4d0a926df..de05aa903 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -25,10 +25,8 @@ from nova import flags
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-from nova import utils
+from nova.openstack.common import uuidutils
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@@ -130,7 +128,7 @@ def id_to_ec2_id(instance_id, template='i-%08x'):
def id_to_ec2_inst_id(instance_id):
"""Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_instance_uuid(ctxt, instance_id)
return id_to_ec2_id(int_id)
@@ -150,7 +148,7 @@ def get_instance_uuid_from_int_id(context, int_id):
def id_to_ec2_snap_id(snapshot_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
- if utils.is_uuid_like(snapshot_id):
+ if uuidutils.is_uuid_like(snapshot_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id)
return id_to_ec2_id(int_id, 'snap-%08x')
@@ -160,7 +158,7 @@ def id_to_ec2_snap_id(snapshot_id):
def id_to_ec2_vol_id(volume_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
- if utils.is_uuid_like(volume_id):
+ if uuidutils.is_uuid_like(volume_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_volume_uuid(ctxt, volume_id)
return id_to_ec2_id(int_id, 'vol-%08x')
diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py
index ef16f086e..331603a3a 100644
--- a/nova/api/ec2/faults.py
+++ b/nova/api/ec2/faults.py
@@ -15,11 +15,12 @@
import webob.dec
import webob.exc
+from nova import config
from nova import context
from nova import flags
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
class Fault(webob.exc.HTTPException):
@@ -44,7 +45,7 @@ class Fault(webob.exc.HTTPException):
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctxt = context.RequestContext(user_id,
diff --git a/nova/api/manager.py b/nova/api/manager.py
index 204e55a0e..dc081d9a6 100644
--- a/nova/api/manager.py
+++ b/nova/api/manager.py
@@ -16,11 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova import manager
from nova.openstack.common import importutils
-FLAGS = flags.FLAGS
+CONF = config.CONF
class MetadataManager(manager.Manager):
@@ -31,7 +32,7 @@ class MetadataManager(manager.Manager):
"""
def __init__(self, *args, **kwargs):
super(MetadataManager, self).__init__(*args, **kwargs)
- self.network_driver = importutils.import_module(FLAGS.network_driver)
+ self.network_driver = importutils.import_module(CONF.network_driver)
def init_host(self):
"""Perform any initialization.
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 5252641fb..21fb4a7da 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -41,9 +41,8 @@ metadata_opts = [
'config drive')),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(metadata_opts)
CONF = config.CONF
+CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
@@ -310,8 +309,8 @@ class InstanceMetadata():
def _get_hostname(self):
return "%s%s%s" % (self.instance['hostname'],
- '.' if FLAGS.dhcp_domain else '',
- FLAGS.dhcp_domain)
+ '.' if CONF.dhcp_domain else '',
+ CONF.dhcp_domain)
def lookup(self, path):
if path == "" or path[0] != "/":
@@ -353,7 +352,7 @@ class InstanceMetadata():
"""Yields (path, value) tuples for metadata elements."""
# EC2 style metadata
for version in VERSIONS + ["latest"]:
- if version in FLAGS.config_drive_skip_versions.split(' '):
+ if version in CONF.config_drive_skip_versions.split(' '):
continue
data = self.get_ec2_metadata(version)
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 25f40e592..14ec696cd 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -29,12 +29,12 @@ from nova import flags
from nova.openstack.common import log as logging
from nova import wsgi
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
-if FLAGS.memcached_servers:
+LOG = logging.getLogger(__name__)
+
+if CONF.memcached_servers:
import memcache
else:
from nova.common import memorycache as memcache
@@ -44,7 +44,7 @@ class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
- self._cache = memcache.Client(FLAGS.memcached_servers, debug=0)
+ self._cache = memcache.Client(CONF.memcached_servers, debug=0)
def get_metadata(self, address):
if not address:
@@ -67,7 +67,7 @@ class MetadataRequestHandler(wsgi.Application):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
remote_address = req.remote_addr
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
if os.path.normpath("/" + req.path_info) == "/":
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 8bb8bacae..78064012b 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -28,7 +28,6 @@ from nova.openstack.common import log as logging
from nova import wsgi as base_wsgi
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
@@ -56,7 +55,7 @@ class NoAuthMiddleware(base_wsgi.Middleware):
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
- if FLAGS.use_forwarded_for:
+ if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ccc70cd1f..50ac76179 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -28,6 +28,7 @@ from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
@@ -35,7 +36,7 @@ from nova import quota
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
QUOTAS = quota.QUOTAS
@@ -148,7 +149,7 @@ def _get_marker_param(request):
return request.GET['marker']
-def limited(items, request, max_limit=FLAGS.osapi_max_limit):
+def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
@@ -185,7 +186,7 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
return items[offset:range_end]
-def get_limit_and_marker(request, max_limit=FLAGS.osapi_max_limit):
+def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
"""get limited parameter from request"""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
@@ -195,7 +196,7 @@ def get_limit_and_marker(request, max_limit=FLAGS.osapi_max_limit):
return limit, marker
-def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
+def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
limit, marker = get_limit_and_marker(request, max_limit)
@@ -414,7 +415,7 @@ class MetadataTemplate(xmlutil.TemplateBuilder):
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
- if not FLAGS.allow_instance_snapshots:
+ if not CONF.allow_instance_snapshots:
LOG.warn(_('Rejecting snapshot request, snapshots currently'
' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
@@ -443,7 +444,7 @@ class ViewBuilder(object):
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(request.application_url,
- FLAGS.osapi_compute_link_prefix)
+ CONF.osapi_compute_link_prefix)
url = os.path.join(prefix,
request.environ["nova.context"].project_id,
collection_name)
@@ -452,7 +453,7 @@ class ViewBuilder(object):
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(request.application_url,
- FLAGS.osapi_compute_link_prefix)
+ CONF.osapi_compute_link_prefix)
return os.path.join(prefix,
request.environ["nova.context"].project_id,
collection_name,
@@ -462,7 +463,7 @@ class ViewBuilder(object):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_link_prefix(base_url,
- FLAGS.osapi_compute_link_prefix)
+ CONF.osapi_compute_link_prefix)
return os.path.join(base_url,
request.environ["nova.context"].project_id,
collection_name,
diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py
index 4af679ffb..e6704951f 100644
--- a/nova/api/openstack/compute/__init__.py
+++ b/nova/api/openstack/compute/__init__.py
@@ -31,6 +31,7 @@ from nova.api.openstack.compute import limits
from nova.api.openstack.compute import server_metadata
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import versions
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -42,8 +43,8 @@ allow_instance_snapshots_opt = cfg.BoolOpt('allow_instance_snapshots',
default=True,
help='Permit instance snapshot operations.')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(allow_instance_snapshots_opt)
+CONF = config.CONF
+CONF.register_opt(allow_instance_snapshots_opt)
class APIRouter(nova.api.openstack.APIRouter):
diff --git a/nova/api/openstack/compute/contrib/__init__.py b/nova/api/openstack/compute/contrib/__init__.py
index d44254eb6..e6a1e9c4d 100644
--- a/nova/api/openstack/compute/contrib/__init__.py
+++ b/nova/api/openstack/compute/contrib/__init__.py
@@ -22,11 +22,12 @@ It can't be called 'extensions' because that causes namespacing problems.
"""
from nova.api.openstack import extensions
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -36,4 +37,4 @@ def standard_extensions(ext_mgr):
def select_extensions(ext_mgr):
extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
- FLAGS.osapi_compute_ext_list)
+ CONF.osapi_compute_ext_list)
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 8432f02fc..1bac0851d 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -27,11 +27,8 @@ from nova import exception
from nova import flags
from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
-
# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py
index ccc6b84a2..c05a208a3 100644
--- a/nova/api/openstack/compute/contrib/certificates.py
+++ b/nova/api/openstack/compute/contrib/certificates.py
@@ -24,9 +24,7 @@ from nova import flags
from nova import network
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'certificates')
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index afc24b95d..77d88144a 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -21,6 +21,7 @@ from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -30,8 +31,7 @@ from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
@@ -70,12 +70,12 @@ class CloudpipeController(object):
# NOTE(vish): One of the drawbacks of doing this in the api is
# the keys will only be on the api node that launched
# the cloudpipe.
- fileutils.ensure_tree(FLAGS.keys_path)
+ fileutils.ensure_tree(CONF.keys_path)
def _get_all_cloudpipes(self, context):
"""Get all cloudpipes"""
return [instance for instance in self.compute_api.get_all(context)
- if instance['image_ref'] == str(FLAGS.vpn_image_id)
+ if instance['image_ref'] == str(CONF.vpn_image_id)
and instance['vm_state'] != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context, project_id):
diff --git a/nova/api/openstack/compute/contrib/config_drive.py b/nova/api/openstack/compute/contrib/config_drive.py
index 779aad539..ac294f660 100644
--- a/nova/api/openstack/compute/contrib/config_drive.py
+++ b/nova/api/openstack/compute/contrib/config_drive.py
@@ -23,8 +23,6 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import flags
-
-FLAGS = flags.FLAGS
authorize = extensions.soft_extension_authorizer('compute', 'config_drive')
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index 6ca10559f..15f6456ea 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -22,8 +22,6 @@ from nova import db
from nova import flags
from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute',
'extended_server_attributes')
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index d88f4e14b..f7ccdcbff 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -21,8 +21,6 @@ from nova import compute
from nova import flags
from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'extended_status')
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index 56a6a8fad..4f6dbffdb 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -27,7 +27,7 @@ from nova.compute import utils as compute_utils
from nova import exception
from nova import network
from nova.openstack.common import log as logging
-from nova import utils
+from nova.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
@@ -307,7 +307,7 @@ class FloatingIPActionController(wsgi.Controller):
# disassociate if associated
if (instance and
floating_ip.get('fixed_ip_id') and
- (utils.is_uuid_like(id) and
+ (uuidutils.is_uuid_like(id) and
[instance['uuid'] == id] or
[instance['id'] == id])[0]):
disassociate_floating_ip(self, context, instance, address)
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 67fc897fb..237872405 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -28,9 +28,7 @@ from nova import exception
from nova import flags
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'hosts')
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 4547bbd01..7c98cb8d6 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -21,11 +21,12 @@ import datetime
import webob.exc
from nova.api.openstack import extensions
+from nova import config
from nova import db
from nova import flags
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
authorize = extensions.extension_authorizer('compute',
@@ -82,7 +83,7 @@ class InstanceUsageAuditLogController(object):
# We do this this way to include disabled compute services,
# which can have instances on them. (mdragon)
services = [svc for svc in db.service_get_all(context)
- if svc['topic'] == FLAGS.compute_topic]
+ if svc['topic'] == CONF.compute_topic]
hosts = set(serv['host'] for serv in services)
seen_hosts = set()
done_hosts = set()
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py
index 62b4a6c80..4537e1ec7 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/networks.py
@@ -26,8 +26,6 @@ from nova import flags
from nova import network
from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'networks')
authorize_view = extensions.extension_authorizer('compute', 'networks:view')
diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py
index 918f17100..054eaf870 100644
--- a/nova/api/openstack/compute/contrib/rescue.py
+++ b/nova/api/openstack/compute/contrib/rescue.py
@@ -21,13 +21,14 @@ from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
authorize = exts.extension_authorizer('compute', 'rescue')
@@ -54,7 +55,7 @@ class RescueController(wsgi.Controller):
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
- password = utils.generate_password(FLAGS.password_length)
+ password = utils.generate_password(CONF.password_length)
instance = self._get_instance(context, id)
try:
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index ee36ee58d..b86397694 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -32,9 +32,7 @@ from nova import exception
from nova import flags
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index ee7924dec..f6e9a63f6 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -26,8 +26,6 @@ from nova import exception
from nova import flags
from nova.openstack.common import timeutils
-
-FLAGS = flags.FLAGS
authorize_show = extensions.extension_authorizer('compute',
'simple_tenant_usage:show')
authorize_list = extensions.extension_authorizer('compute',
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 6eaa51079..1de6134ad 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -30,9 +30,7 @@ from nova.openstack.common import log as logging
from nova import utils
from nova import volume
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'volumes')
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index 01b728a30..c46a6b034 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -16,19 +16,20 @@
# under the License.
from nova.api.openstack import extensions as base_extensions
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ExtensionManager(base_extensions.ExtensionManager):
def __init__(self):
LOG.audit(_('Initializing extension manager.'))
- self.cls_list = FLAGS.osapi_compute_extension
+ self.cls_list = CONF.osapi_compute_extension
self.PluginManager = pluginmanager.PluginManager('nova',
'compute-extensions')
self.PluginManager.load_plugins()
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 4273e40cd..3bc817076 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -24,9 +24,6 @@ from nova import flags
from nova.image import glance
-FLAGS = flags.FLAGS
-
-
class Controller(object):
"""The image metadata API controller for the OpenStack API"""
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 1b20531de..0c280618e 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -27,7 +27,6 @@ import nova.utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
SUPPORTED_FILTERS = {
'name': 'name',
diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py
index 6ad888fd7..ec9759759 100644
--- a/nova/api/openstack/compute/ips.py
+++ b/nova/api/openstack/compute/ips.py
@@ -25,9 +25,7 @@ from nova.api.openstack import xmlutil
from nova import flags
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def make_network(elem):
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index ba88d72e7..d8d2f1c28 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -30,17 +30,19 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
def make_fault(elem):
@@ -602,7 +604,7 @@ class Controller(wsgi.Controller):
self.quantum_attempted = True
from nova.network.quantumv2 import api as quantum_api
self.have_quantum = issubclass(
- importutils.import_class(FLAGS.network_api_class),
+ importutils.import_class(CONF.network_api_class),
quantum_api.API)
except ImportError:
self.have_quantum = False
@@ -621,7 +623,7 @@ class Controller(wsgi.Controller):
# port parameter is only for qunatum v2.0
msg = _("Unknown argment : port")
raise exc.HTTPBadRequest(explanation=msg)
- if not utils.is_uuid_like(port_id):
+ if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
@@ -629,9 +631,9 @@ class Controller(wsgi.Controller):
else:
network_uuid = network['uuid']
- if not port_id and not utils.is_uuid_like(network_uuid):
+ if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
- if not utils.is_uuid_like(br_uuid):
+ if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
@@ -920,7 +922,7 @@ class Controller(wsgi.Controller):
if '_is_precooked' in server['server'].keys():
del server['server']['_is_precooked']
else:
- if FLAGS.enable_instance_password:
+ if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
@@ -929,7 +931,7 @@ class Controller(wsgi.Controller):
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
- if FLAGS.reclaim_instance_interval:
+ if CONF.reclaim_instance_interval:
self.compute_api.soft_delete(context, instance)
else:
self.compute_api.delete(context, instance)
@@ -1099,7 +1101,7 @@ class Controller(wsgi.Controller):
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
- if not utils.is_uuid_like(image_uuid):
+ if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
@@ -1184,7 +1186,7 @@ class Controller(wsgi.Controller):
try:
password = body['adminPass']
except (KeyError, TypeError):
- password = utils.generate_password(FLAGS.password_length)
+ password = utils.generate_password(CONF.password_length)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
@@ -1252,7 +1254,7 @@ class Controller(wsgi.Controller):
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
- if FLAGS.enable_instance_password:
+ if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
@@ -1326,7 +1328,7 @@ class Controller(wsgi.Controller):
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
- password = utils.generate_password(FLAGS.password_length)
+ password = utils.generate_password(CONF.password_length)
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
diff --git a/nova/api/openstack/compute/views/addresses.py b/nova/api/openstack/compute/views/addresses.py
index 41d1d0730..ec5fda64a 100644
--- a/nova/api/openstack/compute/views/addresses.py
+++ b/nova/api/openstack/compute/views/addresses.py
@@ -21,8 +21,6 @@ from nova.api.openstack import common
from nova import flags
from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py
index c0ea71385..d1d7d008f 100644
--- a/nova/api/openstack/compute/views/images.py
+++ b/nova/api/openstack/compute/views/images.py
@@ -18,11 +18,11 @@
import os.path
from nova.api.openstack import common
+from nova import config
from nova import flags
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ViewBuilder(common.ViewBuilder):
@@ -123,7 +123,7 @@ class ViewBuilder(common.ViewBuilder):
"""Create an alternate link for a specific image id."""
glance_url = utils.generate_glance_url()
glance_url = self._update_link_prefix(glance_url,
- FLAGS.osapi_glance_link_prefix)
+ CONF.osapi_glance_link_prefix)
return os.path.join(glance_url,
request.environ["nova.context"].project_id,
self._collection_name,
diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py
index 454761b32..826c8b4a5 100644
--- a/nova/api/openstack/compute/views/versions.py
+++ b/nova/api/openstack/compute/views/versions.py
@@ -19,10 +19,11 @@ import copy
import os
from nova.api.openstack import common
+from nova import config
from nova import flags
-FLAGS = flags.FLAGS
+CONF = config.CONF
def get_view_builder(req):
@@ -93,7 +94,7 @@ class ViewBuilder(common.ViewBuilder):
def generate_href(self, path=None):
"""Create an url that refers to a specific version_number."""
prefix = self._update_link_prefix(self.base_url,
- FLAGS.osapi_compute_link_prefix)
+ CONF.osapi_compute_link_prefix)
version_number = 'v2'
if path:
path = path.strip('/')
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 01c2516c8..298e98603 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -30,9 +30,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class ExtensionDescriptor(object):
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 6c991408d..1d22e74fc 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -21,6 +21,7 @@ Request Body limiting middleware.
import webob.dec
import webob.exc
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -33,8 +34,8 @@ max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
help='the maximum body size '
'per each osapi request(bytes)')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(max_request_body_size_opt)
+CONF = config.CONF
+CONF.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
@@ -46,8 +47,8 @@ class RequestBodySizeLimiter(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- if (req.content_length > FLAGS.osapi_max_request_body_size
- or len(req.body) > FLAGS.osapi_max_request_body_size):
+ if (req.content_length > CONF.osapi_max_request_body_size
+ or len(req.body) > CONF.osapi_max_request_body_size):
msg = _("Request is too large.")
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 021d92034..858e55070 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -49,6 +49,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
@@ -501,7 +502,8 @@ class API(base.Base):
LOG.debug(_("Going to run %s instances...") % num_instances)
filter_properties = dict(scheduler_hints=scheduler_hints)
- if context.is_admin and forced_host:
+ if forced_host:
+ check_policy(context, 'create:forced_host', {})
filter_properties['force_hosts'] = [forced_host]
for i in xrange(num_instances):
@@ -955,7 +957,6 @@ class API(base.Base):
elevated = context.elevated()
self.network_api.deallocate_for_instance(elevated,
instance)
- self.db.instance_destroy(context, instance_uuid)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
@@ -980,6 +981,7 @@ class API(base.Base):
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
+ self.db.instance_destroy(context, instance_uuid)
compute_utils.notify_about_instance_usage(
context, instance, "delete.end", system_metadata=system_meta)
@@ -1069,7 +1071,7 @@ class API(base.Base):
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
instance = self.db.instance_get_by_uuid(context, instance_id)
else:
instance = self.db.instance_get(context, instance_id)
@@ -1559,7 +1561,7 @@ class API(base.Base):
expected_task_state=None)
self.compute_rpcapi.revert_resize(context,
- instance=instance, migration_id=migration_ref['id'],
+ instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
self.db.migration_update(elevated, migration_ref['id'],
@@ -2175,7 +2177,7 @@ class AggregateAPI(base.Base):
self.db.aggregate_host_add(context, aggregate_id, host)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
- aggregate_id=aggregate_id, host_param=host, host=host)
+ aggregate=aggregate, host_param=host, host=host)
return self.get_aggregate(context, aggregate_id)
def remove_host_from_aggregate(self, context, aggregate_id, host):
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index d89e6409a..fd796bd91 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -130,7 +130,7 @@ def get_default_instance_type():
return get_instance_type_by_name(name)
-def get_instance_type(instance_type_id, ctxt=None):
+def get_instance_type(instance_type_id, ctxt=None, inactive=False):
"""Retrieves single instance type by id."""
if instance_type_id is None:
return get_default_instance_type()
@@ -138,6 +138,9 @@ def get_instance_type(instance_type_id, ctxt=None):
if ctxt is None:
ctxt = context.get_admin_context()
+ if inactive:
+ ctxt = ctxt.elevated(read_deleted="yes")
+
return db.instance_type_get(ctxt, instance_type_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 39c3faddf..de848abdd 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -230,7 +230,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.11'
+ RPC_API_VERSION = '2.14'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1532,16 +1532,17 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def revert_resize(self, context, instance, migration_id,
- reservations=None):
+ def revert_resize(self, context, instance, migration=None,
+ migration_id=None, reservations=None):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
- migration_ref = self.db.migration_get(context.elevated(),
- migration_id)
+ if not migration:
+ migration = self.db.migration_get(context.elevated(),
+ migration_id)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
@@ -1555,7 +1556,7 @@ class ComputeManager(manager.SchedulerDependentManager):
teardown=True)
self.network_api.migrate_instance_start(context, instance,
- migration_ref)
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -1567,14 +1568,14 @@ class ComputeManager(manager.SchedulerDependentManager):
self._terminate_volume_connections(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
- migration_ref['id'], migration_ref['source_compute'],
+ migration, migration['source_compute'],
reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
- def finish_revert_resize(self, context, migration_id, instance,
- reservations=None):
+ def finish_revert_resize(self, context, instance, reservations=None,
+ migration=None, migration_id=None):
"""Finishes the second half of reverting a resize.
Power back on the source instance and revert the resized attributes
@@ -1582,7 +1583,9 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
elevated = context.elevated()
- migration_ref = self.db.migration_get(elevated, migration_id)
+
+ if not migration:
+ migration = self.db.migration_get(elevated, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
@@ -1593,11 +1596,11 @@ class ComputeManager(manager.SchedulerDependentManager):
instance = self._instance_update(context,
instance['uuid'],
- host=migration_ref['source_compute'])
+ host=migration['source_compute'])
self.network_api.setup_networks_on_host(context, instance,
- migration_ref['source_compute'])
+ migration['source_compute'])
- old_instance_type = migration_ref['old_instance_type_id']
+ old_instance_type = migration['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
@@ -1628,13 +1631,13 @@ class ComputeManager(manager.SchedulerDependentManager):
RESIZE_REVERTING)
self.network_api.migrate_instance_finish(context, instance,
- migration_ref)
+ migration)
self._instance_update(context, instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None)
- self.db.migration_update(elevated, migration_id,
+ self.db.migration_update(elevated, migration['id'],
{'status': 'reverted'})
self._notify_about_instance_usage(
@@ -2667,7 +2670,10 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_rebooting_instances(self, context):
if FLAGS.reboot_timeout > 0:
- self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
+ instances = self.db.instance_get_all_hung_in_rebooting(
+ context, FLAGS.reboot_timeout)
+ self.driver.poll_rebooting_instances(FLAGS.reboot_timeout,
+ instances)
@manager.periodic_task
def _poll_rescued_instances(self, context):
@@ -3108,9 +3114,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._set_instance_error_state(context, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def add_aggregate_host(self, context, aggregate_id, host, slave_info=None):
+ def add_aggregate_host(self, context, host, slave_info=None,
+ aggregate=None, aggregate_id=None):
"""Notify hypervisor of change (for hypervisor pools)."""
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ if not aggregate:
+ aggregate = self.db.aggregate_get(context, aggregate_id)
+
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
@@ -3118,7 +3127,7 @@ class ComputeManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(context,
self.db.aggregate_host_delete,
- aggregate.id, host)
+ aggregate['id'], host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_aggregate_host(self, context, aggregate_id,
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index e42e025dc..5bf17adcd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -139,6 +139,9 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.9 - Add publish_service_capabilities()
2.10 - Adds filter_properties and request_spec to prep_resize()
2.11 - Adds soft_delete_instance() and restore_instance()
+ 2.12 - Remove migration_id, add migration to revert_resize
+ 2.13 - Remove migration_id, add migration to finish_revert_resize
+ 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
'''
#
@@ -156,7 +159,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=FLAGS.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
- def add_aggregate_host(self, ctxt, aggregate_id, host_param, host,
+ def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
@@ -167,11 +170,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
:param host: This is the host to send the message to.
'''
+ aggregate_p = jsonutils.to_primitive(aggregate)
self.cast(ctxt, self.make_msg('add_aggregate_host',
- aggregate_id=aggregate_id, host=host_param,
+ aggregate=aggregate_p, host=host_param,
slave_info=slave_info),
topic=_compute_topic(self.topic, ctxt, host, None),
- version='2.2')
+ version='2.14')
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
instance_p = jsonutils.to_primitive(instance)
@@ -237,13 +241,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, host, None),
version='2.8')
- def finish_revert_resize(self, ctxt, instance, migration_id, host,
+ def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
self.cast(ctxt, self.make_msg('finish_revert_resize',
- instance=instance_p, migration_id=migration_id,
+ instance=instance_p, migration=migration_p,
reservations=reservations),
- topic=_compute_topic(self.topic, ctxt, host, None))
+ topic=_compute_topic(self.topic, ctxt, host, None),
+ version='2.13')
def get_console_output(self, ctxt, instance, tail_length):
instance_p = jsonutils.to_primitive(instance)
@@ -441,13 +447,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def revert_resize(self, ctxt, instance, migration_id, host,
+ def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
self.cast(ctxt, self.make_msg('revert_resize',
- instance=instance_p, migration_id=migration_id,
+ instance=instance_p, migration=migration_p,
reservations=reservations),
- topic=_compute_topic(self.topic, ctxt, host, instance))
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.12')
def rollback_live_migration_at_destination(self, ctxt, instance, host):
instance_p = jsonutils.to_primitive(instance)
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 17416f991..4a284be64 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -39,8 +39,13 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None):
"""Adds the specified fault to the database."""
code = 500
+ message = fault.__class__.__name__
+
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
+ # get the message from the exception that was thrown
+ # if that does not exist, use the name of the exception class itself
+ message = fault.kwargs.get('value', message)
details = unicode(fault)
if exc_info and code == 500:
@@ -50,7 +55,7 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None):
values = {
'instance_uuid': instance_uuid,
'code': code,
- 'message': fault.__class__.__name__,
+ 'message': unicode(message),
'details': unicode(details),
}
db.instance_fault_create(context, values)
diff --git a/nova/console/api.py b/nova/console/api.py
index 8becf35cf..5a9294ce7 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -22,7 +22,7 @@ from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova import flags
from nova.openstack.common import rpc
-from nova import utils
+from nova.openstack.common import uuidutils
FLAGS = flags.FLAGS
@@ -63,7 +63,7 @@ class API(base.Base):
return rpcapi.get_console_topic(context, instance_host)
def _get_instance(self, context, instance_uuid):
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
instance = self.db.instance_get_by_uuid(context, instance_uuid)
else:
instance = self.db.instance_get(context, instance_uuid)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index 219119724..e654780a3 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -29,17 +29,9 @@ from nova.virt.vmwareapi import driver as vmwareapi_conn
LOG = logging.getLogger(__name__)
-vmrc_manager_opts = [
- cfg.StrOpt('console_public_hostname',
- default='',
- help='Publicly visible name for this console host'),
- cfg.StrOpt('console_driver',
- default='nova.console.vmrc.VMRCConsole',
- help='Driver to use for the console'),
- ]
-
FLAGS = flags.FLAGS
-FLAGS.register_opts(vmrc_manager_opts)
+flags.DECLARE('console_driver', 'nova.console.manager')
+flags.DECLARE('console_public_hostname', 'nova.console.manager')
class ConsoleVMRCManager(manager.Manager):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a00895c57..865bb05f1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,7 +23,16 @@ import collections
import copy
import datetime
import functools
-import warnings
+
+from sqlalchemy import and_
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql import func
from nova import block_device
from nova.common.sqlalchemyutils import paginate_query
@@ -35,16 +44,9 @@ from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import utils
-from sqlalchemy import and_
-from sqlalchemy.exc import IntegrityError
-from sqlalchemy import or_
-from sqlalchemy.orm import joinedload
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import asc
-from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import literal_column
-from sqlalchemy.sql import func
+
FLAGS = flags.FLAGS
@@ -770,9 +772,9 @@ def floating_ip_create(context, values, session=None):
# check uniqueness for not deleted addresses
if not floating_ip_ref.deleted:
try:
- floating_ip = floating_ip_get_by_address(context,
- floating_ip_ref.address,
- session)
+ floating_ip = _floating_ip_get_by_address(context,
+ floating_ip_ref.address,
+ session)
except exception.FloatingIpNotFoundForAddress:
pass
else:
@@ -799,9 +801,9 @@ def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- floating_address,
- session=session)
+ floating_ip_ref = _floating_ip_get_by_address(context,
+ floating_address,
+ session=session)
fixed_ip_ref = fixed_ip_get_by_address(context,
fixed_address,
session=session)
@@ -812,25 +814,18 @@ def floating_ip_fixed_ip_associate(context, floating_address,
@require_context
def floating_ip_deallocate(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref['project_id'] = None
- floating_ip_ref['host'] = None
- floating_ip_ref['auto_assigned'] = False
- floating_ip_ref.save(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ update({'project_id': None,
+ 'host': None,
+ 'auto_assigned': False})
@require_context
def floating_ip_destroy(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref.delete(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ delete()
@require_context
@@ -860,13 +855,9 @@ def floating_ip_disassociate(context, address):
@require_context
def floating_ip_set_auto_assigned(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context,
- address,
- session=session)
- floating_ip_ref.auto_assigned = True
- floating_ip_ref.save(session=session)
+ model_query(context, models.FloatingIp).\
+ filter_by(address=address).\
+ update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
@@ -903,7 +894,12 @@ def floating_ip_get_all_by_project(context, project_id):
@require_context
-def floating_ip_get_by_address(context, address, session=None):
+def floating_ip_get_by_address(context, address):
+ return _floating_ip_get_by_address(context, address)
+
+
+@require_context
+def _floating_ip_get_by_address(context, address, session=None):
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
first()
@@ -920,16 +916,14 @@ def floating_ip_get_by_address(context, address, session=None):
@require_context
-def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
- if not session:
- session = get_session()
-
- fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
- fixed_ip_id = fixed_ip['id']
-
- return model_query(context, models.FloatingIp, session=session).\
- filter_by(fixed_ip_id=fixed_ip_id).\
- all()
+def floating_ip_get_by_fixed_address(context, fixed_address):
+ subq = model_query(context, models.FixedIp.id).\
+ filter_by(address=fixed_address).\
+ limit(1).\
+ subquery()
+ return model_query(context, models.FloatingIp).\
+ filter_by(fixed_ip_id=subq.as_scalar()).\
+ all()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@@ -948,7 +942,9 @@ def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
- floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ floating_ip_ref = _floating_ip_get_by_address(context,
+ address,
+ session)
for (key, value) in values.iteritems():
floating_ip_ref[key] = value
floating_ip_ref.save(session=session)
@@ -1035,7 +1031,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved -- should be a boolean value(True or False), exact value will be
used to filter on the fixed ip address
"""
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
@@ -1067,7 +1063,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
- if instance_uuid and not utils.is_uuid_like(instance_uuid):
+ if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
@@ -1211,7 +1207,7 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
@@ -1463,7 +1459,7 @@ def instance_data_get_for_project(context, project_id, session=None):
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
else:
@@ -1601,7 +1597,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
if marker is not None:
try:
marker = instance_get_by_uuid(context, marker, session=session)
- except exception.InstanceNotFound as e:
+ except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = paginate_query(query_prefix, models.Instance, limit,
[sort_key, 'created_at', 'id'],
@@ -1783,7 +1779,7 @@ def instance_test_and_set(context, instance_uuid, attr, ok_states,
query = model_query(context, models.Instance, session=session,
project_only=True)
- if utils.is_uuid_like(instance_uuid):
+ if uuidutils.is_uuid_like(instance_uuid):
query = query.filter_by(uuid=instance_uuid)
else:
raise exception.InvalidUUID(instance_uuid)
@@ -1835,7 +1831,7 @@ def instance_update_and_get_original(context, instance_uuid, values):
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
session = get_session()
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
@@ -2926,7 +2922,7 @@ def volume_allocate_iscsi_target(context, volume_id, host):
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
- if not utils.is_uuid_like(instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
session = get_session()
@@ -3237,7 +3233,7 @@ def volume_metadata_update(context, volume_id, metadata, delete):
try:
meta_ref = volume_metadata_get_item(context, volume_id,
meta_key, session)
- except exception.VolumeMetadataNotFound, e:
+ except exception.VolumeMetadataNotFound:
meta_ref = models.VolumeMetadata()
item.update({"key": meta_key, "volume_id": volume_id})
@@ -4183,7 +4179,7 @@ def instance_metadata_update(context, instance_uuid, metadata, delete,
try:
meta_ref = instance_metadata_get_item(context, instance_uuid,
meta_key, session)
- except exception.InstanceMetadataNotFound, e:
+ except exception.InstanceMetadataNotFound:
meta_ref = models.InstanceMetadata()
item.update({"key": meta_key, "instance_uuid": instance_uuid})
@@ -4265,7 +4261,7 @@ def instance_system_metadata_update(context, instance_uuid, metadata, delete,
try:
meta_ref = _instance_system_metadata_get_item(
context, instance_uuid, meta_key, session)
- except exception.InstanceSystemMetadataNotFound, e:
+ except exception.InstanceSystemMetadataNotFound:
meta_ref = models.InstanceSystemMetadata()
item.update({"key": meta_key, "instance_uuid": instance_uuid})
@@ -4452,7 +4448,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
try:
spec_ref = instance_type_extra_specs_get_item(
context, flavor_id, key, session)
- except exception.InstanceTypeExtraSpecsNotFound, e:
+ except exception.InstanceTypeExtraSpecsNotFound:
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type["id"],
@@ -4631,7 +4627,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
try:
spec_ref = volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
- except exception.VolumeTypeExtraSpecsNotFound, e:
+ except exception.VolumeTypeExtraSpecsNotFound:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
@@ -4991,12 +4987,12 @@ def aggregate_delete(context, aggregate_id):
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
#Delete Metadata
- rows = model_query(context,
- models.AggregateMetadata).\
- filter_by(aggregate_id=aggregate_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ model_query(context,
+ models.AggregateMetadata).\
+ filter_by(aggregate_id=aggregate_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_admin_context
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 6c349e0c2..184d279ae 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -16,7 +16,149 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Session Handling for SQLAlchemy backend."""
+"""Session Handling for SQLAlchemy backend.
+
+Recommended ways to use sessions within this framework:
+
+* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
+ model_query() will implicitly use a session when called without one
+ supplied. This is the ideal situation because it will allow queries
+ to be automatically retried if the database connection is interrupted.
+
+ Note: Automatic retry will be enabled in a future patch.
+
+ It is generally fine to issue several queries in a row like this. Even though
+ they may be run in separate transactions and/or separate sessions, each one
+ will see the data from the prior calls. If needed, undo- or rollback-like
+ functionality should be handled at a logical level. For an example, look at
+ the code around quotas and reservation_rollback().
+
+ Examples:
+
+ def get_foo(context, foo):
+ return model_query(context, models.Foo).\
+ filter_by(foo=foo).\
+ first()
+
+ def update_foo(context, id, newfoo):
+ model_query(context, models.Foo).\
+ filter_by(id=id).\
+ update({'foo': newfoo})
+
+ def create_foo(context, values):
+ foo_ref = models.Foo()
+ foo_ref.update(values)
+ foo_ref.save()
+ return foo_ref
+
+
+* Within the scope of a single method, keeping all the reads and writes within
+ the context managed by a single session. In this way, the session's __exit__
+ handler will take care of calling flush() and commit() for you.
+ If using this approach, you should not explicitly call flush() or commit().
+ Any error within the context of the session will cause the session to emit
+ a ROLLBACK. If the connection is dropped before this is possible, the
+ database will implicitly rollback the transaction.
+
+ Note: statements in the session scope will not be automatically retried.
+
+ If you create models within the session, they need to be added, but you
+ do not need to call model.save()
+
+ def create_many_foo(context, foos):
+ session = get_session()
+ with session.begin():
+ for foo in foos:
+ foo_ref = models.Foo()
+ foo_ref.update(foo)
+ session.add(foo_ref)
+
+ def update_bar(context, foo_id, newbar):
+ session = get_session()
+ with session.begin():
+ foo_ref = model_query(context, models.Foo, session).\
+ filter_by(id=foo_id).\
+ first()
+ model_query(context, models.Bar, session).\
+ filter_by(id=foo_ref['bar_id']).\
+ update({'bar': newbar})
+
+ Note: update_bar is a trivially simple example of using "with session.begin".
+ Whereas create_many_foo is a good example of when a transaction is needed,
+ it is always best to use as few queries as possible. The two queries in
+ update_bar can be better expressed using a single query which avoids
+ the need for an explicit transaction. It can be expressed like so:
+
+ def update_bar(context, foo_id, newbar):
+ subq = model_query(context, models.Foo.bar_id).\
+ filter_by(id=foo_id).\
+ limit(1).\
+ subquery()
+ model_query(context, models.Bar).\
+ filter_by(id=subq.as_scalar()).\
+ update({'bar': newbar})
+
+ For reference, this emits approximately the following SQL statement:
+
+ UPDATE bar SET bar = ${newbar}
+ WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
+
+* Passing an active session between methods. Sessions should only be passed
+ to private methods. The private method must use a subtransaction; otherwise
+ SQLAlchemy will throw an error when you call session.begin() on an existing
+ transaction. Public methods should not accept a session parameter and should
+ not be involved in sessions within the caller's scope.
+
+ Note that this incurs more overhead in SQLAlchemy than the above means
+ due to nesting transactions, and it is not possible to implicitly retry
+ failed database operations when using this approach.
+
+ This also makes code somewhat more difficult to read and debug, because a
+ single database transaction spans more than one method. Error handling
+ becomes less clear in this situation. When this is needed for code clarity,
+ it should be clearly documented.
+
+ def myfunc(foo):
+ session = get_session()
+ with session.begin():
+ # do some database things
+ bar = _private_func(foo, session)
+ return bar
+
+ def _private_func(foo, session=None):
+ if not session:
+ session = get_session()
+ with session.begin(subtransactions=True):
+ # do some other database things
+ return bar
+
+
+There are some things which it is best to avoid:
+
+* Don't keep a transaction open any longer than necessary.
+
+ This means that your "with session.begin()" block should be as short
+ as possible, while still containing all the related calls for that
+ transaction.
+
+* Avoid "with_lockmode('UPDATE')" when possible.
+
+ In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
+ any rows, it will take a gap-lock. This is a form of write-lock on the
+ "gap" where no rows exist, and prevents any other writes to that space.
+ This can effectively prevent any INSERT into a table by locking the gap
+ at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
+ has an overly broad WHERE clause, or doesn't properly use an index.
+
+ One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
+ number of rows matching a query, and if only one row is returned,
+ then issue the SELECT FOR UPDATE.
+
+ The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
+ However, this can not be done until the "deleted" columns are removed and
+ proper UNIQUE constraints are added to the tables.
+
+"""
import re
import time
@@ -48,11 +190,25 @@ def get_session(autocommit=True, expire_on_commit=False):
_MAKER = get_maker(engine, autocommit, expire_on_commit)
session = _MAKER()
+ session = wrap_session(session)
+ return session
+
+
+def wrap_session(session):
+ """Return a session whose exceptions are wrapped."""
session.query = nova.exception.wrap_db_error(session.query)
session.flush = nova.exception.wrap_db_error(session.flush)
return session
+def get_engine():
+ """Return a SQLAlchemy engine."""
+ global _ENGINE
+ if _ENGINE is None:
+ _ENGINE = create_engine(FLAGS.sql_connection)
+ return _ENGINE
+
+
def synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode"""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -106,72 +262,70 @@ def is_db_connection_error(args):
return False
-def get_engine():
- """Return a SQLAlchemy engine."""
- global _ENGINE
- if _ENGINE is None:
- connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
-
- engine_args = {
- "pool_recycle": FLAGS.sql_idle_timeout,
- "echo": False,
- 'convert_unicode': True,
- }
-
- # Map our SQL debug level to SQLAlchemy's options
- if FLAGS.sql_connection_debug >= 100:
- engine_args['echo'] = 'debug'
- elif FLAGS.sql_connection_debug >= 50:
- engine_args['echo'] = True
-
- if "sqlite" in connection_dict.drivername:
- engine_args["poolclass"] = NullPool
-
- if FLAGS.sql_connection == "sqlite://":
- engine_args["poolclass"] = StaticPool
- engine_args["connect_args"] = {'check_same_thread': False}
-
- _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
-
- sqlalchemy.event.listen(_ENGINE, 'checkin', greenthread_yield)
-
- if 'mysql' in connection_dict.drivername:
- sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener)
- elif 'sqlite' in connection_dict.drivername:
- if not FLAGS.sqlite_synchronous:
- sqlalchemy.event.listen(_ENGINE, 'connect',
- synchronous_switch_listener)
- sqlalchemy.event.listen(_ENGINE, 'connect', add_regexp_listener)
-
- if (FLAGS.sql_connection_trace and
- _ENGINE.dialect.dbapi.__name__ == 'MySQLdb'):
- import MySQLdb.cursors
- _do_query = debug_mysql_do_query()
- setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
-
- try:
- _ENGINE.connect()
- except OperationalError, e:
- if not is_db_connection_error(e.args[0]):
- raise
-
- remaining = FLAGS.sql_max_retries
- if remaining == -1:
- remaining = 'infinite'
- while True:
- msg = _('SQL connection failed. %s attempts left.')
- LOG.warn(msg % remaining)
- if remaining != 'infinite':
- remaining -= 1
- time.sleep(FLAGS.sql_retry_interval)
- try:
- _ENGINE.connect()
- break
- except OperationalError, e:
- if (remaining != 'infinite' and remaining == 0) or \
- not is_db_connection_error(e.args[0]):
- raise
- return _ENGINE
+def create_engine(sql_connection):
+ """Return a new SQLAlchemy engine."""
+ connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
+
+ engine_args = {
+ "pool_recycle": FLAGS.sql_idle_timeout,
+ "echo": False,
+ 'convert_unicode': True,
+ }
+
+ # Map our SQL debug level to SQLAlchemy's options
+ if FLAGS.sql_connection_debug >= 100:
+ engine_args['echo'] = 'debug'
+ elif FLAGS.sql_connection_debug >= 50:
+ engine_args['echo'] = True
+
+ if "sqlite" in connection_dict.drivername:
+ engine_args["poolclass"] = NullPool
+
+ if FLAGS.sql_connection == "sqlite://":
+ engine_args["poolclass"] = StaticPool
+ engine_args["connect_args"] = {'check_same_thread': False}
+
+ engine = sqlalchemy.create_engine(sql_connection, **engine_args)
+
+ sqlalchemy.event.listen(engine, 'checkin', greenthread_yield)
+
+ if 'mysql' in connection_dict.drivername:
+ sqlalchemy.event.listen(engine, 'checkout', ping_listener)
+ elif 'sqlite' in connection_dict.drivername:
+ if not FLAGS.sqlite_synchronous:
+ sqlalchemy.event.listen(engine, 'connect',
+ synchronous_switch_listener)
+ sqlalchemy.event.listen(engine, 'connect', add_regexp_listener)
+
+ if (FLAGS.sql_connection_trace and
+ engine.dialect.dbapi.__name__ == 'MySQLdb'):
+ import MySQLdb.cursors
+ _do_query = debug_mysql_do_query()
+ setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+
+ try:
+ engine.connect()
+ except OperationalError, e:
+ if not is_db_connection_error(e.args[0]):
+ raise
+
+ remaining = FLAGS.sql_max_retries
+ if remaining == -1:
+ remaining = 'infinite'
+ while True:
+ msg = _('SQL connection failed. %s attempts left.')
+ LOG.warn(msg % remaining)
+ if remaining != 'infinite':
+ remaining -= 1
+ time.sleep(FLAGS.sql_retry_interval)
+ try:
+ engine.connect()
+ break
+ except OperationalError, e:
+ if (remaining != 'infinite' and remaining == 0) or \
+ not is_db_connection_error(e.args[0]):
+ raise
+ return engine
def get_maker(engine, autocommit=True, expire_on_commit=False):
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index e9eb9b034..5301ee2c5 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2012-11-03 00:01+0000\n"
+"POT-Creation-Date: 2012-11-07 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -154,8 +154,8 @@ msgstr ""
msgid "Volume %(volume_id)s is still attached, detach volume first."
msgstr ""
-#: nova/exception.py:235 nova/api/ec2/cloud.py:389 nova/api/ec2/cloud.py:414
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2234
+#: nova/exception.py:235 nova/api/ec2/cloud.py:390 nova/api/ec2/cloud.py:415
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2238
msgid "Keypair data is invalid"
msgstr ""
@@ -179,8 +179,8 @@ msgstr ""
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1283
-#: nova/api/openstack/compute/contrib/admin_actions.py:242
+#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1285
+#: nova/api/openstack/compute/contrib/admin_actions.py:239
msgid "Invalid metadata"
msgstr ""
@@ -193,7 +193,7 @@ msgstr ""
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:271 nova/api/ec2/cloud.py:571
+#: nova/exception.py:271 nova/api/ec2/cloud.py:572
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
@@ -1416,17 +1416,17 @@ msgstr ""
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:907
+#: nova/utils.py:895
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:985
+#: nova/utils.py:973
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1103 nova/virt/configdrive.py:151
+#: nova/utils.py:1091 nova/virt/configdrive.py:151
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
@@ -1453,15 +1453,15 @@ msgstr ""
msgid "Loading app %(name)s from %(path)s"
msgstr ""
-#: nova/api/auth.py:108
+#: nova/api/auth.py:109
msgid "Invalid service catalog json."
msgstr ""
-#: nova/api/auth.py:131
+#: nova/api/auth.py:132
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
-#: nova/api/sizelimit.py:51
+#: nova/api/sizelimit.py:52
msgid "Request is too large."
msgstr ""
@@ -1592,262 +1592,262 @@ msgstr ""
msgid "Environment: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:79
+#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:81
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
-#: nova/api/ec2/apirequest.py:64
+#: nova/api/ec2/apirequest.py:63
#, python-format
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:337
+#: nova/api/ec2/cloud.py:338
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:363
+#: nova/api/ec2/cloud.py:364
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:379
+#: nova/api/ec2/cloud.py:380
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:386 nova/api/ec2/cloud.py:411
+#: nova/api/ec2/cloud.py:387 nova/api/ec2/cloud.py:412
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:392 nova/api/ec2/cloud.py:417
+#: nova/api/ec2/cloud.py:393 nova/api/ec2/cloud.py:418
#: nova/api/openstack/compute/contrib/keypairs.py:101
#, python-format
msgid "Key pair '%s' already exists."
msgstr ""
-#: nova/api/ec2/cloud.py:401
+#: nova/api/ec2/cloud.py:402
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:424
+#: nova/api/ec2/cloud.py:425
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:558 nova/api/ec2/cloud.py:679
+#: nova/api/ec2/cloud.py:559 nova/api/ec2/cloud.py:680
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:563
+#: nova/api/ec2/cloud.py:564
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:633
+#: nova/api/ec2/cloud.py:602 nova/api/ec2/cloud.py:634
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:624
+#: nova/api/ec2/cloud.py:625
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:690
+#: nova/api/ec2/cloud.py:691
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:766
+#: nova/api/ec2/cloud.py:767
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:770 nova/api/openstack/compute/contrib/volumes.py:243
+#: nova/api/ec2/cloud.py:771 nova/api/openstack/compute/contrib/volumes.py:241
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:798
+#: nova/api/ec2/cloud.py:799
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:811
+#: nova/api/ec2/cloud.py:812
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:819
+#: nova/api/ec2/cloud.py:820
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:832 nova/api/openstack/compute/contrib/volumes.py:422
+#: nova/api/ec2/cloud.py:833 nova/api/openstack/compute/contrib/volumes.py:420
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:838
+#: nova/api/ec2/cloud.py:839
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:864 nova/api/ec2/cloud.py:921
-#: nova/api/ec2/cloud.py:1458 nova/api/ec2/cloud.py:1473
+#: nova/api/ec2/cloud.py:865 nova/api/ec2/cloud.py:922
+#: nova/api/ec2/cloud.py:1459 nova/api/ec2/cloud.py:1474
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:987
+#: nova/api/ec2/cloud.py:988
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1138
+#: nova/api/ec2/cloud.py:1139
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1142
+#: nova/api/ec2/cloud.py:1143
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1146
+#: nova/api/ec2/cloud.py:1147
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1151
+#: nova/api/ec2/cloud.py:1152
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1154
+#: nova/api/ec2/cloud.py:1155
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1162
+#: nova/api/ec2/cloud.py:1163
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1170
+#: nova/api/ec2/cloud.py:1171
#: nova/api/openstack/compute/contrib/floating_ips.py:257
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1179
+#: nova/api/ec2/cloud.py:1180
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1182
+#: nova/api/ec2/cloud.py:1183
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1185
+#: nova/api/ec2/cloud.py:1186
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1193
+#: nova/api/ec2/cloud.py:1194
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1198
+#: nova/api/ec2/cloud.py:1199
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1201
+#: nova/api/ec2/cloud.py:1202
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1228
+#: nova/api/ec2/cloud.py:1229
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1260
+#: nova/api/ec2/cloud.py:1261
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1270
+#: nova/api/ec2/cloud.py:1271
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1279
+#: nova/api/ec2/cloud.py:1280
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1288
+#: nova/api/ec2/cloud.py:1289
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1379
+#: nova/api/ec2/cloud.py:1380
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1395
+#: nova/api/ec2/cloud.py:1396
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1414
+#: nova/api/ec2/cloud.py:1415
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1476
+#: nova/api/ec2/cloud.py:1477
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1478
+#: nova/api/ec2/cloud.py:1479
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1480
+#: nova/api/ec2/cloud.py:1481
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1482
+#: nova/api/ec2/cloud.py:1483
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1495
+#: nova/api/ec2/cloud.py:1496
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1524
+#: nova/api/ec2/cloud.py:1525
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1554
+#: nova/api/ec2/cloud.py:1555
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1572
+#: nova/api/ec2/cloud.py:1573
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1605
+#: nova/api/ec2/cloud.py:1606
msgid "Invalid CIDR"
msgstr ""
-#: nova/api/ec2/ec2utils.py:189
+#: nova/api/ec2/ec2utils.py:187
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/ec2/ec2utils.py:209
+#: nova/api/ec2/ec2utils.py:207
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/metadata/handler.py:77 nova/api/metadata/handler.py:84
+#: nova/api/metadata/handler.py:79 nova/api/metadata/handler.py:86
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr ""
@@ -1883,129 +1883,129 @@ msgstr ""
msgid "Extension %(ext_name)s extending resource: %(collection)s"
msgstr ""
-#: nova/api/openstack/common.py:99
+#: nova/api/openstack/common.py:100
#, python-format
msgid ""
"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
"Bad upgrade or db corrupted?"
msgstr ""
-#: nova/api/openstack/common.py:138 nova/api/openstack/common.py:172
+#: nova/api/openstack/common.py:139 nova/api/openstack/common.py:173
msgid "limit param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:141 nova/api/openstack/common.py:176
+#: nova/api/openstack/common.py:142 nova/api/openstack/common.py:177
msgid "limit param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:166
+#: nova/api/openstack/common.py:167
msgid "offset param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:180
+#: nova/api/openstack/common.py:181
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:215 nova/api/openstack/compute/servers.py:536
+#: nova/api/openstack/common.py:216 nova/api/openstack/compute/servers.py:538
#, python-format
msgid "marker [%s] not found"
msgstr ""
-#: nova/api/openstack/common.py:255
+#: nova/api/openstack/common.py:256
#, python-format
msgid "href %s does not contain version"
msgstr ""
-#: nova/api/openstack/common.py:270
+#: nova/api/openstack/common.py:271
msgid "Image metadata limit exceeded"
msgstr ""
-#: nova/api/openstack/common.py:278
+#: nova/api/openstack/common.py:279
msgid "Image metadata key cannot be blank"
msgstr ""
-#: nova/api/openstack/common.py:281
+#: nova/api/openstack/common.py:282
msgid "Image metadata key too long"
msgstr ""
-#: nova/api/openstack/common.py:284
+#: nova/api/openstack/common.py:285
msgid "Invalid image metadata"
msgstr ""
-#: nova/api/openstack/common.py:335
+#: nova/api/openstack/common.py:336
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr ""
-#: nova/api/openstack/common.py:338
+#: nova/api/openstack/common.py:339
#, python-format
msgid "Instance is in an invalid state for '%(action)s'"
msgstr ""
-#: nova/api/openstack/common.py:418
+#: nova/api/openstack/common.py:419
msgid "Rejecting snapshot request, snapshots currently disabled"
msgstr ""
-#: nova/api/openstack/common.py:420
+#: nova/api/openstack/common.py:421
msgid "Instance snapshots are not permitted at this time."
msgstr ""
-#: nova/api/openstack/extensions.py:198
+#: nova/api/openstack/extensions.py:196
#, python-format
msgid "Loaded extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:237
+#: nova/api/openstack/extensions.py:235
#, python-format
msgid "Ext name: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:238
+#: nova/api/openstack/extensions.py:236
#, python-format
msgid "Ext alias: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:239
+#: nova/api/openstack/extensions.py:237
#, python-format
msgid "Ext description: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:241
+#: nova/api/openstack/extensions.py:239
#, python-format
msgid "Ext namespace: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:242
+#: nova/api/openstack/extensions.py:240
#, python-format
msgid "Ext updated: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:244
+#: nova/api/openstack/extensions.py:242
#, python-format
msgid "Exception loading extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:258
+#: nova/api/openstack/extensions.py:256
#, python-format
msgid "Loading extension %s"
msgstr ""
-#: nova/api/openstack/extensions.py:267
+#: nova/api/openstack/extensions.py:265
#, python-format
msgid "Calling extension factory %s"
msgstr ""
-#: nova/api/openstack/extensions.py:279
+#: nova/api/openstack/extensions.py:277
#, python-format
msgid "Failed to load extension %(ext_factory)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:360
+#: nova/api/openstack/extensions.py:358
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:384
+#: nova/api/openstack/extensions.py:382
#, python-format
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
@@ -2015,7 +2015,7 @@ msgid "cannot understand JSON"
msgstr ""
#: nova/api/openstack/wsgi.py:223
-#: nova/api/openstack/compute/contrib/hosts.py:85
+#: nova/api/openstack/compute/contrib/hosts.py:83
msgid "cannot understand XML"
msgstr ""
@@ -2064,7 +2064,7 @@ msgstr ""
#: nova/api/openstack/compute/server_metadata.py:76
#: nova/api/openstack/compute/server_metadata.py:101
#: nova/api/openstack/compute/server_metadata.py:126
-#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#: nova/api/openstack/compute/contrib/admin_actions.py:212
msgid "Malformed request body"
msgstr ""
@@ -2098,7 +2098,7 @@ msgstr ""
msgid "subclasses must implement construct()!"
msgstr ""
-#: nova/api/openstack/compute/extensions.py:30
+#: nova/api/openstack/compute/extensions.py:31
msgid "Initializing extension manager."
msgstr ""
@@ -2117,37 +2117,37 @@ msgstr ""
msgid "Invalid minDisk filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:40
-#: nova/api/openstack/compute/images.py:146
-#: nova/api/openstack/compute/images.py:161
+#: nova/api/openstack/compute/image_metadata.py:37
+#: nova/api/openstack/compute/images.py:145
+#: nova/api/openstack/compute/images.py:160
msgid "Image not found."
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:80
+#: nova/api/openstack/compute/image_metadata.py:77
msgid "Incorrect request body format"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:84
+#: nova/api/openstack/compute/image_metadata.py:81
#: nova/api/openstack/compute/server_metadata.py:80
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:79
msgid "Request body and URI mismatch"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:87
+#: nova/api/openstack/compute/image_metadata.py:84
#: nova/api/openstack/compute/server_metadata.py:84
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:82
msgid "Request body contains too many items"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:113
+#: nova/api/openstack/compute/image_metadata.py:110
msgid "Invalid metadata key"
msgstr ""
-#: nova/api/openstack/compute/ips.py:74
+#: nova/api/openstack/compute/ips.py:72
msgid "Instance does not exist"
msgstr ""
-#: nova/api/openstack/compute/ips.py:97
+#: nova/api/openstack/compute/ips.py:95
msgid "Instance is not a member of specified network"
msgstr ""
@@ -2173,318 +2173,318 @@ msgstr ""
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:445
-#: nova/api/openstack/compute/servers.py:457
-#: nova/api/openstack/compute/servers.py:552
-#: nova/api/openstack/compute/servers.py:720
-#: nova/api/openstack/compute/servers.py:981
-#: nova/api/openstack/compute/servers.py:1084
-#: nova/api/openstack/compute/servers.py:1234
+#: nova/api/openstack/compute/servers.py:447
+#: nova/api/openstack/compute/servers.py:459
+#: nova/api/openstack/compute/servers.py:554
+#: nova/api/openstack/compute/servers.py:722
+#: nova/api/openstack/compute/servers.py:983
+#: nova/api/openstack/compute/servers.py:1086
+#: nova/api/openstack/compute/servers.py:1236
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:496
+#: nova/api/openstack/compute/servers.py:498
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:515
+#: nova/api/openstack/compute/servers.py:517
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:559
+#: nova/api/openstack/compute/servers.py:561
msgid "Server name is not a string or unicode"
msgstr ""
-#: nova/api/openstack/compute/servers.py:563
+#: nova/api/openstack/compute/servers.py:565
msgid "Server name is an empty string"
msgstr ""
-#: nova/api/openstack/compute/servers.py:567
+#: nova/api/openstack/compute/servers.py:569
msgid "Server name must be less than 256 characters."
msgstr ""
-#: nova/api/openstack/compute/servers.py:584
+#: nova/api/openstack/compute/servers.py:586
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:587
+#: nova/api/openstack/compute/servers.py:589
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:591
+#: nova/api/openstack/compute/servers.py:593
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:622
+#: nova/api/openstack/compute/servers.py:624
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:625
+#: nova/api/openstack/compute/servers.py:627
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:635
+#: nova/api/openstack/compute/servers.py:637
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:645
+#: nova/api/openstack/compute/servers.py:647
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:658
+#: nova/api/openstack/compute/servers.py:660
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:664
+#: nova/api/openstack/compute/servers.py:666
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:667
+#: nova/api/openstack/compute/servers.py:669
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:693
+#: nova/api/openstack/compute/servers.py:695
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:700
+#: nova/api/openstack/compute/servers.py:702
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:707
+#: nova/api/openstack/compute/servers.py:709
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:736
+#: nova/api/openstack/compute/servers.py:738
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:785
-#: nova/api/openstack/compute/servers.py:891
+#: nova/api/openstack/compute/servers.py:787
+#: nova/api/openstack/compute/servers.py:893
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:825
+#: nova/api/openstack/compute/servers.py:827
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:828
+#: nova/api/openstack/compute/servers.py:830
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:833
+#: nova/api/openstack/compute/servers.py:835
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:836
+#: nova/api/openstack/compute/servers.py:838
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:839
+#: nova/api/openstack/compute/servers.py:841
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:888
+#: nova/api/openstack/compute/servers.py:890
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:894
+#: nova/api/openstack/compute/servers.py:896
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:973
+#: nova/api/openstack/compute/servers.py:975
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:999
-#: nova/api/openstack/compute/servers.py:1019
+#: nova/api/openstack/compute/servers.py:1001
+#: nova/api/openstack/compute/servers.py:1021
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1005
+#: nova/api/openstack/compute/servers.py:1007
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1025
+#: nova/api/openstack/compute/servers.py:1027
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1038
+#: nova/api/openstack/compute/servers.py:1040
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1042
+#: nova/api/openstack/compute/servers.py:1044
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1055
+#: nova/api/openstack/compute/servers.py:1057
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1067
+#: nova/api/openstack/compute/servers.py:1069
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1070
+#: nova/api/openstack/compute/servers.py:1072
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1094
+#: nova/api/openstack/compute/servers.py:1096
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1103
+#: nova/api/openstack/compute/servers.py:1105
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1112
+#: nova/api/openstack/compute/servers.py:1114
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1125
+#: nova/api/openstack/compute/servers.py:1127
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1129
-#: nova/api/openstack/compute/servers.py:1331
+#: nova/api/openstack/compute/servers.py:1131
+#: nova/api/openstack/compute/servers.py:1333
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1140
+#: nova/api/openstack/compute/servers.py:1142
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1153
+#: nova/api/openstack/compute/servers.py:1155
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1156
+#: nova/api/openstack/compute/servers.py:1158
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1174
+#: nova/api/openstack/compute/servers.py:1176
#: nova/api/openstack/compute/contrib/aggregates.py:142
#: nova/api/openstack/compute/contrib/keypairs.py:78
-#: nova/api/openstack/compute/contrib/networks.py:75
+#: nova/api/openstack/compute/contrib/networks.py:73
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1179
+#: nova/api/openstack/compute/servers.py:1181
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1241
+#: nova/api/openstack/compute/servers.py:1243
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1274
+#: nova/api/openstack/compute/servers.py:1276
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1358
+#: nova/api/openstack/compute/servers.py:1360
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:64
+#: nova/api/openstack/compute/contrib/admin_actions.py:61
#, python-format
msgid "Compute.api::pause %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:81
+#: nova/api/openstack/compute/contrib/admin_actions.py:78
#, python-format
msgid "Compute.api::unpause %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:98
+#: nova/api/openstack/compute/contrib/admin_actions.py:95
#, python-format
msgid "compute.api::suspend %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:115
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
#, python-format
msgid "compute.api::resume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:131
+#: nova/api/openstack/compute/contrib/admin_actions.py:128
#, python-format
msgid "Error in migrate %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:145
+#: nova/api/openstack/compute/contrib/admin_actions.py:142
#, python-format
msgid "Compute.api::reset_network %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:158
-#: nova/api/openstack/compute/contrib/admin_actions.py:174
-#: nova/api/openstack/compute/contrib/admin_actions.py:190
-#: nova/api/openstack/compute/contrib/admin_actions.py:312
+#: nova/api/openstack/compute/contrib/admin_actions.py:155
+#: nova/api/openstack/compute/contrib/admin_actions.py:171
+#: nova/api/openstack/compute/contrib/admin_actions.py:187
+#: nova/api/openstack/compute/contrib/admin_actions.py:309
#: nova/api/openstack/compute/contrib/multinic.py:41
-#: nova/api/openstack/compute/contrib/rescue.py:44
+#: nova/api/openstack/compute/contrib/rescue.py:45
msgid "Server not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:161
+#: nova/api/openstack/compute/contrib/admin_actions.py:158
#, python-format
msgid "Compute.api::inject_network_info %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:177
+#: nova/api/openstack/compute/contrib/admin_actions.py:174
#, python-format
msgid "Compute.api::lock %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:193
+#: nova/api/openstack/compute/contrib/admin_actions.py:190
#, python-format
msgid "Compute.api::unlock %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:223
+#: nova/api/openstack/compute/contrib/admin_actions.py:220
#, python-format
msgid "createBackup entity requires %s attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:227
+#: nova/api/openstack/compute/contrib/admin_actions.py:224
msgid "Malformed createBackup entity"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:233
+#: nova/api/openstack/compute/contrib/admin_actions.py:230
msgid "createBackup attribute 'rotation' must be an integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:248
+#: nova/api/openstack/compute/contrib/admin_actions.py:245
#: nova/api/openstack/compute/contrib/console_output.py:47
#: nova/api/openstack/compute/contrib/server_diagnostics.py:47
#: nova/api/openstack/compute/contrib/server_start_stop.py:38
msgid "Instance not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:276
+#: nova/api/openstack/compute/contrib/admin_actions.py:273
msgid "host and block_migration must be specified."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:284
+#: nova/api/openstack/compute/contrib/admin_actions.py:281
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:302
+#: nova/api/openstack/compute/contrib/admin_actions.py:299
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:315
+#: nova/api/openstack/compute/contrib/admin_actions.py:312
#, python-format
msgid "Compute.api::resetState %s"
msgstr ""
@@ -2533,7 +2533,7 @@ msgstr ""
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/certificates.py:76
+#: nova/api/openstack/compute/contrib/certificates.py:74
msgid "Only root certificate can be retrieved."
msgstr ""
@@ -2598,7 +2598,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/floating_ips.py:234
#: nova/api/openstack/compute/contrib/floating_ips.py:290
-#: nova/api/openstack/compute/contrib/security_groups.py:417
+#: nova/api/openstack/compute/contrib/security_groups.py:415
msgid "Missing parameter dict"
msgstr ""
@@ -2632,41 +2632,41 @@ msgstr ""
msgid "Error. Unable to associate floating ip"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:124
+#: nova/api/openstack/compute/contrib/hosts.py:122
#, python-format
msgid "Host '%s' could not be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:153
+#: nova/api/openstack/compute/contrib/hosts.py:151
#, python-format
msgid "Invalid status: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:157
+#: nova/api/openstack/compute/contrib/hosts.py:155
#, python-format
msgid "Invalid mode: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:161
+#: nova/api/openstack/compute/contrib/hosts.py:159
#, python-format
msgid "Invalid update setting: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:179
+#: nova/api/openstack/compute/contrib/hosts.py:177
#, python-format
msgid "Putting host %(host)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:190
+#: nova/api/openstack/compute/contrib/hosts.py:188
#, python-format
msgid "Setting host %(host)s to %(state)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:237
+#: nova/api/openstack/compute/contrib/hosts.py:235
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:245
+#: nova/api/openstack/compute/contrib/hosts.py:243
msgid "Host not found"
msgstr ""
@@ -2686,7 +2686,7 @@ msgstr ""
msgid "No hypervisor matching '%s' could be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:54
+#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:55
#, python-format
msgid "Invalid timestamp for date %s"
msgstr ""
@@ -2704,55 +2704,55 @@ msgstr ""
msgid "Unable to find address %r"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:72
+#: nova/api/openstack/compute/contrib/networks.py:70
#, python-format
msgid "Network does not have %s action"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:80
+#: nova/api/openstack/compute/contrib/networks.py:78
#, python-format
msgid "Disassociating network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:84
-#: nova/api/openstack/compute/contrib/networks.py:101
-#: nova/api/openstack/compute/contrib/networks.py:111
+#: nova/api/openstack/compute/contrib/networks.py:82
+#: nova/api/openstack/compute/contrib/networks.py:99
+#: nova/api/openstack/compute/contrib/networks.py:109
msgid "Network not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:97
+#: nova/api/openstack/compute/contrib/networks.py:95
#, python-format
msgid "Showing network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:107
+#: nova/api/openstack/compute/contrib/networks.py:105
#, python-format
msgid "Deleting network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:122
+#: nova/api/openstack/compute/contrib/networks.py:120
msgid "Missing network in body"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:126
+#: nova/api/openstack/compute/contrib/networks.py:124
msgid "Network label is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:130
+#: nova/api/openstack/compute/contrib/networks.py:128
msgid "Network cidr or cidr_v6 is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:132
+#: nova/api/openstack/compute/contrib/networks.py:130
#, python-format
msgid "Creating network with label %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:148
+#: nova/api/openstack/compute/contrib/networks.py:146
#, python-format
msgid "Associating network %(network)s with project %(project)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:156
+#: nova/api/openstack/compute/contrib/networks.py:154
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr ""
@@ -2765,24 +2765,24 @@ msgstr ""
msgid "Malformed scheduler_hints attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:224
+#: nova/api/openstack/compute/contrib/security_groups.py:222
msgid "Security group id should be integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:333
+#: nova/api/openstack/compute/contrib/security_groups.py:331
msgid "Not enough parameters to build a valid rule."
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:339
+#: nova/api/openstack/compute/contrib/security_groups.py:337
#, python-format
msgid "This rule already exists in group %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:420
+#: nova/api/openstack/compute/contrib/security_groups.py:418
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:424
+#: nova/api/openstack/compute/contrib/security_groups.py:422
msgid "Security group name cannot be empty"
msgstr ""
@@ -2794,38 +2794,38 @@ msgstr ""
msgid "stop instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:75
+#: nova/api/openstack/compute/contrib/volumes.py:73
#, python-format
msgid "vol=%s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:187
+#: nova/api/openstack/compute/contrib/volumes.py:185
#, python-format
msgid "Delete volume with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:352
-#: nova/api/openstack/compute/contrib/volumes.py:432
+#: nova/api/openstack/compute/contrib/volumes.py:350
+#: nova/api/openstack/compute/contrib/volumes.py:430
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:383
+#: nova/api/openstack/compute/contrib/volumes.py:381
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:546
+#: nova/api/openstack/compute/contrib/volumes.py:544
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:589
+#: nova/api/openstack/compute/contrib/volumes.py:587
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:593
+#: nova/api/openstack/compute/contrib/volumes.py:591
#, python-format
msgid "Invalid value '%s' for force. "
msgstr ""
@@ -2834,23 +2834,23 @@ msgstr ""
msgid "Instance has had its instance_type removed from the DB"
msgstr ""
-#: nova/cloudpipe/pipelib.py:44
+#: nova/cloudpipe/pipelib.py:45
msgid "Instance type for vpn instances"
msgstr ""
-#: nova/cloudpipe/pipelib.py:47
+#: nova/cloudpipe/pipelib.py:48
msgid "Template for cloudpipe instance boot script"
msgstr ""
-#: nova/cloudpipe/pipelib.py:50
+#: nova/cloudpipe/pipelib.py:51
msgid "Network to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:53
+#: nova/cloudpipe/pipelib.py:54
msgid "Netmask to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:106
+#: nova/cloudpipe/pipelib.py:109
#, python-format
msgid "Launching VPN for %s"
msgstr ""
@@ -2863,200 +2863,200 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/compute/api.py:221
+#: nova/compute/api.py:224
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:228
+#: nova/compute/api.py:231
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:237
+#: nova/compute/api.py:240
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:257
+#: nova/compute/api.py:260
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:267
+#: nova/compute/api.py:270
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:271
+#: nova/compute/api.py:274
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:275
+#: nova/compute/api.py:278
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:499
+#: nova/compute/api.py:502
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:570
+#: nova/compute/api.py:574
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:597
+#: nova/compute/api.py:601
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:829
+#: nova/compute/api.py:833
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:846
+#: nova/compute/api.py:850
msgid "No host for instance, deleting immediately"
msgstr ""
-#: nova/compute/api.py:946
+#: nova/compute/api.py:950
msgid "host for instance is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:990
+#: nova/compute/api.py:994
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1030
+#: nova/compute/api.py:1034
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1044
+#: nova/compute/api.py:1048
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1108
+#: nova/compute/api.py:1112
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1243
+#: nova/compute/api.py:1247
#, python-format
msgid "Image type not recognized %s"
msgstr ""
-#: nova/compute/api.py:1352
+#: nova/compute/api.py:1356
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1674
+#: nova/compute/api.py:1678
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1683
+#: nova/compute/api.py:1687
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1725
+#: nova/compute/api.py:1729
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
msgstr ""
-#: nova/compute/api.py:1897
+#: nova/compute/api.py:1901
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1905
+#: nova/compute/api.py:1909
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:1973
+#: nova/compute/api.py:1977
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2058
+#: nova/compute/api.py:2062
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2207
+#: nova/compute/api.py:2211
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2211
+#: nova/compute/api.py:2215
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2312
+#: nova/compute/api.py:2316
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2315
+#: nova/compute/api.py:2319
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2323
+#: nova/compute/api.py:2327
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2329
+#: nova/compute/api.py:2333
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2349
+#: nova/compute/api.py:2353
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2352
+#: nova/compute/api.py:2356
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2359
+#: nova/compute/api.py:2363
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2424
+#: nova/compute/api.py:2428
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2432
+#: nova/compute/api.py:2436
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2435
+#: nova/compute/api.py:2439
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2692
+#: nova/compute/api.py:2696
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2701
+#: nova/compute/api.py:2705
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2704
+#: nova/compute/api.py:2708
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2715
+#: nova/compute/api.py:2719
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3136,7 +3136,7 @@ msgstr ""
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:597 nova/compute/manager.py:1740
+#: nova/compute/manager.py:597 nova/compute/manager.py:1743
msgid "Error trying to reschedule"
msgstr ""
@@ -3225,8 +3225,8 @@ msgstr ""
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:995 nova/compute/manager.py:1909
-#: nova/compute/manager.py:3106
+#: nova/compute/manager.py:995 nova/compute/manager.py:1912
+#: nova/compute/manager.py:3112
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
@@ -3324,264 +3324,264 @@ msgstr ""
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1677
+#: nova/compute/manager.py:1680
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1696
+#: nova/compute/manager.py:1699
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:1906
+#: nova/compute/manager.py:1909
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:1962
+#: nova/compute/manager.py:1965
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:1979
+#: nova/compute/manager.py:1982
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2017
+#: nova/compute/manager.py:2020
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2047
+#: nova/compute/manager.py:2050
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2063
+#: nova/compute/manager.py:2066
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2068
+#: nova/compute/manager.py:2071
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2071
+#: nova/compute/manager.py:2074
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2088
+#: nova/compute/manager.py:2091
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2113
+#: nova/compute/manager.py:2116
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2141
+#: nova/compute/manager.py:2144
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2185
+#: nova/compute/manager.py:2188
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2194
+#: nova/compute/manager.py:2197
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2209
+#: nova/compute/manager.py:2212
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2238
+#: nova/compute/manager.py:2241
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2242
+#: nova/compute/manager.py:2245
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2255
+#: nova/compute/manager.py:2258
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2299
+#: nova/compute/manager.py:2302
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2359
+#: nova/compute/manager.py:2362
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2419
+#: nova/compute/manager.py:2422
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2445
+#: nova/compute/manager.py:2448
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2478
+#: nova/compute/manager.py:2481
msgid "No floating_ip found"
msgstr ""
-#: nova/compute/manager.py:2486
+#: nova/compute/manager.py:2489
msgid "No floating_ip found."
msgstr ""
-#: nova/compute/manager.py:2488
+#: nova/compute/manager.py:2491
#, python-format
msgid ""
"Live migration: Unexpected error: cannot inherit floating ip.\n"
"%(e)s"
msgstr ""
-#: nova/compute/manager.py:2514
+#: nova/compute/manager.py:2517
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2516
+#: nova/compute/manager.py:2519
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2530
+#: nova/compute/manager.py:2533
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2661
+#: nova/compute/manager.py:2664
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2687
+#: nova/compute/manager.py:2693
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2692
+#: nova/compute/manager.py:2698
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2701
+#: nova/compute/manager.py:2707
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2708
+#: nova/compute/manager.py:2714
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2712
+#: nova/compute/manager.py:2718
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2719
+#: nova/compute/manager.py:2725
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2727
+#: nova/compute/manager.py:2733
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2744
+#: nova/compute/manager.py:2750
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2762
+#: nova/compute/manager.py:2768
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2785
+#: nova/compute/manager.py:2791
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:2850
+#: nova/compute/manager.py:2856
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:2876
+#: nova/compute/manager.py:2882
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2882 nova/compute/manager.py:2920
+#: nova/compute/manager.py:2888 nova/compute/manager.py:2926
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:2907
+#: nova/compute/manager.py:2913
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:2944
+#: nova/compute/manager.py:2950
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2956 nova/compute/manager.py:2967
-#: nova/compute/manager.py:2981
+#: nova/compute/manager.py:2962 nova/compute/manager.py:2973
+#: nova/compute/manager.py:2987
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:2961
+#: nova/compute/manager.py:2967
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2974
+#: nova/compute/manager.py:2980
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2990
+#: nova/compute/manager.py:2996
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:2998
+#: nova/compute/manager.py:3004
msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3011
+#: nova/compute/manager.py:3017
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3060
+#: nova/compute/manager.py:3066
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3067
+#: nova/compute/manager.py:3073
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3074
+#: nova/compute/manager.py:3080
#, python-format
msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
msgstr ""
@@ -3754,16 +3754,16 @@ msgstr ""
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/console/manager.py:81 nova/console/vmrc_manager.py:71
+#: nova/console/manager.py:81 nova/console/vmrc_manager.py:63
msgid "Adding console"
msgstr ""
-#: nova/console/manager.py:102 nova/console/vmrc_manager.py:123
+#: nova/console/manager.py:102 nova/console/vmrc_manager.py:115
#, python-format
msgid "Tried to remove non-existent console %(console_id)s."
msgstr ""
-#: nova/console/vmrc_manager.py:126
+#: nova/console/vmrc_manager.py:118
#, python-format
msgid "Removing console %(console_id)s."
msgstr ""
@@ -3808,50 +3808,50 @@ msgstr ""
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/sqlalchemy/api.py:206
+#: nova/db/sqlalchemy/api.py:208
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2796
+#: nova/db/sqlalchemy/api.py:2792
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4697
+#: nova/db/sqlalchemy/api.py:4693
msgid "Backend exists"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4717 nova/db/sqlalchemy/api.py:4743
+#: nova/db/sqlalchemy/api.py:4713 nova/db/sqlalchemy/api.py:4739
#, python-format
msgid "No backend config with id %(sm_backend_id)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4755
+#: nova/db/sqlalchemy/api.py:4751
#, python-format
msgid "No backend config with sr uuid %(sr_uuid)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4789
+#: nova/db/sqlalchemy/api.py:4785
msgid "Flavor exists"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4804
+#: nova/db/sqlalchemy/api.py:4800
#, python-format
msgid "%(sm_flavor_id) flavor not found"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4823
+#: nova/db/sqlalchemy/api.py:4819
#, python-format
msgid "No sm_flavor called %(sm_flavor_id)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4840
+#: nova/db/sqlalchemy/api.py:4836
#, python-format
msgid "No sm_flavor called %(sm_flavor_label)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4878
+#: nova/db/sqlalchemy/api.py:4874
#, python-format
msgid "No sm_volume with id %(volume_id)s"
msgstr ""
@@ -3864,7 +3864,7 @@ msgstr ""
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/session.py:162
+#: nova/db/sqlalchemy/session.py:316
#, python-format
msgid "SQL connection failed. %s attempts left."
msgstr ""
@@ -4108,69 +4108,69 @@ msgstr ""
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:284
+#: nova/network/manager.py:285
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:293 nova/network/manager.py:552
+#: nova/network/manager.py:294 nova/network/manager.py:553
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:308
+#: nova/network/manager.py:309
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:372
+#: nova/network/manager.py:373
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:390
+#: nova/network/manager.py:391
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:394
+#: nova/network/manager.py:395
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:415
+#: nova/network/manager.py:416
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:476
+#: nova/network/manager.py:477
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:673
+#: nova/network/manager.py:674
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:680
+#: nova/network/manager.py:681
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:706
+#: nova/network/manager.py:707
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:714
+#: nova/network/manager.py:715
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:761
+#: nova/network/manager.py:762
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4178,39 +4178,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:807
+#: nova/network/manager.py:808
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:817
+#: nova/network/manager.py:818
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:931
+#: nova/network/manager.py:932
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:935
+#: nova/network/manager.py:936
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1049
+#: nova/network/manager.py:1050
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1054
+#: nova/network/manager.py:1055
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1084
+#: nova/network/manager.py:1085
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1307
+#: nova/network/manager.py:1308
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4218,89 +4218,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1388
+#: nova/network/manager.py:1389
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1409
+#: nova/network/manager.py:1410
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1413
+#: nova/network/manager.py:1414
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1421
+#: nova/network/manager.py:1422
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1426
+#: nova/network/manager.py:1427
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1430
+#: nova/network/manager.py:1431
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1433
+#: nova/network/manager.py:1434
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1452
+#: nova/network/manager.py:1453
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1476
+#: nova/network/manager.py:1477
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1496
+#: nova/network/manager.py:1497
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1577
+#: nova/network/manager.py:1578
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1580
+#: nova/network/manager.py:1581
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1591
+#: nova/network/manager.py:1592
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1650
+#: nova/network/manager.py:1651
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1670
+#: nova/network/manager.py:1671
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2136
+#: nova/network/manager.py:2137
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2143
+#: nova/network/manager.py:2144
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
@@ -4333,42 +4333,42 @@ msgstr ""
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:98
+#: nova/network/quantumv2/api.py:105
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:101
+#: nova/network/quantumv2/api.py:108
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:154
+#: nova/network/quantumv2/api.py:161
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:166
+#: nova/network/quantumv2/api.py:173
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:175
+#: nova/network/quantumv2/api.py:182
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:185
+#: nova/network/quantumv2/api.py:192
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:200
+#: nova/network/quantumv2/api.py:207
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:452
+#: nova/network/quantumv2/api.py:459
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
@@ -4764,42 +4764,42 @@ msgstr ""
msgid "No key defining hosts for topic '%s', see ringfile"
msgstr ""
-#: nova/scheduler/chance.py:49
+#: nova/scheduler/chance.py:50
msgid "Is the appropriate service running?"
msgstr ""
-#: nova/scheduler/chance.py:54
+#: nova/scheduler/chance.py:55
msgid "Could not find another compute"
msgstr ""
-#: nova/scheduler/driver.py:64
+#: nova/scheduler/driver.py:66
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:184
+#: nova/scheduler/driver.py:70 nova/scheduler/manager.py:185
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:110
+#: nova/scheduler/driver.py:112
#, python-format
msgid "Casted '%(method)s' to compute '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:125
+#: nova/scheduler/driver.py:127
#, python-format
msgid "Casted '%(method)s' to %(topic)s '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:173
+#: nova/scheduler/driver.py:175
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:181
+#: nova/scheduler/driver.py:183
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:313
+#: nova/scheduler/driver.py:315
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
@@ -4836,51 +4836,51 @@ msgstr ""
msgid "Weighted %(weighted_host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:237
+#: nova/scheduler/host_manager.py:247
#, python-format
msgid "Host filter fails for ignored host %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:244
+#: nova/scheduler/host_manager.py:254
#, python-format
msgid "Host filter fails for non-forced host %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:250
+#: nova/scheduler/host_manager.py:260
#, python-format
msgid "Host filter function %(func)s failed for %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:256
+#: nova/scheduler/host_manager.py:266
#, python-format
msgid "Host filter passes for %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:318
+#: nova/scheduler/host_manager.py:329
#, python-format
msgid "Received %(service_name)s service update from %(host)s."
msgstr ""
-#: nova/scheduler/host_manager.py:341
+#: nova/scheduler/host_manager.py:352
msgid "host_manager only implemented for 'compute'"
msgstr ""
-#: nova/scheduler/host_manager.py:351
+#: nova/scheduler/host_manager.py:360
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:168
+#: nova/scheduler/manager.py:169
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
-#: nova/scheduler/scheduler_options.py:69
+#: nova/scheduler/scheduler_options.py:70
#, python-format
msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
msgstr ""
-#: nova/scheduler/scheduler_options.py:78
+#: nova/scheduler/scheduler_options.py:79
#, python-format
msgid "Could not decode scheduler options: '%(e)s'"
msgstr ""
@@ -4892,21 +4892,21 @@ msgstr ""
msgid "%(host_state)s fails instance_type extra_specs requirements"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:38
+#: nova/scheduler/filters/compute_filter.py:39
#, python-format
msgid "%(host_state)s is disabled or has not been heard from in a while"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:42
+#: nova/scheduler/filters/compute_filter.py:43
#, python-format
msgid "%(host_state)s is disabled via capabilities"
msgstr ""
-#: nova/scheduler/filters/core_filter.py:45
+#: nova/scheduler/filters/core_filter.py:46
msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
-#: nova/scheduler/filters/disk_filter.py:47
+#: nova/scheduler/filters/disk_filter.py:48
#, python-format
msgid ""
"%(host_state)s does not have %(requested_disk)s MB usable disk, it only "
@@ -4939,21 +4939,21 @@ msgstr ""
msgid "%(host_state)s does not support requested instance_properties"
msgstr ""
-#: nova/scheduler/filters/io_ops_filter.py:42
+#: nova/scheduler/filters/io_ops_filter.py:43
#, python-format
msgid ""
"%(host_state)s fails I/O ops check: Max IOs per host is set to "
"%(max_io_ops)s"
msgstr ""
-#: nova/scheduler/filters/num_instances_filter.py:39
+#: nova/scheduler/filters/num_instances_filter.py:40
#, python-format
msgid ""
"%(host_state)s fails num_instances check: Max instances per host is set "
"to %(max_instances)s"
msgstr ""
-#: nova/scheduler/filters/ram_filter.py:46
+#: nova/scheduler/filters/ram_filter.py:47
#, python-format
msgid ""
"%(host_state)s does not have %(requested_ram)s MB usable ram, it only has"
@@ -4965,7 +4965,7 @@ msgstr ""
msgid "Previously tried hosts: %(hosts)s. (host=%(host)s)"
msgstr ""
-#: nova/scheduler/filters/trusted_filter.py:201
+#: nova/scheduler/filters/trusted_filter.py:202
#, python-format
msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)"
msgstr ""
@@ -5065,12 +5065,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_xenapi.py:722
+#: nova/tests/test_xenapi.py:724
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:733
+#: nova/tests/test_xenapi.py:735
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5152,28 +5152,28 @@ msgstr ""
msgid "test_snapshot_detail: resp_dict=%s"
msgstr ""
-#: nova/tests/compute/test_compute.py:633
-#: nova/tests/compute/test_compute.py:651
-#: nova/tests/compute/test_compute.py:687
-#: nova/tests/compute/test_compute.py:712
-#: nova/tests/compute/test_compute.py:2403
+#: nova/tests/compute/test_compute.py:619
+#: nova/tests/compute/test_compute.py:637
+#: nova/tests/compute/test_compute.py:673
+#: nova/tests/compute/test_compute.py:698
+#: nova/tests/compute/test_compute.py:2373
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:639
-#: nova/tests/compute/test_compute.py:674
-#: nova/tests/compute/test_compute.py:700
-#: nova/tests/compute/test_compute.py:730
+#: nova/tests/compute/test_compute.py:625
+#: nova/tests/compute/test_compute.py:660
+#: nova/tests/compute/test_compute.py:686
+#: nova/tests/compute/test_compute.py:716
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1107
+#: nova/tests/compute/test_compute.py:1093
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2414
+#: nova/tests/compute/test_compute.py:2384
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
@@ -5522,7 +5522,7 @@ msgid "instance %s: finished toXML method"
msgstr ""
#: nova/virt/baremetal/driver.py:559 nova/virt/hyperv/hostops.py:43
-#: nova/virt/libvirt/driver.py:1986
+#: nova/virt/libvirt/driver.py:1988
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. This error can be safely ignored for now."
@@ -5538,7 +5538,7 @@ msgid "Updating!"
msgstr ""
#: nova/virt/baremetal/driver.py:726 nova/virt/hyperv/hostops.py:141
-#: nova/virt/libvirt/driver.py:3028 nova/virt/xenapi/host.py:149
+#: nova/virt/libvirt/driver.py:3030 nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
@@ -6139,7 +6139,7 @@ msgstr ""
msgid "Failed to remove volume from VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:603
+#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:605
msgid "Could not determine iscsi initiator name"
msgstr ""
@@ -6257,219 +6257,219 @@ msgstr ""
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1462
+#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1464
#: nova/virt/xenapi/vm_utils.py:476
#, python-format
msgid "block_device_list %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:332
+#: nova/virt/libvirt/driver.py:334
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:338
+#: nova/virt/libvirt/driver.py:340
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:359
+#: nova/virt/libvirt/driver.py:361
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:381 nova/virt/libvirt/driver.py:384
+#: nova/virt/libvirt/driver.py:383 nova/virt/libvirt/driver.py:386
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:466
+#: nova/virt/libvirt/driver.py:468
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:480
+#: nova/virt/libvirt/driver.py:482
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:485
+#: nova/virt/libvirt/driver.py:487
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:507
+#: nova/virt/libvirt/driver.py:509
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:522
+#: nova/virt/libvirt/driver.py:524
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:535
+#: nova/virt/libvirt/driver.py:537
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:552
+#: nova/virt/libvirt/driver.py:554
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:566
+#: nova/virt/libvirt/driver.py:568
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:728
+#: nova/virt/libvirt/driver.py:730
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:738
+#: nova/virt/libvirt/driver.py:740
msgid "attaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:751
+#: nova/virt/libvirt/driver.py:753
msgid "detaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:883
+#: nova/virt/libvirt/driver.py:885
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:887
+#: nova/virt/libvirt/driver.py:889
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:919
+#: nova/virt/libvirt/driver.py:921
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:954
+#: nova/virt/libvirt/driver.py:956
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1084
+#: nova/virt/libvirt/driver.py:1086
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1091 nova/virt/powervm/operator.py:253
+#: nova/virt/libvirt/driver.py:1093 nova/virt/powervm/operator.py:253
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1107
+#: nova/virt/libvirt/driver.py:1109
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1153
+#: nova/virt/libvirt/driver.py:1155
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1197
+#: nova/virt/libvirt/driver.py:1199
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1201
+#: nova/virt/libvirt/driver.py:1203
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1205 nova/virt/libvirt/driver.py:1209
+#: nova/virt/libvirt/driver.py:1207 nova/virt/libvirt/driver.py:1211
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1275
+#: nova/virt/libvirt/driver.py:1277
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1401
+#: nova/virt/libvirt/driver.py:1403
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1411
+#: nova/virt/libvirt/driver.py:1413
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1425
+#: nova/virt/libvirt/driver.py:1427
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1435
+#: nova/virt/libvirt/driver.py:1437
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1509
+#: nova/virt/libvirt/driver.py:1511
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1515
+#: nova/virt/libvirt/driver.py:1517
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1519
+#: nova/virt/libvirt/driver.py:1521
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1523
+#: nova/virt/libvirt/driver.py:1525
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1539
+#: nova/virt/libvirt/driver.py:1541
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1831
+#: nova/virt/libvirt/driver.py:1833
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1835
+#: nova/virt/libvirt/driver.py:1837
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1852
+#: nova/virt/libvirt/driver.py:1854
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2104
+#: nova/virt/libvirt/driver.py:2106
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2291
+#: nova/virt/libvirt/driver.py:2293
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2299
+#: nova/virt/libvirt/driver.py:2301
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2334
+#: nova/virt/libvirt/driver.py:2336
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2354
+#: nova/virt/libvirt/driver.py:2356
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2366
+#: nova/virt/libvirt/driver.py:2368
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6479,51 +6479,51 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2383
+#: nova/virt/libvirt/driver.py:2385
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2431
+#: nova/virt/libvirt/driver.py:2433
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2501
+#: nova/virt/libvirt/driver.py:2503
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2545
+#: nova/virt/libvirt/driver.py:2547
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2672
+#: nova/virt/libvirt/driver.py:2674
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2721
+#: nova/virt/libvirt/driver.py:2723
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2783
+#: nova/virt/libvirt/driver.py:2785
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2842
+#: nova/virt/libvirt/driver.py:2844
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2849
+#: nova/virt/libvirt/driver.py:2851
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2900
+#: nova/virt/libvirt/driver.py:2902
msgid "Starting finish_revert_migration"
msgstr ""
@@ -6564,127 +6564,127 @@ msgstr ""
msgid "Unknown image_type=%s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:164
+#: nova/virt/libvirt/imagecache.py:166
#, python-format
msgid "%s is a valid instance name"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:167
+#: nova/virt/libvirt/imagecache.py:169
#, python-format
msgid "%s has a disk file"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:169
+#: nova/virt/libvirt/imagecache.py:171
#, python-format
msgid "Instance %(instance)s is backed by %(backing)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:182
+#: nova/virt/libvirt/imagecache.py:184
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
"appear in the image service"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:234
+#: nova/virt/libvirt/imagecache.py:239
#, python-format
msgid "%(id)s (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:244
+#: nova/virt/libvirt/imagecache.py:249
#, python-format
msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:263
+#: nova/virt/libvirt/imagecache.py:268
#, python-format
msgid "Cannot remove %(base_file)s, it does not exist"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:275
+#: nova/virt/libvirt/imagecache.py:280
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:278
+#: nova/virt/libvirt/imagecache.py:283
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:285
+#: nova/virt/libvirt/imagecache.py:290
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:296
+#: nova/virt/libvirt/imagecache.py:301
#, python-format
msgid "%(id)s (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:315
+#: nova/virt/libvirt/imagecache.py:320
#, python-format
msgid ""
"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
"on other nodes"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:327
+#: nova/virt/libvirt/imagecache.py:332
#, python-format
msgid ""
"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
"instances: %(instance_list)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:335
+#: nova/virt/libvirt/imagecache.py:340
#, python-format
msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:345
+#: nova/virt/libvirt/imagecache.py:350
#, python-format
msgid "%(id)s (%(base_file)s): image is not in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:351
+#: nova/virt/libvirt/imagecache.py:356
#, python-format
msgid "%(id)s (%(base_file)s): image is in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:374
+#: nova/virt/libvirt/imagecache.py:379
#, python-format
msgid "Skipping verification, no base directory at %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:378
+#: nova/virt/libvirt/imagecache.py:383
msgid "Verify base images"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:385
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:403
+#: nova/virt/libvirt/imagecache.py:408
#, python-format
msgid "Unknown base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:408
+#: nova/virt/libvirt/imagecache.py:413
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:411
+#: nova/virt/libvirt/imagecache.py:416
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:415
+#: nova/virt/libvirt/imagecache.py:420
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:423
+#: nova/virt/libvirt/imagecache.py:428
msgid "Verification complete"
msgstr ""
@@ -6693,14 +6693,14 @@ msgstr ""
msgid "LVM snapshots not implemented"
msgstr ""
-#: nova/virt/libvirt/utils.py:131
+#: nova/virt/libvirt/utils.py:134
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/utils.py:140
+#: nova/virt/libvirt/utils.py:143
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -6708,68 +6708,68 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:187
+#: nova/virt/libvirt/utils.py:190
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:406
+#: nova/virt/libvirt/utils.py:409
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/utils.py:495
+#: nova/virt/libvirt/utils.py:498
#, python-format
msgid "Reading image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:499
+#: nova/virt/libvirt/utils.py:502
#, python-format
msgid "Read: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:505
+#: nova/virt/libvirt/utils.py:508
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/utils.py:529
+#: nova/virt/libvirt/utils.py:532
#, python-format
msgid "Writing image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:533
#, python-format
msgid "Wrote: %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:95
+#: nova/virt/libvirt/vif.py:97
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:105
+#: nova/virt/libvirt/vif.py:107
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:181 nova/virt/libvirt/vif.py:247
-#: nova/virt/libvirt/vif.py:307
+#: nova/virt/libvirt/vif.py:183 nova/virt/libvirt/vif.py:249
+#: nova/virt/libvirt/vif.py:309
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:174
+#: nova/virt/libvirt/volume.py:176
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:177
+#: nova/virt/libvirt/volume.py:179
#, python-format
msgid ""
"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
"Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:189
+#: nova/virt/libvirt/volume.py:191
#, python-format
msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
msgstr ""
@@ -7339,103 +7339,103 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:80 nova/virt/xenapi/vmops.py:1471
+#: nova/virt/xenapi/agent.py:85 nova/virt/xenapi/vmops.py:1491
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1475
+#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1495
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1480
+#: nova/virt/xenapi/agent.py:94 nova/virt/xenapi/vmops.py:1500
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:99
+#: nova/virt/xenapi/agent.py:104
#, python-format
msgid ""
"The agent call to %(method)s returned an invalid response: %(ret)r. "
"path=%(path)s; args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:109
+#: nova/virt/xenapi/agent.py:114
#, python-format
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:127
+#: nova/virt/xenapi/agent.py:132
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:141
+#: nova/virt/xenapi/agent.py:146
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:149
+#: nova/virt/xenapi/agent.py:154
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:157
+#: nova/virt/xenapi/agent.py:162
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:171
+#: nova/virt/xenapi/agent.py:176
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:182
+#: nova/virt/xenapi/agent.py:187
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:202
+#: nova/virt/xenapi/agent.py:207
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:209
+#: nova/virt/xenapi/agent.py:214
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:222
+#: nova/virt/xenapi/agent.py:227
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:229
+#: nova/virt/xenapi/agent.py:234
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:235
+#: nova/virt/xenapi/agent.py:240
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:255
+#: nova/virt/xenapi/agent.py:263
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not bemanipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:263
+#: nova/virt/xenapi/agent.py:271
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:267
+#: nova/virt/xenapi/agent.py:275
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:319
+#: nova/virt/xenapi/agent.py:327
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
@@ -7451,24 +7451,24 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:363
+#: nova/virt/xenapi/driver.py:360
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:574
+#: nova/virt/xenapi/driver.py:571
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:626
+#: nova/virt/xenapi/driver.py:623
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:664
+#: nova/virt/xenapi/driver.py:661
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:748 nova/virt/xenapi/driver.py:762
+#: nova/virt/xenapi/driver.py:745 nova/virt/xenapi/driver.py:759
#, python-format
msgid "Got exception: %s"
msgstr ""
@@ -8004,189 +8004,193 @@ msgstr ""
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:664
+#: nova/virt/xenapi/vmops.py:128 nova/virt/xenapi/vmops.py:674
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:229
+#: nova/virt/xenapi/vmops.py:169
+msgid "Error: Agent is disabled"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:237
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:297
+#: nova/virt/xenapi/vmops.py:305
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:369
+#: nova/virt/xenapi/vmops.py:377
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:400
+#: nova/virt/xenapi/vmops.py:408
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:473
+#: nova/virt/xenapi/vmops.py:481
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:490
+#: nova/virt/xenapi/vmops.py:498
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:516
+#: nova/virt/xenapi/vmops.py:524
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:523
+#: nova/virt/xenapi/vmops.py:530
+msgid "Waiting for instance state to become running"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:544
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:526
+#: nova/virt/xenapi/vmops.py:547
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:533
-msgid "Waiting for instance state to become running"
-msgstr ""
-
-#: nova/virt/xenapi/vmops.py:548
+#: nova/virt/xenapi/vmops.py:558
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:575
+#: nova/virt/xenapi/vmops.py:585
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:583
+#: nova/virt/xenapi/vmops.py:593
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:633
+#: nova/virt/xenapi/vmops.py:643
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:637
+#: nova/virt/xenapi/vmops.py:647
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:645
+#: nova/virt/xenapi/vmops.py:655
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:682
+#: nova/virt/xenapi/vmops.py:692
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:806
+#: nova/virt/xenapi/vmops.py:816
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:811
+#: nova/virt/xenapi/vmops.py:821
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:855
+#: nova/virt/xenapi/vmops.py:865
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:940
+#: nova/virt/xenapi/vmops.py:956
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:966
+#: nova/virt/xenapi/vmops.py:982
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:993
+#: nova/virt/xenapi/vmops.py:1009
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1000
+#: nova/virt/xenapi/vmops.py:1016
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1007
+#: nova/virt/xenapi/vmops.py:1023
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1033
+#: nova/virt/xenapi/vmops.py:1049
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1059
+#: nova/virt/xenapi/vmops.py:1075
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1110
+#: nova/virt/xenapi/vmops.py:1126
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1144
+#: nova/virt/xenapi/vmops.py:1160
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1192
+#: nova/virt/xenapi/vmops.py:1209
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1196
+#: nova/virt/xenapi/vmops.py:1213
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1295
+#: nova/virt/xenapi/vmops.py:1312
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1378
+#: nova/virt/xenapi/vmops.py:1395
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1397
+#: nova/virt/xenapi/vmops.py:1414
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1406
+#: nova/virt/xenapi/vmops.py:1423
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1409
+#: nova/virt/xenapi/vmops.py:1426
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1434
+#: nova/virt/xenapi/vmops.py:1454
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1530
+#: nova/virt/xenapi/vmops.py:1550
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1562
+#: nova/virt/xenapi/vmops.py:1582
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1610
+#: nova/virt/xenapi/vmops.py:1630
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1646
+#: nova/virt/xenapi/vmops.py:1666
msgid "Migrate Send failed"
msgstr ""
diff --git a/nova/network/api.py b/nova/network/api.py
index 4d6360aba..46a7e0360 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -351,6 +351,7 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@@ -368,5 +369,6 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index e27755e26..d032fe159 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -70,6 +70,7 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import utils
@@ -943,7 +944,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# NOTE(francois.charlier): the instance may have been deleted already
# thus enabling `read_deleted`
admin_context = context.get_admin_context(read_deleted='yes')
- if utils.is_uuid_like(instance_id):
+ if uuidutils.is_uuid_like(instance_id):
instance_ref = self.db.instance_get_by_uuid(admin_context,
instance_id)
else:
@@ -1277,7 +1278,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to an instance from specified network."""
- if utils.is_uuid_like(network_id):
+ if uuidutils.is_uuid_like(network_id):
network = self.get_network(context, network_id)
else:
network = self._get_network_by_id(context, network_id)
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 62291569a..8c2438669 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -27,7 +27,7 @@ from nova.network import quantumv2
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova import utils
+from nova.openstack.common import uuidutils
quantum_opts = [
@@ -90,6 +90,11 @@ class API(base.Base):
search_opts['id'] = net_ids
nets += quantum.list_networks(**search_opts).get('networks', [])
+ _ensure_requested_network_ordering(
+ lambda x: x['id'],
+ nets,
+ net_ids)
+
return nets
def allocate_for_instance(self, context, instance, **kwargs):
@@ -437,7 +442,7 @@ class API(base.Base):
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
- if utils.is_uuid_like(name_or_id):
+ if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
@@ -532,6 +537,13 @@ class API(base.Base):
if not networks:
networks = self._get_available_networks(context,
instance['project_id'])
+ else:
+ # ensure ports are in preferred network order
+ _ensure_requested_network_ordering(
+ lambda x: x['network_id'],
+ ports,
+ [n['id'] for n in networks])
+
nw_info = network_model.NetworkInfo()
for port in ports:
network_name = None
@@ -645,3 +657,9 @@ class API(base.Base):
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
+
+
+def _ensure_requested_network_ordering(accessor, unordered, preferred):
+ """Sort a list with respect to the preferred network ordering."""
+ if preferred:
+ unordered.sort(key=lambda i: preferred.index(accessor(i)))
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index 1fe822ebd..eba0aae52 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -267,7 +267,11 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
- floating_addresses):
+ floating_addresses, host=None):
+ if host is not None:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = self.topic
return self.call(ctxt, self.make_msg(
'migrate_instance_start',
instance_uuid=instance_uuid,
@@ -276,13 +280,16 @@ class NetworkAPI(rpc_proxy.RpcProxy):
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses),
- topic=rpc.queue_get_for(ctxt, self.topic,
- dest_compute),
- version='1.2')
+ topic=topic,
+ version='1.2')
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
- floating_addresses):
+ floating_addresses, host=None):
+ if host is not None:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = self.topic
return self.call(ctxt, self.make_msg(
'migrate_instance_finish',
instance_uuid=instance_uuid,
@@ -291,6 +298,5 @@ class NetworkAPI(rpc_proxy.RpcProxy):
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses),
- topic=rpc.queue_get_for(ctxt, self.topic,
- dest_compute),
- version='1.2')
+ topic=topic,
+ version='1.2')
diff --git a/nova/openstack/common/uuidutils.py b/nova/openstack/common/uuidutils.py
new file mode 100644
index 000000000..51042a798
--- /dev/null
+++ b/nova/openstack/common/uuidutils.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Intel Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+UUID related utilities and helper functions.
+"""
+
+import uuid
+
+
+def is_uuid_like(val):
+    """Return True if `val` is a string in canonical UUID form.
+
+ For our purposes, a UUID is a canonical form string:
+ aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+
+ """
+ try:
+ return str(uuid.UUID(val)) == val
+ except (TypeError, ValueError, AttributeError):
+ return False
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 6d6288d83..1a608da29 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -23,11 +23,12 @@ Chance (Random) Scheduler implementation
import random
+from nova import config
from nova import exception
from nova import flags
from nova.scheduler import driver
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ChanceScheduler(driver.Scheduler):
@@ -65,7 +66,7 @@ class ChanceScheduler(driver.Scheduler):
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
try:
- host = self._schedule(context, FLAGS.compute_topic,
+ host = self._schedule(context, CONF.compute_topic,
request_spec, filter_properties)
updated_instance = driver.instance_update_db(context,
instance_uuid)
@@ -88,7 +89,7 @@ class ChanceScheduler(driver.Scheduler):
filter_properties, instance, instance_type,
reservations):
"""Select a target for resize."""
- host = self._schedule(context, FLAGS.compute_topic, request_spec,
+ host = self._schedule(context, CONF.compute_topic, request_spec,
filter_properties)
self.compute_rpcapi.prep_resize(context, image, instance,
instance_type, host, reservations)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 13f0029ee..f93268906 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -53,8 +53,8 @@ scheduler_driver_opts = [
help='Maximum number of attempts to schedule an instance'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(scheduler_driver_opts)
+CONF = config.CONF
+CONF.register_opts(scheduler_driver_opts)
CONF = config.CONF
CONF.import_opt('instances_path', 'nova.compute.manager')
@@ -107,7 +107,7 @@ def cast_to_compute_host(context, host, method, **kwargs):
instance_update_db(context, instance_uuid)
rpc.cast(context,
- rpc.queue_get_for(context, FLAGS.compute_topic, host),
+ rpc.queue_get_for(context, CONF.compute_topic, host),
{"method": method, "args": kwargs})
LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
@@ -115,7 +115,7 @@ def cast_to_compute_host(context, host, method, **kwargs):
def cast_to_host(context, topic, host, method, **kwargs):
"""Generic cast to host"""
- topic_mapping = {FLAGS.compute_topic: cast_to_compute_host}
+ topic_mapping = {CONF.compute_topic: cast_to_compute_host}
func = topic_mapping.get(topic)
if func:
@@ -151,7 +151,7 @@ class Scheduler(object):
def __init__(self):
self.host_manager = importutils.import_object(
- FLAGS.scheduler_host_manager)
+ CONF.scheduler_host_manager)
self.compute_api = compute_api.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 78bd49a96..c43e48876 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -21,6 +21,7 @@ Weighing Functions.
import operator
+from nova import config
from nova import exception
from nova import flags
from nova.openstack.common import importutils
@@ -30,8 +31,7 @@ from nova.scheduler import driver
from nova.scheduler import least_cost
from nova.scheduler import scheduler_options
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -61,7 +61,7 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
- weighted_hosts = self._schedule(context, FLAGS.compute_topic,
+ weighted_hosts = self._schedule(context, CONF.compute_topic,
request_spec, filter_properties,
instance_uuids)
@@ -108,7 +108,7 @@ class FilterScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- hosts = self._schedule(context, FLAGS.compute_topic, request_spec,
+ hosts = self._schedule(context, CONF.compute_topic, request_spec,
filter_properties, [instance['uuid']])
if not hosts:
raise exception.NoValidHost(reason="")
@@ -187,7 +187,7 @@ class FilterScheduler(driver.Scheduler):
filter_properties['os_type'] = os_type
def _max_attempts(self):
- max_attempts = FLAGS.scheduler_max_attempts
+ max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.NovaException(_("Invalid value for "
"'scheduler_max_attempts', must be >= 1"))
@@ -226,7 +226,7 @@ class FilterScheduler(driver.Scheduler):
ordered by their fitness.
"""
elevated = context.elevated()
- if topic != FLAGS.compute_topic:
+ if topic != CONF.compute_topic:
msg = _("Scheduler only understands Compute nodes (for now)")
raise NotImplementedError(msg)
@@ -306,12 +306,12 @@ class FilterScheduler(driver.Scheduler):
"""
if topic is None:
# Schedulers only support compute right now.
- topic = FLAGS.compute_topic
+ topic = CONF.compute_topic
if topic in self.cost_function_cache:
return self.cost_function_cache[topic]
cost_fns = []
- for cost_fn_str in FLAGS.least_cost_functions:
+ for cost_fn_str in CONF.least_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
@@ -333,7 +333,7 @@ class FilterScheduler(driver.Scheduler):
try:
flag_name = "%s_weight" % cost_fn.__name__
- weight = getattr(FLAGS, flag_name)
+ weight = getattr(CONF, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 4af71c7b4..202f8232a 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -13,12 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -29,7 +30,7 @@ class ComputeFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes"""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != FLAGS.compute_topic or not instance_type:
+ if host_state.topic != CONF.compute_topic or not instance_type:
return True
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 0c4a67dcc..9c93df930 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -27,8 +28,8 @@ cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
default=16.0,
help='Virtual CPU to Physical CPU allocation ratio')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cpu_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(cpu_allocation_ratio_opt)
class CoreFilter(filters.BaseHostFilter):
@@ -37,7 +38,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != FLAGS.compute_topic or not instance_type:
+ if host_state.topic != CONF.compute_topic or not instance_type:
return True
if not host_state.vcpus_total:
@@ -46,7 +47,7 @@ class CoreFilter(filters.BaseHostFilter):
return True
instance_vcpus = instance_type['vcpus']
- vcpus_total = host_state.vcpus_total * FLAGS.cpu_allocation_ratio
+ vcpus_total = host_state.vcpus_total * CONF.cpu_allocation_ratio
# Only provide a VCPU limit to compute if the virt driver is reporting
# an accurate count of installed VCPUs. (XenServer driver does not)
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index 88b8c3377..358583b8a 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -23,8 +24,8 @@ LOG = logging.getLogger(__name__)
disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0,
help="virtual disk to physical disk allocation ratio")
-FLAGS = flags.FLAGS
-FLAGS.register_opt(disk_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(disk_allocation_ratio_opt)
class DiskFilter(filters.BaseHostFilter):
@@ -39,7 +40,7 @@ class DiskFilter(filters.BaseHostFilter):
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
- disk_mb_limit = total_usable_disk_mb * FLAGS.disk_allocation_ratio
+ disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index c2e0205a3..1b40bae62 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -24,8 +25,8 @@ max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
help="Ignore hosts that have too many builds/resizes/snaps/migrations")
-FLAGS = flags.FLAGS
-FLAGS.register_opt(max_io_ops_per_host_opt)
+CONF = config.CONF
+CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
@@ -36,7 +37,7 @@ class IoOpsFilter(filters.BaseHostFilter):
compute node statistics to decide whether to filter.
"""
num_io_ops = host_state.num_io_ops
- max_io_ops = FLAGS.max_io_ops_per_host
+ max_io_ops = CONF.max_io_ops_per_host
passes = num_io_ops < max_io_ops
if not passes:
LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host "
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index 0d67b7b80..d1d16b80b 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+from nova import config
from nova import flags
from nova.scheduler import filters
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class IsolatedHostsFilter(filters.BaseHostFilter):
@@ -28,6 +27,6 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
image_ref = props.get('image_ref')
- image_isolated = image_ref in FLAGS.isolated_images
- host_isolated = host_state.host in FLAGS.isolated_hosts
+ image_isolated = image_ref in CONF.isolated_images
+ host_isolated = host_state.host in CONF.isolated_hosts
return image_isolated == host_isolated
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index e96539c0c..17c7ebc22 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -24,8 +25,8 @@ max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
default=50,
help="Ignore hosts that have too many instances")
-FLAGS = flags.FLAGS
-FLAGS.register_opt(max_instances_per_host_opt)
+CONF = config.CONF
+CONF.register_opt(max_instances_per_host_opt)
class NumInstancesFilter(filters.BaseHostFilter):
@@ -33,7 +34,7 @@ class NumInstancesFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
num_instances = host_state.num_instances
- max_instances = FLAGS.max_instances_per_host
+ max_instances = CONF.max_instances_per_host
passes = num_instances < max_instances
if not passes:
LOG.debug(_("%(host_state)s fails num_instances check: Max "
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index 22ba0252c..85370dc2c 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -25,8 +26,8 @@ ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio",
default=1.5,
help="virtual ram to physical ram allocation ratio")
-FLAGS = flags.FLAGS
-FLAGS.register_opt(ram_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(ram_allocation_ratio_opt)
class RamFilter(filters.BaseHostFilter):
@@ -39,7 +40,7 @@ class RamFilter(filters.BaseHostFilter):
free_ram_mb = host_state.free_ram_mb
total_usable_ram_mb = host_state.total_usable_ram_mb
- memory_mb_limit = total_usable_ram_mb * FLAGS.ram_allocation_ratio
+ memory_mb_limit = total_usable_ram_mb * CONF.ram_allocation_ratio
used_ram_mb = total_usable_ram_mb - free_ram_mb
usable_ram = memory_mb_limit - used_ram_mb
if not usable_ram >= requested_ram:
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 69968a766..4fd0488d9 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -48,6 +48,7 @@ import httplib
import socket
import ssl
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
@@ -81,10 +82,10 @@ trusted_opts = [
help='attestation authorization blob - must change'),
]
-FLAGS = flags.FLAGS
+CONF = config.CONF
trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
-FLAGS.register_group(trust_group)
-FLAGS.register_opts(trusted_opts, group='trusted_computing')
+CONF.register_group(trust_group)
+CONF.register_opts(trusted_opts, group=trust_group)
class HTTPSClientAuthConnection(httplib.HTTPSConnection):
@@ -124,13 +125,13 @@ class AttestationService(httplib.HTTPSConnection):
# Provide access wrapper to attestation server to get integrity report.
def __init__(self):
- self.api_url = FLAGS.trusted_computing.attestation_api_url
- self.host = FLAGS.trusted_computing.attestation_server
- self.port = FLAGS.trusted_computing.attestation_port
- self.auth_blob = FLAGS.trusted_computing.attestation_auth_blob
+ self.api_url = CONF.trusted_computing.attestation_api_url
+ self.host = CONF.trusted_computing.attestation_server
+ self.port = CONF.trusted_computing.attestation_port
+ self.auth_blob = CONF.trusted_computing.attestation_auth_blob
self.key_file = None
self.cert_file = None
- self.ca_file = FLAGS.trusted_computing.attestation_server_ca_file
+ self.ca_file = CONF.trusted_computing.attestation_server_ca_file
self.request_count = 100
def _do_request(self, method, action_url, body, headers):
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index fc9f3c8cc..91e16ad34 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -21,6 +21,7 @@ import UserDict
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import db
from nova import exception
from nova import flags
@@ -49,8 +50,8 @@ host_manager_opts = [
'when not specified in the request.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(host_manager_opts)
+CONF = config.CONF
+CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
@@ -281,7 +282,7 @@ class HostManager(object):
self.service_states = {} # { <host> : { <service> : { cap k : v }}}
self.host_state_map = {}
self.filter_classes = filters.get_filter_classes(
- FLAGS.scheduler_available_filters)
+ CONF.scheduler_available_filters)
def _choose_host_filters(self, filters):
"""Since the caller may specify which filters to use we need
@@ -290,7 +291,7 @@ class HostManager(object):
of acceptable filters.
"""
if filters is None:
- filters = FLAGS.scheduler_default_filters
+ filters = CONF.scheduler_default_filters
if not isinstance(filters, (list, tuple)):
filters = [filters]
good_filters = []
@@ -347,7 +348,7 @@ class HostManager(object):
with the instance (in case the InstanceType changed since the
instance was created)."""
- if topic != FLAGS.compute_topic:
+ if topic != CONF.compute_topic:
raise NotImplementedError(_(
"host_manager only implemented for 'compute'"))
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index bccdd0a4f..d3eaee735 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -22,6 +22,7 @@ The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -45,8 +46,8 @@ least_cost_opts = [
'e.g. spread-first'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(least_cost_opts)
+CONF = config.CONF
+CONF.register_opts(least_cost_opts)
# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 531c54726..77f5a0259 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -26,6 +26,7 @@ import sys
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
import nova.context
from nova import db
from nova import exception
@@ -46,8 +47,8 @@ scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='nova.scheduler.filter_scheduler.FilterScheduler',
help='Default driver to use for the scheduler')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(scheduler_driver_opt)
+CONF = config.CONF
+CONF.register_opt(scheduler_driver_opt)
QUOTAS = quota.QUOTAS
@@ -59,7 +60,7 @@ class SchedulerManager(manager.Manager):
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
- scheduler_driver = FLAGS.scheduler_driver
+ scheduler_driver = CONF.scheduler_driver
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index 13e3c0e1a..7c68bb12a 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -27,6 +27,7 @@ schedule requests to compute nodes but provide their own manager and topic.
https://bugs.launchpad.net/nova/+bug/1009681
"""
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -43,8 +44,8 @@ multi_scheduler_opts = [
help='Default driver to use for scheduling calls'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(multi_scheduler_opts)
+CONF = config.CONF
+CONF.register_opts(multi_scheduler_opts)
class MultiScheduler(driver.Scheduler):
@@ -58,9 +59,9 @@ class MultiScheduler(driver.Scheduler):
def __init__(self):
super(MultiScheduler, self).__init__()
compute_driver = importutils.import_object(
- FLAGS.compute_scheduler_driver)
+ CONF.compute_scheduler_driver)
default_driver = importutils.import_object(
- FLAGS.default_scheduler_driver)
+ CONF.default_scheduler_driver)
self.drivers = {'compute': compute_driver,
'default': default_driver}
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index b41668733..cbb6712db 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -18,12 +18,12 @@
Client side of the scheduler manager RPC API.
"""
+from nova import config
from nova import flags
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -60,7 +60,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
BASE_RPC_API_VERSION = '2.0'
def __init__(self):
- super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
+ super(SchedulerAPI, self).__init__(topic=CONF.scheduler_topic,
default_version=self.BASE_RPC_API_VERSION)
def run_instance(self, ctxt, request_spec, admin_password,
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index 7acf2f750..e8be0070b 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -26,6 +26,7 @@ import datetime
import json
import os
+from nova import config
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -37,8 +38,8 @@ scheduler_json_config_location_opt = cfg.StrOpt(
default='',
help='Absolute path to scheduler configuration JSON file.')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(scheduler_json_config_location_opt)
+CONF = config.CONF
+CONF.register_opt(scheduler_json_config_location_opt)
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ class SchedulerOptions(object):
def get_configuration(self, filename=None):
"""Check the json file for changes and load it if needed."""
if not filename:
- filename = FLAGS.scheduler_json_config_location
+ filename = CONF.scheduler_json_config_location
if not filename:
return self.data
if self.last_checked:
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 49ee9c152..13e854077 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -114,8 +114,7 @@ class CinderCloudTestCase(test.TestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.flags(compute_scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# set up services
self.compute = self.start_service('compute')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index a6995945b..95003ee87 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -127,8 +127,7 @@ class CloudTestCase(test.TestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.flags(compute_scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# set up services
self.compute = self.start_service('compute')
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index c3f0b5e33..0e7148a9a 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -209,6 +209,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
+ # Just to make long lines short
+ self.rt = self.compute.resource_tracker
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -307,20 +309,17 @@ class ComputeTestCase(BaseTestCase):
def test_create_instance_unlimited_memory(self):
"""Default of memory limit=None is unlimited"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(999999999999,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
@@ -330,26 +329,21 @@ class ComputeTestCase(BaseTestCase):
def test_create_multiple_instances_then_starve(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEquals(1024,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
- self.assertEquals(256,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEquals(1024, self.rt.compute_node['memory_mb_used'])
+ self.assertEquals(256, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEquals(3072,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
- self.assertEquals(768,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEquals(3072, self.rt.compute_node['memory_mb_used'])
+ self.assertEquals(768, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance(params)
@@ -361,8 +355,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed ram policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -382,16 +375,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(instance_mb,
- self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -416,8 +407,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed cpu policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
@@ -433,8 +423,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(2,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
@@ -443,16 +432,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(3,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(3, self.rt.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
- self.compute.resource_tracker.update_usage(self.context,
+ self.rt.update_usage(self.context,
instance=instance)
- self.assertEqual(2,
- self.compute.resource_tracker.compute_node['vcpus_used'])
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
@@ -469,8 +456,7 @@ class ComputeTestCase(BaseTestCase):
"""Test passing of oversubscribed disk policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -489,16 +475,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
- self.assertEqual(instance_gb,
- self.compute.resource_tracker.compute_node['local_gb_used'])
+ self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.compute.resource_tracker.update_available_resource(
- self.context.elevated())
+ self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
@@ -2004,8 +1988,8 @@ class ComputeTestCase(BaseTestCase):
fake_setup_networks_on_host)
self.compute.finish_revert_resize(self.context,
- migration_id=migration_ref['id'], instance=rpcinst,
- reservations=reservations)
+ migration=jsonutils.to_primitive(migration_ref),
+ instance=rpcinst, reservations=reservations)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
@@ -2429,6 +2413,35 @@ class ComputeTestCase(BaseTestCase):
NotImplementedError('test'),
exc_info)
+ def test_add_instance_fault_with_remote_error(self):
+ exc_info = None
+ instance_uuid = str(utils.gen_uuid())
+
+ def fake_db_fault_create(ctxt, values):
+ self.assertTrue(values['details'].startswith('Remote error'))
+ self.assertTrue('raise rpc_common.RemoteError'
+ in values['details'])
+ del values['details']
+
+ expected = {
+ 'code': 500,
+ 'instance_uuid': instance_uuid,
+ 'message': 'My Test Message'
+ }
+ self.assertEquals(expected, values)
+
+ try:
+ raise rpc_common.RemoteError('test', 'My Test Message')
+ except rpc_common.RemoteError as exc:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
+ exc,
+ exc_info)
+
def test_add_instance_fault_user_error(self):
exc_info = None
instance_uuid = str(utils.gen_uuid())
@@ -3168,7 +3181,7 @@ class ComputeAPITestCase(BaseTestCase):
def dummy(*args, **kwargs):
self.network_api_called = True
- pass
+
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
dummy)
@@ -3184,10 +3197,13 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
self.assertTrue(self.network_api_called)
- #local delete, so db should be clean
- self.assertRaises(exception.InstanceNotFound, db.instance_destroy,
- self.context,
- instance['uuid'])
+ # fetch the instance state from db and verify deletion.
+ deleted_context = context.RequestContext('fake', 'fake',
+ read_deleted='yes')
+ instance = db.instance_get_by_uuid(deleted_context, instance_uuid)
+ self.assertEqual(instance['vm_state'], vm_states.DELETED)
+ self.assertEqual(instance['task_state'], None)
+ self.assertTrue(instance['deleted'])
def test_repeated_delete_quota(self):
in_use = {'instances': 1}
@@ -5087,7 +5103,8 @@ class ComputeAggrTestCase(BaseTestCase):
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
- self.compute.add_aggregate_host(self.context, self.aggr.id, "host")
+ self.compute.add_aggregate_host(self.context, "host",
+ aggregate=jsonutils.to_primitive(self.aggr))
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
@@ -5105,15 +5122,16 @@ class ComputeAggrTestCase(BaseTestCase):
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate.id, self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
- self.compute.add_aggregate_host(self.context, self.aggr.id,
- "the_host", slave_info="SLAVE_INFO")
+ self.compute.add_aggregate_host(self.context, "the_host",
+ slave_info="SLAVE_INFO",
+ aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
@@ -5236,6 +5254,23 @@ class ComputePolicyTestCase(BaseTestCase):
self.compute_api.get_instance_faults,
self.context, instances)
+ def test_force_host_fail(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": [["role:fake"]]}
+ self._set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, None, '1',
+ availability_zone='1:1')
+
+ def test_force_host_pass(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": []}
+ self._set_rules(rules)
+
+ self.compute_api.create(self.context, None, '1',
+ availability_zone='1:1')
+
class ComputeHostAPITestCase(BaseTestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index f94cca857..1edfa771f 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -95,8 +95,9 @@ class ComputeRpcAPITestCase(test.TestCase):
self.assertEqual(arg, expected_arg)
def test_add_aggregate_host(self):
- self._test_compute_api('add_aggregate_host', 'cast', aggregate_id='id',
- host_param='host', host='host', slave_info={}, version='2.2')
+ self._test_compute_api('add_aggregate_host', 'cast',
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={}, version='2.14')
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
@@ -143,8 +144,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
- instance=self.fake_instance, migration_id='id', host='host',
- reservations=list('fake_res'))
+ instance=self.fake_instance, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'), version='2.13')
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
@@ -293,8 +294,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
- instance=self.fake_instance, migration_id='id', host='host',
- reservations=list('fake_res'))
+ instance=self.fake_instance, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'), version='2.12')
def test_rollback_live_migration_at_destination(self):
self._test_compute_api('rollback_live_migration_at_destination',
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl
index 44e99cd16..c70192949 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl
@@ -1,7 +1,7 @@
{
"server": {
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
"accessIPv4": "",
"accessIPv6": "",
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl
index 1cb33425e..beec3a12a 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl
index 995865528..1fb8e1a47 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl
@@ -2,7 +2,7 @@
"servers": [
{
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
"accessIPv4": "",
"accessIPv6": "",
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl
index 7507232ea..1811882a2 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+ <server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
index 2c44cdfc0..92e626293 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
@@ -2,7 +2,7 @@
"server": {
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
index d910a7e23..137e59686 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
index 21185c5d5..8f1583baf 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -3,7 +3,7 @@
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
index eb7bed92d..344686e66 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+ <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 89d6d42c6..3bcd737bb 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -303,6 +303,7 @@ class ServersSampleJsonTest(ServersSampleBase):
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('server-get-resp', subs, response)
def test_servers_list(self):
@@ -320,6 +321,7 @@ class ServersSampleJsonTest(ServersSampleBase):
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('servers-details-resp', subs, response)
@@ -804,6 +806,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('extended-server-attrs-get',
subs, response)
@@ -815,6 +818,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
return self._verify_response('extended-server-attrs-list',
subs, response)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 9bbd7ba92..04f646ef0 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -19,6 +19,7 @@
from nova import context
from nova import network
+from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import rpc
from nova import test
@@ -78,3 +79,66 @@ class ApiTestCase(test.TestCase):
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
+
+ def _stub_migrate_instance_calls(self, method, multi_host, info):
+ fake_instance_type = {'rxtx_factor': 'fake_factor'}
+ fake_instance = {'uuid': 'fake_uuid',
+ 'instance_type': fake_instance_type,
+ 'project_id': 'fake_project_id'}
+ fake_migration = {'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest'}
+
+ def fake_mig_inst_method(*args, **kwargs):
+ info['kwargs'] = kwargs
+
+ def fake_is_multi_host(*args, **kwargs):
+ return multi_host
+
+ def fake_get_floaters(*args, **kwargs):
+ return ['fake_float1', 'fake_float2']
+
+ self.stubs.Set(network_rpcapi.NetworkAPI, method,
+ fake_mig_inst_method)
+ self.stubs.Set(self.network_api, '_is_multi_host',
+ fake_is_multi_host)
+ self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
+ fake_get_floaters)
+
+ expected = {'instance_uuid': 'fake_uuid',
+ 'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest',
+ 'rxtx_factor': 'fake_factor',
+ 'project_id': 'fake_project_id',
+ 'floating_addresses': None}
+ if multi_host:
+ expected['host'] = 'fake_compute_dest'
+ expected['floating_addresses'] = ['fake_float1', 'fake_float2']
+ return fake_instance, fake_migration, expected
+
+ def test_migrate_instance_start_with_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', True, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_start_without_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', False, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_with_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', True, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_without_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', False, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index edb477b70..a8f29e012 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -351,35 +351,37 @@ class TestQuantumv2(test.TestCase):
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1'}})
- req_net_ids.append('my_netid1')
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
+ expected_network_order = req_net_ids
+ else:
+ expected_network_order = [n['id'] for n in nets]
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
mox_list_network_params = dict(tenant_id=self.instance['project_id'],
shared=False)
if search_ids:
- mox_list_network_params['id'] = search_ids
+ mox_list_network_params['id'] = mox.SameElementsAs(search_ids)
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': nets})
mox_list_network_params = dict(shared=True)
if search_ids:
- mox_list_network_params['id'] = search_ids
+ mox_list_network_params['id'] = mox.SameElementsAs(search_ids)
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': []})
- for network in nets:
+ for net_id in expected_network_order:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
- port = ports.get(network['id'], None)
+ port = ports.get(net_id, None)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
@@ -387,10 +389,10 @@ class TestQuantumv2(test.TestCase):
).AndReturn(
{'port': port})
else:
- fixed_ip = fixed_ips.get(network['id'])
+ fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
port_req_body['port']['fixed_ip'] = fixed_ip
- port_req_body['port']['network_id'] = network['id']
+ port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
@@ -410,8 +412,9 @@ class TestQuantumv2(test.TestCase):
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
- requested_networks = [(net['id'], None, None)
- for net in (self.nets3[0], self.nets3[-1])]
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets3[1], self.nets3[0], self.nets3[2])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
@@ -902,3 +905,33 @@ class TestQuantumv2(test.TestCase):
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
+
+
+class TestQuantumv2ModuleMethods(test.TestCase):
+ def test_ensure_requested_network_ordering_no_preference_ids(self):
+ l = [1, 2, 3]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x,
+ l,
+ None)
+
+ def test_ensure_requested_network_ordering_no_preference_hashes(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ None)
+
+ self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
+
+ def test_ensure_requested_network_ordering_with_preference(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ quantumapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ [1, 2, 3])
+
+ self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index a087ba97f..de3f19cea 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -36,11 +36,17 @@ class NetworkRpcAPITestCase(test.TestCase):
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_topic = FLAGS.network_topic
expected_msg = rpcapi.make_msg(method, **kwargs)
+ if 'source_compute' in expected_msg['args']:
+ # Fix up for migrate_instance_* calls.
+ args = expected_msg['args']
+ args['source'] = args.pop('source_compute')
+ args['dest'] = args.pop('dest_compute')
targeted_methods = [
'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
'_rpc_allocate_fixed_ip', 'deallocate_fixed_ip',
'_associate_floating_ip', '_disassociate_floating_ip',
- 'lease_fixed_ip', 'release_fixed_ip'
+ 'lease_fixed_ip', 'release_fixed_ip',
+ 'migrate_instance_start', 'migrate_instance_finish',
]
if method in targeted_methods and 'host' in kwargs:
if method != 'deallocate_fixed_ip':
@@ -258,3 +264,45 @@ class NetworkRpcAPITestCase(test.TestCase):
def test_release_fixed_ip(self):
self._test_network_api('release_fixed_ip', rpc_method='cast',
address='fake_addr', host='fake_host')
+
+ def test_migrate_instance_start(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ version='1.2')
+
+ def test_migrate_instance_start_multi_host(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
+
+ def test_migrate_instance_finish(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ version='1.2')
+
+ def test_migrate_instance_finish_multi_host(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index f2c33e623..8203277ae 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -379,6 +379,29 @@ class ImageCacheManagerTestCase(test.TestCase):
res = image_cache_manager._verify_checksum(img, fname)
self.assertTrue(res)
+ def test_verify_checksum_disabled(self):
+ img = {'container_format': 'ami', 'id': '42'}
+
+ self.flags(checksum_base_images=False)
+
+ with self._intercept_log_messages() as stream:
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'))
+ fname, info_fname, testdata = self._make_checksum(tmpdir)
+
+ # Checksum is valid
+ f = open(info_fname, 'w')
+ csum = hashlib.sha1()
+ csum.update(testdata)
+ f.write('{"sha1": "%s"}\n' % csum.hexdigest())
+ f.close()
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = image_cache_manager._verify_checksum(img, fname)
+ self.assertTrue(res is None)
+
def test_verify_checksum_invalid_json(self):
img = {'container_format': 'ami', 'id': '42'}
@@ -653,6 +676,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.assertEquals(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_checksum_fails(self):
+ self.flags(checksum_base_images=True)
self.stubs.Set(virtutils, 'chown', lambda x, y: None)
img = '123'
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 83a7514db..8f6f27bb0 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -105,6 +105,9 @@ class FakeIVMOperator(object):
'disk_used': 0,
'disk_avail': 10168}
+ def get_hostname(self):
+ return 'fake-powervm'
+
def fake_get_powervm_operator():
return FakeIVMOperator()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 1b84b858d..8b883f755 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -30,6 +30,7 @@ import nova
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import test
from nova import utils
@@ -510,7 +511,7 @@ class GenericUtilsTestCase(test.TestCase):
class IsUUIDLikeTestCase(test.TestCase):
def assertUUIDLike(self, val, expected):
- result = utils.is_uuid_like(val)
+ result = uuidutils.is_uuid_like(val)
self.assertEqual(result, expected)
def test_good_uuid(self):
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 1f30ee695..9d48cdf06 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -269,7 +269,8 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_poll_rebooting_instances(self):
- self.connection.poll_rebooting_instances(10)
+ instances = [self._get_running_instance()]
+ self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_poll_rescued_instances(self):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 404c183a0..54f7948b6 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -27,6 +27,8 @@ import re
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
@@ -896,6 +898,37 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
+ def test_maintenance_mode(self):
+ real_call_xenapi = self.conn._session.call_xenapi
+ instance = self._create_instance(spawn=True)
+ api_calls = {}
+
+ # Record all the xenapi calls, and return a fake list of hosts
+ # for the host.get_all call
+ def fake_call_xenapi(method, *args):
+ api_calls[method] = args
+ if method == 'host.get_all':
+ return ['foo', 'bar', 'baz']
+ return real_call_xenapi(method, *args)
+ self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
+
+ # Always find the 'bar' destination host
+ def fake_host_find(context, session, src, dst):
+ return 'bar'
+ self.stubs.Set(host, '_host_find', fake_host_find)
+
+ result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
+ self.assertEqual(result, 'on_maintenance')
+
+ # We expect the VM.pool_migrate call to have been called to
+ # migrate our instance to the 'bar' host
+ expected = (instance['uuid'], 'bar', {})
+ self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
@@ -2241,7 +2274,8 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
- self.context, self.aggr.id, "fake_host")
+ self.context, "fake_host",
+ aggregate=jsonutils.to_primitive(self.aggr))
excepted = db.aggregate_get(self.context, self.aggr.id)
self.assertEqual(excepted.metadetails[pool_states.KEY],
pool_states.ERROR)
@@ -2258,10 +2292,10 @@ class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
- def add_aggregate_host(self, ctxt, aggregate_id,
+ def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
- self.add_aggregate_host, ctxt, aggregate_id,
+ self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
@@ -2304,7 +2338,8 @@ class HypervisorPoolTestCase(test.TestCase):
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
- "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
+ "CONTEXT", jsonutils.to_primitive(aggregate),
+ "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
diff --git a/nova/utils.py b/nova/utils.py
index 284d72b55..4dd0f161f 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -772,18 +772,6 @@ def gen_uuid():
return uuid.uuid4()
-def is_uuid_like(val):
- """For our purposes, a UUID is a string in canonical form:
-
- aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
- """
- try:
- uuid.UUID(val)
- return True
- except (TypeError, ValueError, AttributeError):
- return False
-
-
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
@@ -1056,7 +1044,7 @@ def generate_mac_address():
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
- random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
@@ -1176,9 +1164,13 @@ def mkfs(fs, path, label=None):
else:
args = ['mkfs', '-t', fs]
#add -F to force no interactive excute on non-block device.
- if fs in ['ext3', 'ext4']:
+ if fs in ('ext3', 'ext4'):
args.extend(['-F'])
if label:
- args.extend(['-n', label])
+ if fs in ('msdos', 'vfat'):
+ label_opt = '-n'
+ else:
+ label_opt = '-L'
+ args.extend([label_opt, label])
args.append(path)
execute(*args)
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index c38b0f98b..ee183584c 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -676,6 +676,7 @@ class BareMetalDriver(driver.ComputeDriver):
'local_gb_used': self.get_local_gb_used(),
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
+ 'hypervisor_hostname': FLAGS.host,
'cpu_info': self.get_cpu_info(),
'cpu_arch': FLAGS.cpu_arch}
@@ -737,4 +738,5 @@ class HostState(object):
self.connection.get_memory_mb_used())
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()
+ data["hypervisor_hostname"] = FLAGS.host
self._stats = data
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 9c8a6448d..a466fa180 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -604,8 +604,14 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
pass
- def poll_rebooting_instances(self, timeout):
- """Poll for rebooting instances"""
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances
+
+ :param timeout: the currently configured timeout for considering
+ rebooting instances to be stuck
+ :param instances: instances that have been in rebooting state
+ longer than the configured timeout
+ """
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 03711fe98..79d98c5cf 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -70,7 +70,9 @@ class FakeDriver(driver.ComputeDriver):
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
- 'host_name_label': 'fake-mini'}
+ 'host_name_label': 'fake-mini',
+ 'hypervisor_hostname': 'fake-mini',
+ }
self._mounts = {}
def init_host(self, host):
@@ -123,7 +125,7 @@ class FakeDriver(driver.ComputeDriver):
def unrescue(self, instance, network_info):
pass
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
pass
def poll_rescued_instances(self, timeout):
@@ -265,6 +267,7 @@ class FakeDriver(driver.ComputeDriver):
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
+ 'hypervisor_hostname': 'fake-mini',
'cpu_info': '?'}
return dic
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index b2a025e0c..a2f9d5904 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -152,6 +152,7 @@ class HostOps(baseops.BaseOps):
data["supported_instances"] = \
[('i686', 'hyperv', 'hvm'),
('x86_64', 'hyperv', 'hvm')]
+ data["hypervisor_hostname"] = platform.node()
self._stats = data
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index ada4c6a3a..3f0e2ec53 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1054,7 +1054,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.file_delete(rescue_file)
@exception.wrap_exception()
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
pass
@exception.wrap_exception()
@@ -1683,7 +1683,8 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# FIXME(vish): stick this in db
inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
+ inst_type = instance_types.get_instance_type(inst_type_id,
+ inactive=True)
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = FLAGS.libvirt_type
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 29191629f..721587512 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -226,6 +226,9 @@ class ImageCacheManager(object):
handle manually when it occurs.
"""
+ if not FLAGS.checksum_base_images:
+ return None
+
stored_checksum = read_stored_checksum(base_file)
if stored_checksum:
f = open(base_file, 'r')
@@ -251,7 +254,7 @@ class ImageCacheManager(object):
# NOTE(mikal): If the checksum file is missing, then we should
# create one. We don't create checksums when we download images
# from glance because that would delay VM startup.
- if FLAGS.checksum_base_images and create_if_missing:
+ if create_if_missing:
write_stored_checksum(base_file)
return None
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index bc0986ca4..2601f0537 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -156,6 +156,7 @@ class PowerVMOperator(object):
data['host_memory_free'] = memory_info['avail_mem']
data['hypervisor_type'] = constants.POWERVM_HYPERVISOR_TYPE
data['hypervisor_version'] = constants.POWERVM_HYPERVISOR_VERSION
+ data['hypervisor_hostname'] = self._operator.get_hostname()
data['extres'] = ''
self._host_stats = data
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 0c17dccff..605c95cfd 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -49,6 +49,11 @@ xenapi_agent_opts = [
'configuration is not injected into the image. '
'Used if compute_driver=xenapi.XenAPIDriver and '
' flat_injected=True'),
+ cfg.StrOpt('xenapi_disable_agent',
+ default=False,
+ help='Disable XenAPI agent. Reduces the amount of time '
+ 'it takes nova to detect that a VM has started, when '
+ 'that VM does not have the agent installed'),
]
FLAGS = flags.FLAGS
@@ -244,6 +249,9 @@ def find_guest_agent(base_dir):
tries to locate a guest agent at the path
specificed by agent_rel_path
"""
+ if FLAGS.xenapi_disable_agent:
+ return False
+
agent_rel_path = FLAGS.xenapi_agent_path
agent_path = os.path.join(base_dir, agent_rel_path)
if os.path.isfile(agent_path):
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index e4c4150a8..4d032e891 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -181,13 +181,7 @@ class XenAPIDriver(driver.ComputeDriver):
"""Finish reverting a resize, powering back on the instance"""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance)
- block_device_mapping = driver.block_device_info_get_mapping(
- block_device_info)
- for vol in block_device_mapping:
- connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
- self.attach_volume(connection_info,
- instance['name'], mount_device)
+ self._attach_mapped_block_devices(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
@@ -195,6 +189,9 @@ class XenAPIDriver(driver.ComputeDriver):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance)
+ self._attach_mapped_block_devices(instance, block_device_info)
+
+ def _attach_mapped_block_devices(self, instance, block_device_info):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
@@ -290,9 +287,9 @@ class XenAPIDriver(driver.ComputeDriver):
"""Restore the specified instance"""
self._vmops.restore(instance)
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances"""
- self._vmops.poll_rebooting_instances(timeout)
+ self._vmops.poll_rebooting_instances(timeout, instances)
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index b22112f66..db4f5d03e 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -583,7 +583,9 @@ class SessionBase(object):
return jsonutils.dumps({'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
- 'free-computed': 40}})
+ 'free-computed': 40},
+ 'host_hostname': 'fake-xenhost',
+ })
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 8a69f7c54..39a3b9824 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -174,6 +174,7 @@ class HostState(object):
data["host_memory_free_computed"] = host_memory.get(
'free-computed', 0)
del data['host_memory']
+ data['hypervisor_hostname'] = data['host_hostname']
self._stats = data
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 71b21ce24..0c2565dbd 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -126,7 +126,7 @@ class ResourcePool(object):
slave_info = self._create_slave_info()
self.compute_rpcapi.add_aggregate_host(
- context, aggregate.id, host, master_compute, slave_info)
+ context, aggregate, host, master_compute, slave_info)
def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
"""Remove a compute host from an aggregate."""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 96a9f5ba6..915c45243 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -159,8 +159,14 @@ class VMOps(object):
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
+ @property
+ def agent_enabled(self):
+ return not FLAGS.xenapi_disable_agent
+
def _get_agent(self, instance, vm_ref):
- return xapi_agent.XenAPIBasedAgent(self._session, instance, vm_ref)
+ if self.agent_enabled:
+ return xapi_agent.XenAPIBasedAgent(self._session, instance, vm_ref)
+ raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
@@ -519,17 +525,6 @@ class VMOps(object):
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
- agent_build = db.agent_build_get_by_triple(ctx, 'xen',
- instance['os_type'], instance['architecture'])
- if agent_build:
- LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
- '/%(architecture)s is %(version)s') % agent_build)
- else:
- LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
- '/%(architecture)s') % {
- 'hypervisor': 'xen',
- 'os': instance['os_type'],
- 'architecture': instance['architecture']})
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
@@ -542,34 +537,47 @@ class VMOps(object):
greenthread.sleep(0.5)
- # Update agent, if necessary
- # This also waits until the agent starts
- agent = self._get_agent(instance, vm_ref)
- version = agent.get_agent_version()
- if version:
- LOG.info(_('Instance agent version: %s'), version,
- instance=instance)
-
- if (version and agent_build and
- cmp_version(version, agent_build['version']) < 0):
- agent.agent_update(agent_build)
-
- # if the guest agent is not available, configure the
- # instance, but skip the admin password configuration
- no_agent = version is None
-
- # Inject files, if necessary
- if injected_files:
- # Inject any files, if specified
- for path, contents in injected_files:
- agent.inject_file(path, contents)
-
- # Set admin password, if necessary
- if admin_password and not no_agent:
- agent.set_admin_password(admin_password)
-
- # Reset network config
- agent.resetnetwork()
+ if self.agent_enabled:
+ agent_build = db.agent_build_get_by_triple(
+ ctx, 'xen', instance['os_type'], instance['architecture'])
+ if agent_build:
+ LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
+ '/%(architecture)s is %(version)s') % agent_build)
+ else:
+ LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
+ '/%(architecture)s') % {
+ 'hypervisor': 'xen',
+ 'os': instance['os_type'],
+ 'architecture': instance['architecture']})
+
+ # Update agent, if necessary
+ # This also waits until the agent starts
+ agent = self._get_agent(instance, vm_ref)
+ version = agent.get_agent_version()
+ if version:
+ LOG.info(_('Instance agent version: %s'), version,
+ instance=instance)
+
+ if (version and agent_build and
+ cmp_version(version, agent_build['version']) < 0):
+ agent.agent_update(agent_build)
+
+ # if the guest agent is not available, configure the
+ # instance, but skip the admin password configuration
+ no_agent = version is None
+
+ # Inject files, if necessary
+ if injected_files:
+ # Inject any files, if specified
+ for path, contents in injected_files:
+ agent.inject_file(path, contents)
+
+ # Set admin password, if necessary
+ if admin_password and not no_agent:
+ agent.set_admin_password(admin_password)
+
+ # Reset network config
+ agent.resetnetwork()
# Set VCPU weight
vcpu_weight = instance['instance_type']['vcpu_weight']
@@ -862,15 +870,21 @@ class VMOps(object):
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
- vm_ref = self._get_vm_opaque_ref(instance)
- agent = self._get_agent(instance, vm_ref)
- agent.set_admin_password(new_pass)
+ if self.agent_enabled:
+ vm_ref = self._get_vm_opaque_ref(instance)
+ agent = self._get_agent(instance, vm_ref)
+ agent.set_admin_password(new_pass)
+ else:
+ raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
- vm_ref = self._get_vm_opaque_ref(instance)
- agent = self._get_agent(instance, vm_ref)
- agent.inject_file(path, contents)
+ if self.agent_enabled:
+ vm_ref = self._get_vm_opaque_ref(instance)
+ agent = self._get_agent(instance, vm_ref)
+ agent.inject_file(path, contents)
+ else:
+ raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
@@ -1147,10 +1161,12 @@ class VMOps(object):
instance=instance)
else:
vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
+ self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
@@ -1174,7 +1190,7 @@ class VMOps(object):
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
- def poll_rebooting_instances(self, timeout):
+ def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
@@ -1185,7 +1201,6 @@ class VMOps(object):
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
- instances = db.instance_get_all_hung_in_rebooting(ctxt, timeout)
instances_info = dict(instance_count=len(instances),
timeout=timeout)
@@ -1423,9 +1438,12 @@ class VMOps(object):
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
- vm_ref = self._get_vm_opaque_ref(instance)
- agent = self._get_agent(instance, vm_ref)
- agent.resetnetwork()
+ if self.agent_enabled:
+ vm_ref = self._get_vm_opaque_ref(instance)
+ agent = self._get_agent(instance, vm_ref)
+ agent.resetnetwork()
+ else:
+ raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
diff --git a/openstack-common.conf b/openstack-common.conf
index 9a09fd5a8..666fb059e 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc
+modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/tools/pip-requires b/tools/pip-requires
index 922f1a24a..a214d7bc2 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -19,5 +19,6 @@ Babel>=0.9.6
iso8601>=0.1.4
httplib2
setuptools_git>=0.4
+python-cinderclient
python-quantumclient>=2.1
python-glanceclient>=0.5.0,<2
diff --git a/tools/test-requires b/tools/test-requires
index 536d4deee..fc56d3c87 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -11,5 +11,4 @@ pep8==1.2
pylint==0.25.2
sphinx>=1.1.2
feedparser
-python-cinderclient
MySQL-python