diff options
97 files changed, 1398 insertions, 1080 deletions
diff --git a/bin/nova-all b/bin/nova-all index ce0a459b4..531116d69 100755 --- a/bin/nova-all +++ b/bin/nova-all @@ -49,6 +49,7 @@ from nova import utils from nova.vnc import xvp_proxy +CONF = config.CONF LOG = logging.getLogger('nova.all') if __name__ == '__main__': @@ -58,7 +59,7 @@ if __name__ == '__main__': launcher = service.ProcessLauncher() # nova-api - for api in flags.FLAGS.enabled_apis: + for api in CONF.enabled_apis: try: server = service.WSGIService(api) launcher.launch_server(server, workers=server.workers or 1) diff --git a/bin/nova-api b/bin/nova-api index 4bcfa7f79..776152e43 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -42,12 +42,14 @@ from nova.openstack.common import log as logging from nova import service from nova import utils +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() launcher = service.ProcessLauncher() - for api in flags.FLAGS.enabled_apis: + for api in CONF.enabled_apis: server = service.WSGIService(api) launcher.launch_server(server, workers=server.workers or 1) launcher.wait() diff --git a/bin/nova-cert b/bin/nova-cert index 317739329..441bda9e5 100755 --- a/bin/nova-cert +++ b/bin/nova-cert @@ -38,11 +38,12 @@ from nova.openstack.common import log as logging from nova import service from nova import utils +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup("nova") utils.monkey_patch() - server = service.Service.create(binary='nova-cert', topic=FLAGS.cert_topic) + server = service.Service.create(binary='nova-cert', topic=CONF.cert_topic) service.serve(server) service.wait() diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues index 05531de9b..be1d98e3e 100755 --- a/bin/nova-clear-rabbit-queues +++ b/bin/nova-clear-rabbit-queues @@ -53,8 +53,8 @@ delete_exchange_opt = cfg.BoolOpt('delete_exchange', default=False, help='delete nova exchange too.') -FLAGS = flags.FLAGS 
-FLAGS.register_cli_opt(delete_exchange_opt) +CONF = config.CONF +CONF.register_cli_opt(delete_exchange_opt) def delete_exchange(exch): @@ -73,5 +73,5 @@ if __name__ == '__main__': args = config.parse_args(sys.argv) logging.setup("nova") delete_queues(args[1:]) - if FLAGS.delete_exchange: - delete_exchange(FLAGS.control_exchange) + if CONF.delete_exchange: + delete_exchange(CONF.control_exchange) diff --git a/bin/nova-compute b/bin/nova-compute index 2ff98ccfc..f850e1b8c 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -20,7 +20,14 @@ """Starter script for Nova Compute.""" import eventlet -eventlet.monkey_patch() +import os + +if os.name == 'nt': + # eventlet monkey patching causes subprocess.Popen to fail on Windows + # when using pipes due to missing non blocking I/O support + eventlet.monkey_patch(os=False) +else: + eventlet.monkey_patch() import os import sys @@ -40,12 +47,13 @@ from nova.openstack.common import log as logging from nova import service from nova import utils +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup('nova') utils.monkey_patch() server = service.Service.create(binary='nova-compute', - topic=FLAGS.compute_topic) + topic=CONF.compute_topic) service.serve(server) service.wait() diff --git a/bin/nova-console b/bin/nova-console index 92b99edfb..c75e088c8 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -38,11 +38,12 @@ from nova import flags from nova.openstack.common import log as logging from nova import service +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup("nova") server = service.Service.create(binary='nova-console', - topic=FLAGS.console_topic) + topic=CONF.console_topic) service.serve(server) service.wait() diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth index 14ef701a7..654a3f824 100755 --- a/bin/nova-consoleauth +++ b/bin/nova-consoleauth @@ -37,12 +37,12 @@ from nova import flags from 
nova.openstack.common import log as logging from nova import service +CONF = config.CONF if __name__ == "__main__": config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup("nova") server = service.Service.create(binary='nova-consoleauth', - topic=FLAGS.consoleauth_topic) + topic=CONF.consoleauth_topic) service.serve(server) service.wait() diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 48639ce87..ed36c47bc 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -46,21 +46,20 @@ from nova.openstack.common import log as logging from nova.openstack.common import rpc from nova import utils -FLAGS = flags.FLAGS - +CONF = config.CONF LOG = logging.getLogger('nova.dhcpbridge') def add_lease(mac, ip_address): """Set the IP that was assigned by the DHCP server.""" - if FLAGS.fake_rabbit: + if CONF.fake_rabbit: LOG.debug(_("leasing ip")) - network_manager = importutils.import_object(FLAGS.network_manager) + network_manager = importutils.import_object(CONF.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), ip_address) else: api = network_rpcapi.NetworkAPI() - api.lease_fixed_ip(context.get_admin_context(), ip_address, FLAGS.host) + api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host) def old_lease(mac, ip_address): @@ -73,28 +72,28 @@ def old_lease(mac, ip_address): def del_lease(mac, ip_address): """Called when a lease expires.""" - if FLAGS.fake_rabbit: + if CONF.fake_rabbit: LOG.debug(_("releasing ip")) - network_manager = importutils.import_object(FLAGS.network_manager) + network_manager = importutils.import_object(CONF.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), ip_address) else: api = network_rpcapi.NetworkAPI() api.release_fixed_ip(context.get_admin_context(), ip_address, - FLAGS.host) + CONF.host) def init_leases(network_id): """Get the list of hosts for a network.""" ctxt = context.get_admin_context() network_ref = db.network_get(ctxt, network_id) - 
network_manager = importutils.import_object(FLAGS.network_manager) + network_manager = importutils.import_object(CONF.network_manager) return network_manager.get_dhcp_leases(ctxt, network_ref) def main(): """Parse environment and arguments and call the approproate action.""" - flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) + flagfile = os.environ.get('FLAGFILE', CONF.dhcpbridge_flagfile) argv = config.parse_args(sys.argv, default_config_files=[flagfile]) logging.setup("nova") diff --git a/bin/nova-manage b/bin/nova-manage index 1cb2dabb7..43ff5ebca 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -89,15 +89,15 @@ from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova import version -FLAGS = flags.FLAGS -flags.DECLARE('flat_network_bridge', 'nova.network.manager') -flags.DECLARE('num_networks', 'nova.network.manager') -flags.DECLARE('multi_host', 'nova.network.manager') -flags.DECLARE('network_size', 'nova.network.manager') -flags.DECLARE('vlan_start', 'nova.network.manager') -flags.DECLARE('vpn_start', 'nova.network.manager') -flags.DECLARE('default_floating_pool', 'nova.network.manager') -flags.DECLARE('public_interface', 'nova.network.linux_net') +CONF = config.CONF +CONF.import_opt('flat_network_bridge', 'nova.network.manager') +CONF.import_opt('num_networks', 'nova.network.manager') +CONF.import_opt('multi_host', 'nova.network.manager') +CONF.import_opt('network_size', 'nova.network.manager') +CONF.import_opt('vlan_start', 'nova.network.manager') +CONF.import_opt('vpn_start', 'nova.network.manager') +CONF.import_opt('default_floating_pool', 'nova.network.manager') +CONF.import_opt('public_interface', 'nova.network.linux_net') QUOTAS = quota.QUOTAS @@ -379,9 +379,9 @@ class FloatingIpCommands(object): """Creates floating ips for zone by range""" admin_context = context.get_admin_context() if not pool: - pool = FLAGS.default_floating_pool + pool = CONF.default_floating_pool if not interface: - interface = 
FLAGS.public_interface + interface = CONF.public_interface ips = ({'address': str(address), 'pool': pool, 'interface': interface} for address in self.address_to_hosts(ip_range)) @@ -475,7 +475,7 @@ class NetworkCommands(object): if v and k != "self")) if multi_host is not None: kwargs['multi_host'] = multi_host == 'T' - net_manager = importutils.import_object(FLAGS.network_manager) + net_manager = importutils.import_object(CONF.network_manager) net_manager.create_networks(context.get_admin_context(), **kwargs) def list(self): @@ -519,8 +519,8 @@ class NetworkCommands(object): if fixed_range is None and uuid is None: raise Exception("Please specify either fixed_range or uuid") - net_manager = importutils.import_object(FLAGS.network_manager) - if "QuantumManager" in FLAGS.network_manager: + net_manager = importutils.import_object(CONF.network_manager) + if "QuantumManager" in CONF.network_manager: if uuid is None: raise Exception("UUID is required to delete Quantum Networks") if fixed_range: @@ -634,7 +634,7 @@ class ServiceCommands(object): _('Updated_At')) for svc in services: delta = now - (svc['updated_at'] or svc['created_at']) - alive = abs(utils.total_seconds(delta)) <= FLAGS.service_down_time + alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time art = (alive and ":-)") or "XXX" active = 'enabled' if svc['disabled']: @@ -1121,10 +1121,10 @@ class GetLogCommands(object): def errors(self): """Get all of the errors from the log files""" error_found = 0 - if FLAGS.logdir: - logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')] + if CONF.logdir: + logs = [x for x in os.listdir(CONF.logdir) if x.endswith('.log')] for file in logs: - log_file = os.path.join(FLAGS.logdir, file) + log_file = os.path.join(CONF.logdir, file) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 @@ -1224,7 +1224,7 @@ def main(): argv = config.parse_args(sys.argv) logging.setup("nova") except cfg.ConfigFilesNotFoundError: - cfgfile 
= FLAGS.config_file[-1] if FLAGS.config_file else None + cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print _("Could not read %s. Re-running with sudo") % cfgfile diff --git a/bin/nova-network b/bin/nova-network index d23d7882c..def7782d7 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -40,12 +40,13 @@ from nova.openstack.common import log as logging from nova import service from nova import utils +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup("nova") utils.monkey_patch() server = service.Service.create(binary='nova-network', - topic=FLAGS.network_topic) + topic=CONF.network_topic) service.serve(server) service.wait() diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy index d3d9702af..1ba43aa01 100755 --- a/bin/nova-novncproxy +++ b/bin/nova-novncproxy @@ -67,8 +67,9 @@ opts = [ default=6080, help='Port on which to listen for incoming requests'), ] -FLAGS = flags.FLAGS -FLAGS.register_cli_opts(opts) + +CONF = config.CONF +CONF.register_cli_opts(opts) LOG = logging.getLogger(__name__) @@ -130,28 +131,28 @@ class NovaWebSocketProxy(websockify.WebSocketProxy): if __name__ == '__main__': - if FLAGS.ssl_only and not os.path.exists(FLAGS.cert): - parser.error("SSL only and %s not found" % FLAGS.cert) + if CONF.ssl_only and not os.path.exists(CONF.cert): + parser.error("SSL only and %s not found" % CONF.cert) # Setup flags config.parse_args(sys.argv) # Check to see if novnc html/js/css files are present - if not os.path.exists(FLAGS.web): - print "Can not find novnc html/js/css files at %s." % FLAGS.web + if not os.path.exists(CONF.web): + print "Can not find novnc html/js/css files at %s." 
% CONF.web sys.exit(-1) # Create and start the NovaWebSockets proxy - server = NovaWebSocketProxy(listen_host=FLAGS.novncproxy_host, - listen_port=FLAGS.novncproxy_port, - source_is_ipv6=FLAGS.source_is_ipv6, - verbose=FLAGS.verbose, - cert=FLAGS.cert, - key=FLAGS.key, - ssl_only=FLAGS.ssl_only, - daemon=FLAGS.daemon, - record=FLAGS.record, - web=FLAGS.web, + server = NovaWebSocketProxy(listen_host=CONF.novncproxy_host, + listen_port=CONF.novncproxy_port, + source_is_ipv6=CONF.source_is_ipv6, + verbose=CONF.verbose, + cert=CONF.cert, + key=CONF.key, + ssl_only=CONF.ssl_only, + daemon=CONF.daemon, + record=CONF.record, + web=CONF.web, target_host='ignore', target_port='ignore', wrap_mode='exit', diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver index d63ea108e..d6849ce9d 100755 --- a/bin/nova-rpc-zmq-receiver +++ b/bin/nova-rpc-zmq-receiver @@ -40,9 +40,9 @@ from nova.openstack.common import rpc from nova.openstack.common.rpc import impl_zmq from nova import utils -FLAGS = flags.FLAGS -FLAGS.register_opts(rpc.rpc_opts) -FLAGS.register_opts(impl_zmq.zmq_opts) +CONF = config.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) def main(): @@ -50,7 +50,7 @@ def main(): logging.setup("nova") utils.monkey_patch() - ipc_dir = FLAGS.rpc_zmq_ipc_dir + ipc_dir = CONF.rpc_zmq_ipc_dir # Create the necessary directories/files for this service. 
if not os.path.isdir(ipc_dir): @@ -63,10 +63,10 @@ def main(): logging.error(_("Could not create IPC socket directory.")) return - with contextlib.closing(impl_zmq.ZmqProxy(FLAGS)) as reactor: + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: consume_in = "tcp://%s:%s" % \ - (FLAGS.rpc_zmq_bind_address, - FLAGS.rpc_zmq_port) + (CONF.rpc_zmq_bind_address, + CONF.rpc_zmq_port) consumption_proxy = impl_zmq.InternalContext(None) reactor.register(consumption_proxy, diff --git a/bin/nova-scheduler b/bin/nova-scheduler index fc345808a..73dfab207 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -42,12 +42,13 @@ from nova.openstack.common import log as logging from nova import service from nova import utils +CONF = config.CONF + if __name__ == '__main__': config.parse_args(sys.argv) - FLAGS = flags.FLAGS logging.setup("nova") utils.monkey_patch() server = service.Service.create(binary='nova-scheduler', - topic=FLAGS.scheduler_topic) + topic=CONF.scheduler_topic) service.serve(server) service.wait() diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy index e884b3f52..b816bf2e9 100755 --- a/bin/nova-xvpvncproxy +++ b/bin/nova-xvpvncproxy @@ -38,8 +38,6 @@ from nova.openstack.common import rpc from nova import service from nova.vnc import xvp_proxy -FLAGS = flags.FLAGS - if __name__ == "__main__": config.parse_args(sys.argv) logging.setup("nova") diff --git a/etc/nova/policy.json b/etc/nova/policy.json index f77f733c6..bd015802a 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -7,6 +7,7 @@ "compute:create": "", "compute:create:attach_network": "", "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", "compute:get_all": "", diff --git a/nova/api/auth.py b/nova/api/auth.py index be99f7041..1562aeede 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -21,6 +21,7 @@ Common Auth Middleware. 
import webob.dec import webob.exc +from nova import config from nova import context from nova import flags from nova.openstack.common import cfg @@ -34,16 +35,16 @@ use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for', help='Treat X-Forwarded-For as the canonical remote address. ' 'Only enable this if you have a sanitizing proxy.') -FLAGS = flags.FLAGS -FLAGS.register_opt(use_forwarded_for_opt) +CONF = config.CONF +CONF.register_opt(use_forwarded_for_opt) LOG = logging.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" - pipeline = local_conf[FLAGS.auth_strategy] - if not FLAGS.api_rate_limit: - limit_name = FLAGS.auth_strategy + '_nolimit' + pipeline = local_conf[CONF.auth_strategy] + if not CONF.api_rate_limit: + limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] @@ -95,7 +96,7 @@ class NovaKeystoneContext(wsgi.Middleware): # Build a context, including the auth_token... 
remote_address = req.remote_addr - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) service_catalog = None diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index d1145420e..1bf1f9f70 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -31,6 +31,7 @@ from nova.api.ec2 import apirequest from nova.api.ec2 import ec2utils from nova.api.ec2 import faults from nova.api import validator +from nova import config from nova import context from nova import exception from nova import flags @@ -71,10 +72,9 @@ ec2_opts = [ help='Time in seconds before ec2 timestamp expires'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(ec2_opts) - -flags.DECLARE('use_forwarded_for', 'nova.api.auth') +CONF = config.CONF +CONF.register_opts(ec2_opts) +CONF.import_opt('use_forwarded_for', 'nova.api.auth') def ec2_error(req, request_id, code, message): @@ -163,11 +163,11 @@ class Lockout(wsgi.Middleware): def __init__(self, application): """middleware can use fake for testing.""" - if FLAGS.memcached_servers: + if CONF.memcached_servers: import memcache else: from nova.common import memorycache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, + self.mc = memcache.Client(CONF.memcached_servers, debug=0) super(Lockout, self).__init__(application) @@ -176,7 +176,7 @@ class Lockout(wsgi.Middleware): access_key = str(req.params['AWSAccessKeyId']) failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) - if failures >= FLAGS.lockout_attempts: + if failures >= CONF.lockout_attempts: detail = _("Too many failed authentications.") raise webob.exc.HTTPForbidden(detail=detail) res = req.get_response(self.application) @@ -184,15 +184,15 @@ class Lockout(wsgi.Middleware): failures = self.mc.incr(failures_key) if failures is None: # NOTE(vish): To use incr, failures has to be a string. 
- self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) - elif failures >= FLAGS.lockout_attempts: - lock_mins = FLAGS.lockout_minutes + self.mc.set(failures_key, '1', time=CONF.lockout_window * 60) + elif failures >= CONF.lockout_attempts: + lock_mins = CONF.lockout_minutes msg = _('Access key %(access_key)s has had %(failures)d' ' failed authentications and will be locked out' ' for %(lock_mins)d minutes.') % locals() LOG.warn(msg) self.mc.set(failures_key, str(failures), - time=FLAGS.lockout_minutes * 60) + time=CONF.lockout_minutes * 60) return res @@ -224,14 +224,14 @@ class EC2KeystoneAuth(wsgi.Middleware): 'path': req.path, 'params': auth_params, } - if "ec2" in FLAGS.keystone_ec2_url: + if "ec2" in CONF.keystone_ec2_url: creds = {'ec2Credentials': cred_dict} else: creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} creds_json = jsonutils.dumps(creds) headers = {'Content-Type': 'application/json'} - o = urlparse.urlparse(FLAGS.keystone_ec2_url) + o = urlparse.urlparse(CONF.keystone_ec2_url) if o.scheme == "http": conn = httplib.HTTPConnection(o.netloc) else: @@ -262,7 +262,7 @@ class EC2KeystoneAuth(wsgi.Middleware): return ec2_error(req, request_id, "Unauthorized", msg) remote_address = req.remote_addr - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) @@ -291,7 +291,7 @@ class NoAuth(wsgi.Middleware): user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':') project_id = project_id or user_id remote_address = req.remote_addr - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, @@ -315,7 +315,7 @@ class Requestify(wsgi.Middleware): args = dict(req.params) try: expired = ec2utils.is_ec2_timestamp_expired(req.params, - expires=FLAGS.ec2_timestamp_expiry) + expires=CONF.ec2_timestamp_expiry) if expired: msg = _("Timestamp failed 
validation.") LOG.exception(msg) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 70b1e3b80..6cd7c4431 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -30,7 +30,6 @@ from nova import flags from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS def _underscore_to_camelcase(str): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 3446b5a8f..8a7471951 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -33,6 +33,7 @@ from nova import compute from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import vm_states +from nova import config from nova import db from nova import exception from nova import flags @@ -45,7 +46,7 @@ from nova import utils from nova import volume -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) @@ -283,22 +284,22 @@ class CloudController(object): return {'availabilityZoneInfo': result} def describe_regions(self, context, region_name=None, **kwargs): - if FLAGS.region_list: + if CONF.region_list: regions = [] - for region in FLAGS.region_list: + for region in CONF.region_list: name, _sep, host = region.partition('=') - endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, + endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme, host, - FLAGS.ec2_port, - FLAGS.ec2_path) + CONF.ec2_port, + CONF.ec2_path) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', - 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme, - FLAGS.ec2_host, - FLAGS.ec2_port, - FLAGS.ec2_path)}] + 'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme, + CONF.ec2_host, + CONF.ec2_port, + CONF.ec2_path)}] return {'regionInfo': regions} def describe_snapshots(self, @@ -366,7 +367,7 @@ class CloudController(object): result = [] for key_pair in key_pairs: # filter out the vpn keys - suffix = FLAGS.vpn_key_suffix + suffix = CONF.vpn_key_suffix if 
context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], @@ -652,7 +653,7 @@ class CloudController(object): def create_security_group(self, context, group_name, group_description): if isinstance(group_name, unicode): group_name = group_name.encode('utf-8') - if FLAGS.ec2_strict_validation: + if CONF.ec2_strict_validation: # EC2 specification gives constraints for name and description: # Accepts alphanumeric characters, spaces, dashes, and underscores allowed = '^[a-zA-Z0-9_\- ]+$' @@ -1048,7 +1049,7 @@ class CloudController(object): instances = [] for instance in instances: if not context.is_admin: - if instance['image_ref'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(CONF.vpn_image_id): continue i = {} instance_uuid = instance['uuid'] @@ -1070,7 +1071,7 @@ class CloudController(object): floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] - if FLAGS.ec2_private_dns_show_ip: + if CONF.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 580cfdac7..de05aa903 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -27,8 +27,6 @@ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.openstack.common import uuidutils - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py index ef16f086e..331603a3a 100644 --- a/nova/api/ec2/faults.py +++ b/nova/api/ec2/faults.py @@ -15,11 +15,12 @@ import webob.dec import webob.exc +from nova import config from nova import context from nova import flags from nova import utils -FLAGS = flags.FLAGS +CONF = config.CONF class Fault(webob.exc.HTTPException): @@ -44,7 +45,7 @@ class Fault(webob.exc.HTTPException): user_id, _sep, project_id = 
req.params['AWSAccessKeyId'].partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_address', '127.0.0.1') - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctxt = context.RequestContext(user_id, diff --git a/nova/api/manager.py b/nova/api/manager.py index 204e55a0e..dc081d9a6 100644 --- a/nova/api/manager.py +++ b/nova/api/manager.py @@ -16,11 +16,12 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import config from nova import flags from nova import manager from nova.openstack.common import importutils -FLAGS = flags.FLAGS +CONF = config.CONF class MetadataManager(manager.Manager): @@ -31,7 +32,7 @@ class MetadataManager(manager.Manager): """ def __init__(self, *args, **kwargs): super(MetadataManager, self).__init__(*args, **kwargs) - self.network_driver = importutils.import_module(FLAGS.network_driver) + self.network_driver = importutils.import_module(CONF.network_driver) def init_host(self): """Perform any initialization. diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py index 6a472df58..21fb4a7da 100644 --- a/nova/api/metadata/base.py +++ b/nova/api/metadata/base.py @@ -24,6 +24,7 @@ import os from nova.api.ec2 import ec2utils from nova import block_device +from nova import config from nova import context from nova import db from nova import flags @@ -40,9 +41,9 @@ metadata_opts = [ 'config drive')), ] -FLAGS = flags.FLAGS -flags.DECLARE('dhcp_domain', 'nova.network.manager') -FLAGS.register_opts(metadata_opts) +CONF = config.CONF +CONF.register_opts(metadata_opts) +CONF.import_opt('dhcp_domain', 'nova.network.manager') VERSIONS = [ @@ -308,8 +309,8 @@ class InstanceMetadata(): def _get_hostname(self): return "%s%s%s" % (self.instance['hostname'], - '.' if FLAGS.dhcp_domain else '', - FLAGS.dhcp_domain) + '.' 
if CONF.dhcp_domain else '', + CONF.dhcp_domain) def lookup(self, path): if path == "" or path[0] != "/": @@ -351,7 +352,7 @@ class InstanceMetadata(): """Yields (path, value) tuples for metadata elements.""" # EC2 style metadata for version in VERSIONS + ["latest"]: - if version in FLAGS.config_drive_skip_versions.split(' '): + if version in CONF.config_drive_skip_versions.split(' '): continue data = self.get_ec2_metadata(version) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index d022cbc82..14ec696cd 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -23,16 +23,18 @@ import webob.dec import webob.exc from nova.api.metadata import base +from nova import config from nova import exception from nova import flags from nova.openstack.common import log as logging from nova import wsgi +CONF = config.CONF +CONF.import_opt('use_forwarded_for', 'nova.api.auth') + LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS -flags.DECLARE('use_forwarded_for', 'nova.api.auth') -if FLAGS.memcached_servers: +if CONF.memcached_servers: import memcache else: from nova.common import memorycache as memcache @@ -42,7 +44,7 @@ class MetadataRequestHandler(wsgi.Application): """Serve metadata.""" def __init__(self): - self._cache = memcache.Client(FLAGS.memcached_servers, debug=0) + self._cache = memcache.Client(CONF.memcached_servers, debug=0) def get_metadata(self, address): if not address: @@ -65,7 +67,7 @@ class MetadataRequestHandler(wsgi.Application): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): remote_address = req.remote_addr - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) if os.path.normpath("/" + req.path_info) == "/": diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 078d58639..78064012b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -21,14 +21,15 @@ import webob.dec 
import webob.exc from nova.api.openstack import wsgi +from nova import config from nova import context from nova import flags from nova.openstack.common import log as logging from nova import wsgi as base_wsgi LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS -flags.DECLARE('use_forwarded_for', 'nova.api.auth') +CONF = config.CONF +CONF.import_opt('use_forwarded_for', 'nova.api.auth') class NoAuthMiddleware(base_wsgi.Middleware): @@ -54,7 +55,7 @@ class NoAuthMiddleware(base_wsgi.Middleware): user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_address', '127.0.0.1') - if FLAGS.use_forwarded_for: + if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index ccc70cd1f..50ac76179 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -28,6 +28,7 @@ from nova.api.openstack import xmlutil from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova import config from nova import exception from nova import flags from nova.openstack.common import log as logging @@ -35,7 +36,7 @@ from nova import quota LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS +CONF = config.CONF QUOTAS = quota.QUOTAS @@ -148,7 +149,7 @@ def _get_marker_param(request): return request.GET['marker'] -def limited(items, request, max_limit=FLAGS.osapi_max_limit): +def limited(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to requested offset and limit. 
:param items: A sliceable entity @@ -185,7 +186,7 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): return items[offset:range_end] -def get_limit_and_marker(request, max_limit=FLAGS.osapi_max_limit): +def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit): """get limited parameter from request""" params = get_pagination_params(request) limit = params.get('limit', max_limit) @@ -195,7 +196,7 @@ def get_limit_and_marker(request, max_limit=FLAGS.osapi_max_limit): return limit, marker -def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): +def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" limit, marker = get_limit_and_marker(request, max_limit) @@ -414,7 +415,7 @@ class MetadataTemplate(xmlutil.TemplateBuilder): def check_snapshots_enabled(f): @functools.wraps(f) def inner(*args, **kwargs): - if not FLAGS.allow_instance_snapshots: + if not CONF.allow_instance_snapshots: LOG.warn(_('Rejecting snapshot request, snapshots currently' ' disabled')) msg = _("Instance snapshots are not permitted at this time.") @@ -443,7 +444,7 @@ class ViewBuilder(object): params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(request.application_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_compute_link_prefix) url = os.path.join(prefix, request.environ["nova.context"].project_id, collection_name) @@ -452,7 +453,7 @@ class ViewBuilder(object): def _get_href_link(self, request, identifier, collection_name): """Return an href string pointing to this object.""" prefix = self._update_link_prefix(request.application_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_compute_link_prefix) return os.path.join(prefix, request.environ["nova.context"].project_id, collection_name, @@ -462,7 +463,7 @@ class ViewBuilder(object): """Create a URL that refers to a specific resource.""" base_url = 
remove_version_from_href(request.application_url) base_url = self._update_link_prefix(base_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_compute_link_prefix) return os.path.join(base_url, request.environ["nova.context"].project_id, collection_name, diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py index 4af679ffb..e6704951f 100644 --- a/nova/api/openstack/compute/__init__.py +++ b/nova/api/openstack/compute/__init__.py @@ -31,6 +31,7 @@ from nova.api.openstack.compute import limits from nova.api.openstack.compute import server_metadata from nova.api.openstack.compute import servers from nova.api.openstack.compute import versions +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -42,8 +43,8 @@ allow_instance_snapshots_opt = cfg.BoolOpt('allow_instance_snapshots', default=True, help='Permit instance snapshot operations.') -FLAGS = flags.FLAGS -FLAGS.register_opt(allow_instance_snapshots_opt) +CONF = config.CONF +CONF.register_opt(allow_instance_snapshots_opt) class APIRouter(nova.api.openstack.APIRouter): diff --git a/nova/api/openstack/compute/contrib/__init__.py b/nova/api/openstack/compute/contrib/__init__.py index d44254eb6..e6a1e9c4d 100644 --- a/nova/api/openstack/compute/contrib/__init__.py +++ b/nova/api/openstack/compute/contrib/__init__.py @@ -22,11 +22,12 @@ It can't be called 'extensions' because that causes namespacing problems. 
""" from nova.api.openstack import extensions +from nova import config from nova import flags from nova.openstack.common import log as logging -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) @@ -36,4 +37,4 @@ def standard_extensions(ext_mgr): def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, - FLAGS.osapi_compute_ext_list) + CONF.osapi_compute_ext_list) diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index 8432f02fc..1bac0851d 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -27,11 +27,8 @@ from nova import exception from nova import flags from nova.openstack.common import log as logging - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) - # States usable in resetState action state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR) diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py index ccc6b84a2..c05a208a3 100644 --- a/nova/api/openstack/compute/contrib/certificates.py +++ b/nova/api/openstack/compute/contrib/certificates.py @@ -24,9 +24,7 @@ from nova import flags from nova import network from nova.openstack.common import log as logging - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'certificates') diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py index afc24b95d..77d88144a 100644 --- a/nova/api/openstack/compute/contrib/cloudpipe.py +++ b/nova/api/openstack/compute/contrib/cloudpipe.py @@ -21,6 +21,7 @@ from nova.cloudpipe import pipelib from nova import compute from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova import config from nova import db from nova import exception from nova import 
flags @@ -30,8 +31,7 @@ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import utils - -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'cloudpipe') @@ -70,12 +70,12 @@ class CloudpipeController(object): # NOTE(vish): One of the drawbacks of doing this in the api is # the keys will only be on the api node that launched # the cloudpipe. - fileutils.ensure_tree(FLAGS.keys_path) + fileutils.ensure_tree(CONF.keys_path) def _get_all_cloudpipes(self, context): """Get all cloudpipes""" return [instance for instance in self.compute_api.get_all(context) - if instance['image_ref'] == str(FLAGS.vpn_image_id) + if instance['image_ref'] == str(CONF.vpn_image_id) and instance['vm_state'] != vm_states.DELETED] def _get_cloudpipe_for_project(self, context, project_id): diff --git a/nova/api/openstack/compute/contrib/config_drive.py b/nova/api/openstack/compute/contrib/config_drive.py index 779aad539..ac294f660 100644 --- a/nova/api/openstack/compute/contrib/config_drive.py +++ b/nova/api/openstack/compute/contrib/config_drive.py @@ -23,8 +23,6 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import flags - -FLAGS = flags.FLAGS authorize = extensions.soft_extension_authorizer('compute', 'config_drive') diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py index 6ca10559f..15f6456ea 100644 --- a/nova/api/openstack/compute/contrib/extended_server_attributes.py +++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py @@ -22,8 +22,6 @@ from nova import db from nova import flags from nova.openstack.common import log as logging - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('compute', 'extended_server_attributes') diff --git 
a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py index d88f4e14b..f7ccdcbff 100644 --- a/nova/api/openstack/compute/contrib/extended_status.py +++ b/nova/api/openstack/compute/contrib/extended_status.py @@ -21,8 +21,6 @@ from nova import compute from nova import flags from nova.openstack.common import log as logging - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('compute', 'extended_status') diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py index 67fc897fb..237872405 100644 --- a/nova/api/openstack/compute/contrib/hosts.py +++ b/nova/api/openstack/compute/contrib/hosts.py @@ -28,9 +28,7 @@ from nova import exception from nova import flags from nova.openstack.common import log as logging - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'hosts') diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py index 4547bbd01..7c98cb8d6 100644 --- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py +++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py @@ -21,11 +21,12 @@ import datetime import webob.exc from nova.api.openstack import extensions +from nova import config from nova import db from nova import flags from nova import utils -FLAGS = flags.FLAGS +CONF = config.CONF authorize = extensions.extension_authorizer('compute', @@ -82,7 +83,7 @@ class InstanceUsageAuditLogController(object): # We do this this way to include disabled compute services, # which can have instances on them. 
(mdragon) services = [svc for svc in db.service_get_all(context) - if svc['topic'] == FLAGS.compute_topic] + if svc['topic'] == CONF.compute_topic] hosts = set(serv['host'] for serv in services) seen_hosts = set() done_hosts = set() diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py index 62b4a6c80..4537e1ec7 100644 --- a/nova/api/openstack/compute/contrib/networks.py +++ b/nova/api/openstack/compute/contrib/networks.py @@ -26,8 +26,6 @@ from nova import flags from nova import network from nova.openstack.common import log as logging - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'networks') authorize_view = extensions.extension_authorizer('compute', 'networks:view') diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py index 918f17100..054eaf870 100644 --- a/nova/api/openstack/compute/contrib/rescue.py +++ b/nova/api/openstack/compute/contrib/rescue.py @@ -21,13 +21,14 @@ from nova.api.openstack import common from nova.api.openstack import extensions as exts from nova.api.openstack import wsgi from nova import compute +from nova import config from nova import exception from nova import flags from nova.openstack.common import log as logging from nova import utils -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) authorize = exts.extension_authorizer('compute', 'rescue') @@ -54,7 +55,7 @@ class RescueController(wsgi.Controller): if body['rescue'] and 'adminPass' in body['rescue']: password = body['rescue']['adminPass'] else: - password = utils.generate_password(FLAGS.password_length) + password = utils.generate_password(CONF.password_length) instance = self._get_instance(context, id) try: diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index ee36ee58d..b86397694 100644 --- 
a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -32,9 +32,7 @@ from nova import exception from nova import flags from nova.openstack.common import log as logging - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'security_groups') softauth = extensions.soft_extension_authorizer('compute', 'security_groups') diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index ee7924dec..f6e9a63f6 100644 --- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -26,8 +26,6 @@ from nova import exception from nova import flags from nova.openstack.common import timeutils - -FLAGS = flags.FLAGS authorize_show = extensions.extension_authorizer('compute', 'simple_tenant_usage:show') authorize_list = extensions.extension_authorizer('compute', diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py index 6eaa51079..1de6134ad 100644 --- a/nova/api/openstack/compute/contrib/volumes.py +++ b/nova/api/openstack/compute/contrib/volumes.py @@ -30,9 +30,7 @@ from nova.openstack.common import log as logging from nova import utils from nova import volume - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS authorize = extensions.extension_authorizer('compute', 'volumes') diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py index 01b728a30..c46a6b034 100644 --- a/nova/api/openstack/compute/extensions.py +++ b/nova/api/openstack/compute/extensions.py @@ -16,19 +16,20 @@ # under the License. 
from nova.api.openstack import extensions as base_extensions +from nova import config from nova import flags from nova.openstack.common import log as logging from nova.openstack.common.plugin import pluginmanager LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS +CONF = config.CONF class ExtensionManager(base_extensions.ExtensionManager): def __init__(self): LOG.audit(_('Initializing extension manager.')) - self.cls_list = FLAGS.osapi_compute_extension + self.cls_list = CONF.osapi_compute_extension self.PluginManager = pluginmanager.PluginManager('nova', 'compute-extensions') self.PluginManager.load_plugins() diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py index 4273e40cd..3bc817076 100644 --- a/nova/api/openstack/compute/image_metadata.py +++ b/nova/api/openstack/compute/image_metadata.py @@ -24,9 +24,6 @@ from nova import flags from nova.image import glance -FLAGS = flags.FLAGS - - class Controller(object): """The image metadata API controller for the OpenStack API""" diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py index 1b20531de..0c280618e 100644 --- a/nova/api/openstack/compute/images.py +++ b/nova/api/openstack/compute/images.py @@ -27,7 +27,6 @@ import nova.utils LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS SUPPORTED_FILTERS = { 'name': 'name', diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py index 6ad888fd7..ec9759759 100644 --- a/nova/api/openstack/compute/ips.py +++ b/nova/api/openstack/compute/ips.py @@ -25,9 +25,7 @@ from nova.api.openstack import xmlutil from nova import flags from nova.openstack.common import log as logging - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS def make_network(elem): diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index c4293255d..d8d2f1c28 100644 --- a/nova/api/openstack/compute/servers.py +++ 
b/nova/api/openstack/compute/servers.py @@ -30,6 +30,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova.compute import instance_types +from nova import config from nova import exception from nova import flags from nova.openstack.common import importutils @@ -41,7 +42,7 @@ from nova import utils LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS +CONF = config.CONF def make_fault(elem): @@ -603,7 +604,7 @@ class Controller(wsgi.Controller): self.quantum_attempted = True from nova.network.quantumv2 import api as quantum_api self.have_quantum = issubclass( - importutils.import_class(FLAGS.network_api_class), + importutils.import_class(CONF.network_api_class), quantum_api.API) except ImportError: self.have_quantum = False @@ -921,7 +922,7 @@ class Controller(wsgi.Controller): if '_is_precooked' in server['server'].keys(): del server['server']['_is_precooked'] else: - if FLAGS.enable_instance_password: + if CONF.enable_instance_password: server['server']['adminPass'] = password robj = wsgi.ResponseObject(server) @@ -930,7 +931,7 @@ class Controller(wsgi.Controller): def _delete(self, context, req, instance_uuid): instance = self._get_server(context, req, instance_uuid) - if FLAGS.reclaim_instance_interval: + if CONF.reclaim_instance_interval: self.compute_api.soft_delete(context, instance) else: self.compute_api.delete(context, instance) @@ -1185,7 +1186,7 @@ class Controller(wsgi.Controller): try: password = body['adminPass'] except (KeyError, TypeError): - password = utils.generate_password(FLAGS.password_length) + password = utils.generate_password(CONF.password_length) context = req.environ['nova.context'] instance = self._get_server(context, req, id) @@ -1253,7 +1254,7 @@ class Controller(wsgi.Controller): # Add on the adminPass attribute since the view doesn't do it # unless instance passwords are disabled - if FLAGS.enable_instance_password: + if CONF.enable_instance_password: 
view['server']['adminPass'] = password robj = wsgi.ResponseObject(view) @@ -1327,7 +1328,7 @@ class Controller(wsgi.Controller): password = server['adminPass'] self._validate_admin_password(password) except KeyError: - password = utils.generate_password(FLAGS.password_length) + password = utils.generate_password(CONF.password_length) except ValueError: raise exc.HTTPBadRequest(explanation=_("Invalid adminPass")) diff --git a/nova/api/openstack/compute/views/addresses.py b/nova/api/openstack/compute/views/addresses.py index 41d1d0730..ec5fda64a 100644 --- a/nova/api/openstack/compute/views/addresses.py +++ b/nova/api/openstack/compute/views/addresses.py @@ -21,8 +21,6 @@ from nova.api.openstack import common from nova import flags from nova.openstack.common import log as logging - -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py index c0ea71385..d1d7d008f 100644 --- a/nova/api/openstack/compute/views/images.py +++ b/nova/api/openstack/compute/views/images.py @@ -18,11 +18,11 @@ import os.path from nova.api.openstack import common +from nova import config from nova import flags from nova import utils - -FLAGS = flags.FLAGS +CONF = config.CONF class ViewBuilder(common.ViewBuilder): @@ -123,7 +123,7 @@ class ViewBuilder(common.ViewBuilder): """Create an alternate link for a specific image id.""" glance_url = utils.generate_glance_url() glance_url = self._update_link_prefix(glance_url, - FLAGS.osapi_glance_link_prefix) + CONF.osapi_glance_link_prefix) return os.path.join(glance_url, request.environ["nova.context"].project_id, self._collection_name, diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py index 454761b32..826c8b4a5 100644 --- a/nova/api/openstack/compute/views/versions.py +++ b/nova/api/openstack/compute/views/versions.py @@ -19,10 +19,11 @@ import copy import os from nova.api.openstack import common 
+from nova import config from nova import flags -FLAGS = flags.FLAGS +CONF = config.CONF def get_view_builder(req): @@ -93,7 +94,7 @@ class ViewBuilder(common.ViewBuilder): def generate_href(self, path=None): """Create an url that refers to a specific version_number.""" prefix = self._update_link_prefix(self.base_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_compute_link_prefix) version_number = 'v2' if path: path = path.strip('/') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 01c2516c8..298e98603 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -30,9 +30,7 @@ from nova.openstack.common import importutils from nova.openstack.common import log as logging import nova.policy - LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS class ExtensionDescriptor(object): diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py index 6c991408d..1d22e74fc 100644 --- a/nova/api/sizelimit.py +++ b/nova/api/sizelimit.py @@ -21,6 +21,7 @@ Request Body limiting middleware. 
import webob.dec import webob.exc +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -33,8 +34,8 @@ max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', help='the maximum body size ' 'per each osapi request(bytes)') -FLAGS = flags.FLAGS -FLAGS.register_opt(max_request_body_size_opt) +CONF = config.CONF +CONF.register_opt(max_request_body_size_opt) LOG = logging.getLogger(__name__) @@ -46,8 +47,8 @@ class RequestBodySizeLimiter(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - if (req.content_length > FLAGS.osapi_max_request_body_size - or len(req.body) > FLAGS.osapi_max_request_body_size): + if (req.content_length > CONF.osapi_max_request_body_size + or len(req.body) > CONF.osapi_max_request_body_size): msg = _("Request is too large.") raise webob.exc.HTTPBadRequest(explanation=msg) else: diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 9d83862a4..63be694b8 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -28,6 +28,7 @@ import zipfile from nova import compute from nova.compute import instance_types +from nova import config from nova import crypto from nova import db from nova import exception @@ -53,7 +54,9 @@ cloudpipe_opts = [ help=_('Netmask to push into openvpn config')), ] -flags.DECLARE('cnt_vpn_clients', 'nova.network.manager') +CONF = config.CONF +CONF.import_opt('cnt_vpn_clients', 'nova.network.manager') + FLAGS = flags.FLAGS FLAGS.register_opts(cloudpipe_opts) diff --git a/nova/compute/api.py b/nova/compute/api.py index 28c7068ba..3d9108c75 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -35,6 +35,7 @@ from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova import config from nova.consoleauth import rpcapi as consoleauth_rpcapi 
from nova import crypto from nova.db import base @@ -59,7 +60,8 @@ from nova import volume LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS -flags.DECLARE('consoleauth_topic', 'nova.consoleauth') +CONF = config.CONF +CONF.import_opt('consoleauth_topic', 'nova.consoleauth') MAX_USERDATA_SIZE = 65535 QUOTAS = quota.QUOTAS @@ -500,7 +502,8 @@ class API(base.Base): LOG.debug(_("Going to run %s instances...") % num_instances) filter_properties = dict(scheduler_hints=scheduler_hints) - if context.is_admin and forced_host: + if forced_host: + check_policy(context, 'create:forced_host', {}) filter_properties['force_hosts'] = [forced_host] for i in xrange(num_instances): diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index d89e6409a..fd796bd91 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -130,7 +130,7 @@ def get_default_instance_type(): return get_instance_type_by_name(name) -def get_instance_type(instance_type_id, ctxt=None): +def get_instance_type(instance_type_id, ctxt=None, inactive=False): """Retrieves single instance type by id.""" if instance_type_id is None: return get_default_instance_type() @@ -138,6 +138,9 @@ def get_instance_type(instance_type_id, ctxt=None): if ctxt is None: ctxt = context.get_admin_context() + if inactive: + ctxt = ctxt.elevated(read_deleted="yes") + return db.instance_type_get(ctxt, instance_type_id) diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 17416f991..4a284be64 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -39,8 +39,13 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None): """Adds the specified fault to the database.""" code = 500 + message = fault.__class__.__name__ + if hasattr(fault, "kwargs"): code = fault.kwargs.get('code', 500) + # get the message from the exception that was thrown + # if that does not exist, use the name of the exception class itself + message = fault.kwargs.get('value', 
message) details = unicode(fault) if exc_info and code == 500: @@ -50,7 +55,7 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None): values = { 'instance_uuid': instance_uuid, 'code': code, - 'message': fault.__class__.__name__, + 'message': unicode(message), 'details': unicode(details), } db.instance_fault_create(context, values) diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index 219119724..e654780a3 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -29,17 +29,9 @@ from nova.virt.vmwareapi import driver as vmwareapi_conn LOG = logging.getLogger(__name__) -vmrc_manager_opts = [ - cfg.StrOpt('console_public_hostname', - default='', - help='Publicly visible name for this console host'), - cfg.StrOpt('console_driver', - default='nova.console.vmrc.VMRCConsole', - help='Driver to use for the console'), - ] - FLAGS = flags.FLAGS -FLAGS.register_opts(vmrc_manager_opts) +flags.DECLARE('console_driver', 'nova.console.manager') +flags.DECLARE('console_public_hostname', 'nova.console.manager') class ConsoleVMRCManager(manager.Manager): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ea7e665cf..865bb05f1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -23,7 +23,6 @@ import collections import copy import datetime import functools -import warnings from sqlalchemy import and_ from sqlalchemy.exc import IntegrityError @@ -773,9 +772,9 @@ def floating_ip_create(context, values, session=None): # check uniqueness for not deleted addresses if not floating_ip_ref.deleted: try: - floating_ip = floating_ip_get_by_address(context, - floating_ip_ref.address, - session) + floating_ip = _floating_ip_get_by_address(context, + floating_ip_ref.address, + session) except exception.FloatingIpNotFoundForAddress: pass else: @@ -802,9 +801,9 @@ def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with 
session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, - floating_address, - session=session) + floating_ip_ref = _floating_ip_get_by_address(context, + floating_address, + session=session) fixed_ip_ref = fixed_ip_get_by_address(context, fixed_address, session=session) @@ -815,25 +814,18 @@ def floating_ip_fixed_ip_associate(context, floating_address, @require_context def floating_ip_deallocate(context, address): - session = get_session() - with session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, - address, - session=session) - floating_ip_ref['project_id'] = None - floating_ip_ref['host'] = None - floating_ip_ref['auto_assigned'] = False - floating_ip_ref.save(session=session) + model_query(context, models.FloatingIp).\ + filter_by(address=address).\ + update({'project_id': None, + 'host': None, + 'auto_assigned': False}) @require_context def floating_ip_destroy(context, address): - session = get_session() - with session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, - address, - session=session) - floating_ip_ref.delete(session=session) + model_query(context, models.FloatingIp).\ + filter_by(address=address).\ + delete() @require_context @@ -863,13 +855,9 @@ def floating_ip_disassociate(context, address): @require_context def floating_ip_set_auto_assigned(context, address): - session = get_session() - with session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, - address, - session=session) - floating_ip_ref.auto_assigned = True - floating_ip_ref.save(session=session) + model_query(context, models.FloatingIp).\ + filter_by(address=address).\ + update({'auto_assigned': True}) def _floating_ip_get_all(context, session=None): @@ -906,7 +894,12 @@ def floating_ip_get_all_by_project(context, project_id): @require_context -def floating_ip_get_by_address(context, address, session=None): +def floating_ip_get_by_address(context, address): + return _floating_ip_get_by_address(context, address) + + 
+@require_context +def _floating_ip_get_by_address(context, address, session=None): result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() @@ -923,16 +916,14 @@ def floating_ip_get_by_address(context, address, session=None): @require_context -def floating_ip_get_by_fixed_address(context, fixed_address, session=None): - if not session: - session = get_session() - - fixed_ip = fixed_ip_get_by_address(context, fixed_address, session) - fixed_ip_id = fixed_ip['id'] - - return model_query(context, models.FloatingIp, session=session).\ - filter_by(fixed_ip_id=fixed_ip_id).\ - all() +def floating_ip_get_by_fixed_address(context, fixed_address): + subq = model_query(context, models.FixedIp.id).\ + filter_by(address=fixed_address).\ + limit(1).\ + subquery() + return model_query(context, models.FloatingIp).\ + filter_by(fixed_ip_id=subq.as_scalar()).\ + all() # NOTE(tr3buchet) please don't invent an exception here, empty list is fine @@ -951,7 +942,9 @@ def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None): def floating_ip_update(context, address, values): session = get_session() with session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, address, session) + floating_ip_ref = _floating_ip_get_by_address(context, + address, + session) for (key, value) in values.iteritems(): floating_ip_ref[key] = value floating_ip_ref.save(session=session) @@ -1604,7 +1597,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, if marker is not None: try: marker = instance_get_by_uuid(context, marker, session=session) - except exception.InstanceNotFound as e: + except exception.InstanceNotFound: raise exception.MarkerNotFound(marker) query_prefix = paginate_query(query_prefix, models.Instance, limit, [sort_key, 'created_at', 'id'], @@ -3240,7 +3233,7 @@ def volume_metadata_update(context, volume_id, metadata, delete): try: meta_ref = volume_metadata_get_item(context, volume_id, meta_key, 
session) - except exception.VolumeMetadataNotFound, e: + except exception.VolumeMetadataNotFound: meta_ref = models.VolumeMetadata() item.update({"key": meta_key, "volume_id": volume_id}) @@ -4186,7 +4179,7 @@ def instance_metadata_update(context, instance_uuid, metadata, delete, try: meta_ref = instance_metadata_get_item(context, instance_uuid, meta_key, session) - except exception.InstanceMetadataNotFound, e: + except exception.InstanceMetadataNotFound: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_uuid": instance_uuid}) @@ -4268,7 +4261,7 @@ def instance_system_metadata_update(context, instance_uuid, metadata, delete, try: meta_ref = _instance_system_metadata_get_item( context, instance_uuid, meta_key, session) - except exception.InstanceSystemMetadataNotFound, e: + except exception.InstanceSystemMetadataNotFound: meta_ref = models.InstanceSystemMetadata() item.update({"key": meta_key, "instance_uuid": instance_uuid}) @@ -4455,7 +4448,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id, try: spec_ref = instance_type_extra_specs_get_item( context, flavor_id, key, session) - except exception.InstanceTypeExtraSpecsNotFound, e: + except exception.InstanceTypeExtraSpecsNotFound: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type["id"], @@ -4634,7 +4627,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, try: spec_ref = volume_type_extra_specs_get_item( context, volume_type_id, key, session) - except exception.VolumeTypeExtraSpecsNotFound, e: + except exception.VolumeTypeExtraSpecsNotFound: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, @@ -4994,12 +4987,12 @@ def aggregate_delete(context, aggregate_id): raise exception.AggregateNotFound(aggregate_id=aggregate_id) #Delete Metadata - rows = model_query(context, - models.AggregateMetadata).\ - 
filter_by(aggregate_id=aggregate_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + model_query(context, + models.AggregateMetadata).\ + filter_by(aggregate_id=aggregate_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_admin_context diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index ccd98a377..184d279ae 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -16,7 +16,149 @@ # License for the specific language governing permissions and limitations # under the License. -"""Session Handling for SQLAlchemy backend.""" +"""Session Handling for SQLAlchemy backend. + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with AUTOCOMMIT=1. + model_query() will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + Note: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and reservation_rollback(). 
+ + Examples: + + def get_foo(context, foo): + return model_query(context, models.Foo).\ + filter_by(foo=foo).\ + first() + + def update_foo(context, id, newfoo): + model_query(context, models.Foo).\ + filter_by(id=id).\ + update({'foo': newfoo}) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keeping all the reads and writes within + the context managed by a single session. In this way, the session's __exit__ + handler will take care of calling flush() and commit() for you. + If using this approach, you should not explicitly call flush() or commit(). + Any error within the context of the session will cause the session to emit + a ROLLBACK. If the connection is dropped before this is possible, the + database will implicitly rollback the transaction. + + Note: statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call model.save() + + def create_many_foo(context, foos): + session = get_session() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = get_session() + with session.begin(): + foo_ref = model_query(context, models.Foo, session).\ + filter_by(id=foo_id).\ + first() + model_query(context, models.Bar, session).\ + filter_by(id=foo_ref['bar_id']).\ + update({'bar': newbar}) + + Note: update_bar is a trivially simple example of using "with session.begin". + Whereas create_many_foo is a good example of when a transaction is needed, + it is always best to use as few queries as possible. The two queries in + update_bar can be better expressed using a single query which avoids + the need for an explicit transaction. 
It can be expressed like so:
+
+    def update_bar(context, foo_id, newbar):
+        subq = model_query(context, models.Foo.id).\
+                filter_by(id=foo_id).\
+                limit(1).\
+                subquery()
+        model_query(context, models.Bar).\
+                filter_by(id=subq.as_scalar()).\
+                update({'bar': newbar})
+
+  For reference, this emits approximately the following SQL statement:
+
+  UPDATE bar SET bar = ${newbar}
+      WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
+
+* Passing an active session between methods. Sessions should only be passed
+  to private methods. The private method must use a subtransaction; otherwise
+  SQLAlchemy will throw an error when you call session.begin() on an existing
+  transaction. Public methods should not accept a session parameter and should
+  not be involved in sessions within the caller's scope.
+
+  Note that this incurs more overhead in SQLAlchemy than the above means
+  due to nesting transactions, and it is not possible to implicitly retry
+  failed database operations when using this approach.
+
+  This also makes code somewhat more difficult to read and debug, because a
+  single database transaction spans more than one method. Error handling
+  becomes less clear in this situation. When this is needed for code clarity,
+  it should be clearly documented.
+
+    def myfunc(foo):
+        session = get_session()
+        with session.begin():
+            # do some database things
+            bar = _private_func(foo, session)
+        return bar
+
+    def _private_func(foo, session=None):
+        if not session:
+            session = get_session()
+        with session.begin(subtransactions=True):
+            # do some other database things
+        return bar
+
+
+There are some things which it is best to avoid:
+
+* Don't keep a transaction open any longer than necessary.
+
+  This means that your "with session.begin()" block should be as short
+  as possible, while still containing all the related calls for that
+  transaction.
+
+* Avoid "with_lockmode('UPDATE')" when possible.
+
+  In MySQL/InnoDB, when a "SELECT ... 
FOR UPDATE" query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. + However, this can not be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + +""" import re import time diff --git a/nova/flags.py b/nova/flags.py index a27674472..e3a33de12 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -36,17 +36,6 @@ from nova.openstack.common import cfg FLAGS = cfg.CONF -class UnrecognizedFlag(Exception): - pass - - -def DECLARE(name, module_string, flag_values=FLAGS): - if module_string not in sys.modules: - __import__(module_string, globals(), locals()) - if name not in flag_values: - raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) - - def _get_my_ip(): """ Returns the actual ip of the local machine. diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot index c1646f64d..5301ee2c5 100644 --- a/nova/locale/nova.pot +++ b/nova/locale/nova.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: nova 2013.1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2012-11-05 00:02+0000\n" +"POT-Creation-Date: 2012-11-07 00:02+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Language-Team: LANGUAGE <LL@li.org>\n" @@ -154,8 +154,8 @@ msgstr "" msgid "Volume %(volume_id)s is still attached, detach volume first." 
msgstr "" -#: nova/exception.py:235 nova/api/ec2/cloud.py:389 nova/api/ec2/cloud.py:414 -#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2234 +#: nova/exception.py:235 nova/api/ec2/cloud.py:390 nova/api/ec2/cloud.py:415 +#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2238 msgid "Keypair data is invalid" msgstr "" @@ -179,8 +179,8 @@ msgstr "" msgid "Invalid volume" msgstr "" -#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1283 -#: nova/api/openstack/compute/contrib/admin_actions.py:242 +#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1285 +#: nova/api/openstack/compute/contrib/admin_actions.py:239 msgid "Invalid metadata" msgstr "" @@ -193,7 +193,7 @@ msgstr "" msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "" -#: nova/exception.py:271 nova/api/ec2/cloud.py:571 +#: nova/exception.py:271 nova/api/ec2/cloud.py:572 #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "" @@ -1416,17 +1416,17 @@ msgstr "" msgid "Invalid server_string: %s" msgstr "" -#: nova/utils.py:907 +#: nova/utils.py:895 #, python-format msgid "timefunc: '%(name)s' took %(total_time).2f secs" msgstr "" -#: nova/utils.py:985 +#: nova/utils.py:973 #, python-format msgid "Reloading cached file %s" msgstr "" -#: nova/utils.py:1103 nova/virt/configdrive.py:151 +#: nova/utils.py:1091 nova/virt/configdrive.py:151 #, python-format msgid "Could not remove tmpdir: %s" msgstr "" @@ -1453,15 +1453,15 @@ msgstr "" msgid "Loading app %(name)s from %(path)s" msgstr "" -#: nova/api/auth.py:108 +#: nova/api/auth.py:109 msgid "Invalid service catalog json." msgstr "" -#: nova/api/auth.py:131 +#: nova/api/auth.py:132 msgid "Sourcing roles from deprecated X-Role HTTP header" msgstr "" -#: nova/api/sizelimit.py:51 +#: nova/api/sizelimit.py:52 msgid "Request is too large." 
msgstr "" @@ -1592,262 +1592,262 @@ msgstr "" msgid "Environment: %s" msgstr "" -#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:79 +#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:81 msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: nova/api/ec2/apirequest.py:64 +#: nova/api/ec2/apirequest.py:63 #, python-format msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/api/ec2/cloud.py:337 +#: nova/api/ec2/cloud.py:338 #, python-format msgid "Create snapshot of volume %s" msgstr "" -#: nova/api/ec2/cloud.py:363 +#: nova/api/ec2/cloud.py:364 #, python-format msgid "Could not find key pair(s): %s" msgstr "" -#: nova/api/ec2/cloud.py:379 +#: nova/api/ec2/cloud.py:380 #, python-format msgid "Create key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:386 nova/api/ec2/cloud.py:411 +#: nova/api/ec2/cloud.py:387 nova/api/ec2/cloud.py:412 #: nova/api/openstack/compute/contrib/keypairs.py:93 msgid "Quota exceeded, too many key pairs." msgstr "" -#: nova/api/ec2/cloud.py:392 nova/api/ec2/cloud.py:417 +#: nova/api/ec2/cloud.py:393 nova/api/ec2/cloud.py:418 #: nova/api/openstack/compute/contrib/keypairs.py:101 #, python-format msgid "Key pair '%s' already exists." msgstr "" -#: nova/api/ec2/cloud.py:401 +#: nova/api/ec2/cloud.py:402 #, python-format msgid "Import key %s" msgstr "" -#: nova/api/ec2/cloud.py:424 +#: nova/api/ec2/cloud.py:425 #, python-format msgid "Delete key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:558 nova/api/ec2/cloud.py:679 +#: nova/api/ec2/cloud.py:559 nova/api/ec2/cloud.py:680 msgid "Not enough parameters, need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:563 +#: nova/api/ec2/cloud.py:564 #, python-format msgid "%s Not enough parameters to build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:633 +#: nova/api/ec2/cloud.py:602 nova/api/ec2/cloud.py:634 msgid "No rule for the specified parameters." 
msgstr "" -#: nova/api/ec2/cloud.py:624 +#: nova/api/ec2/cloud.py:625 #, python-format msgid "%s - This rule already exists in group" msgstr "" -#: nova/api/ec2/cloud.py:690 +#: nova/api/ec2/cloud.py:691 #, python-format msgid "Get console output for instance %s" msgstr "" -#: nova/api/ec2/cloud.py:766 +#: nova/api/ec2/cloud.py:767 #, python-format msgid "Create volume from snapshot %s" msgstr "" -#: nova/api/ec2/cloud.py:770 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:771 nova/api/openstack/compute/contrib/volumes.py:241 #, python-format msgid "Create volume of %s GB" msgstr "" -#: nova/api/ec2/cloud.py:798 +#: nova/api/ec2/cloud.py:799 msgid "Delete Failed" msgstr "" -#: nova/api/ec2/cloud.py:811 +#: nova/api/ec2/cloud.py:812 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/api/ec2/cloud.py:819 +#: nova/api/ec2/cloud.py:820 msgid "Attach Failed." msgstr "" -#: nova/api/ec2/cloud.py:832 nova/api/openstack/compute/contrib/volumes.py:422 +#: nova/api/ec2/cloud.py:833 nova/api/openstack/compute/contrib/volumes.py:420 #, python-format msgid "Detach volume %s" msgstr "" -#: nova/api/ec2/cloud.py:838 +#: nova/api/ec2/cloud.py:839 msgid "Detach Volume Failed." 
msgstr "" -#: nova/api/ec2/cloud.py:864 nova/api/ec2/cloud.py:921 -#: nova/api/ec2/cloud.py:1458 nova/api/ec2/cloud.py:1473 +#: nova/api/ec2/cloud.py:865 nova/api/ec2/cloud.py:922 +#: nova/api/ec2/cloud.py:1459 nova/api/ec2/cloud.py:1474 #, python-format msgid "attribute not supported: %s" msgstr "" -#: nova/api/ec2/cloud.py:987 +#: nova/api/ec2/cloud.py:988 #, python-format msgid "vol = %s\n" msgstr "" -#: nova/api/ec2/cloud.py:1138 +#: nova/api/ec2/cloud.py:1139 msgid "Allocate address" msgstr "" -#: nova/api/ec2/cloud.py:1142 +#: nova/api/ec2/cloud.py:1143 msgid "No more floating IPs available" msgstr "" -#: nova/api/ec2/cloud.py:1146 +#: nova/api/ec2/cloud.py:1147 #, python-format msgid "Release address %s" msgstr "" -#: nova/api/ec2/cloud.py:1151 +#: nova/api/ec2/cloud.py:1152 msgid "Unable to release IP Address." msgstr "" -#: nova/api/ec2/cloud.py:1154 +#: nova/api/ec2/cloud.py:1155 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1162 +#: nova/api/ec2/cloud.py:1163 msgid "Unable to associate IP Address, no fixed_ips." msgstr "" -#: nova/api/ec2/cloud.py:1170 +#: nova/api/ec2/cloud.py:1171 #: nova/api/openstack/compute/contrib/floating_ips.py:257 #, python-format msgid "multiple fixed_ips exist, using the first: %s" msgstr "" -#: nova/api/ec2/cloud.py:1179 +#: nova/api/ec2/cloud.py:1180 msgid "Floating ip is already associated." msgstr "" -#: nova/api/ec2/cloud.py:1182 +#: nova/api/ec2/cloud.py:1183 msgid "l3driver call to add floating ip failed." msgstr "" -#: nova/api/ec2/cloud.py:1185 +#: nova/api/ec2/cloud.py:1186 msgid "Error, unable to associate floating ip." msgstr "" -#: nova/api/ec2/cloud.py:1193 +#: nova/api/ec2/cloud.py:1194 #, python-format msgid "Disassociate address %s" msgstr "" -#: nova/api/ec2/cloud.py:1198 +#: nova/api/ec2/cloud.py:1199 msgid "Floating ip is not associated." 
msgstr "" -#: nova/api/ec2/cloud.py:1201 +#: nova/api/ec2/cloud.py:1202 #: nova/api/openstack/compute/contrib/floating_ips.py:100 msgid "Cannot disassociate auto assigned floating ip" msgstr "" -#: nova/api/ec2/cloud.py:1228 +#: nova/api/ec2/cloud.py:1229 msgid "Image must be available" msgstr "" -#: nova/api/ec2/cloud.py:1260 +#: nova/api/ec2/cloud.py:1261 msgid "Going to start terminating instances" msgstr "" -#: nova/api/ec2/cloud.py:1270 +#: nova/api/ec2/cloud.py:1271 #, python-format msgid "Reboot instance %r" msgstr "" -#: nova/api/ec2/cloud.py:1279 +#: nova/api/ec2/cloud.py:1280 msgid "Going to stop instances" msgstr "" -#: nova/api/ec2/cloud.py:1288 +#: nova/api/ec2/cloud.py:1289 msgid "Going to start instances" msgstr "" -#: nova/api/ec2/cloud.py:1379 +#: nova/api/ec2/cloud.py:1380 #, python-format msgid "De-registering image %s" msgstr "" -#: nova/api/ec2/cloud.py:1395 +#: nova/api/ec2/cloud.py:1396 msgid "imageLocation is required" msgstr "" -#: nova/api/ec2/cloud.py:1414 +#: nova/api/ec2/cloud.py:1415 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1476 +#: nova/api/ec2/cloud.py:1477 msgid "user or group not specified" msgstr "" -#: nova/api/ec2/cloud.py:1478 +#: nova/api/ec2/cloud.py:1479 msgid "only group \"all\" is supported" msgstr "" -#: nova/api/ec2/cloud.py:1480 +#: nova/api/ec2/cloud.py:1481 msgid "operation_type must be add or remove" msgstr "" -#: nova/api/ec2/cloud.py:1482 +#: nova/api/ec2/cloud.py:1483 #, python-format msgid "Updating image %s publicity" msgstr "" -#: nova/api/ec2/cloud.py:1495 +#: nova/api/ec2/cloud.py:1496 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "" -#: nova/api/ec2/cloud.py:1524 +#: nova/api/ec2/cloud.py:1525 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. 
Instance does not " "have a volume attached at root (%(root)s)" msgstr "" -#: nova/api/ec2/cloud.py:1554 +#: nova/api/ec2/cloud.py:1555 #, python-format msgid "Couldn't stop instance with in %d sec" msgstr "" -#: nova/api/ec2/cloud.py:1572 +#: nova/api/ec2/cloud.py:1573 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "" -#: nova/api/ec2/cloud.py:1605 +#: nova/api/ec2/cloud.py:1606 msgid "Invalid CIDR" msgstr "" -#: nova/api/ec2/ec2utils.py:189 +#: nova/api/ec2/ec2utils.py:187 msgid "Request must include either Timestamp or Expires, but cannot contain both" msgstr "" -#: nova/api/ec2/ec2utils.py:209 +#: nova/api/ec2/ec2utils.py:207 msgid "Timestamp is invalid." msgstr "" -#: nova/api/metadata/handler.py:77 nova/api/metadata/handler.py:84 +#: nova/api/metadata/handler.py:79 nova/api/metadata/handler.py:86 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "" @@ -1883,129 +1883,129 @@ msgstr "" msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: nova/api/openstack/common.py:99 +#: nova/api/openstack/common.py:100 #, python-format msgid "" "status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " "Bad upgrade or db corrupted?" 
msgstr "" -#: nova/api/openstack/common.py:138 nova/api/openstack/common.py:172 +#: nova/api/openstack/common.py:139 nova/api/openstack/common.py:173 msgid "limit param must be an integer" msgstr "" -#: nova/api/openstack/common.py:141 nova/api/openstack/common.py:176 +#: nova/api/openstack/common.py:142 nova/api/openstack/common.py:177 msgid "limit param must be positive" msgstr "" -#: nova/api/openstack/common.py:166 +#: nova/api/openstack/common.py:167 msgid "offset param must be an integer" msgstr "" -#: nova/api/openstack/common.py:180 +#: nova/api/openstack/common.py:181 msgid "offset param must be positive" msgstr "" -#: nova/api/openstack/common.py:215 nova/api/openstack/compute/servers.py:536 +#: nova/api/openstack/common.py:216 nova/api/openstack/compute/servers.py:538 #, python-format msgid "marker [%s] not found" msgstr "" -#: nova/api/openstack/common.py:255 +#: nova/api/openstack/common.py:256 #, python-format msgid "href %s does not contain version" msgstr "" -#: nova/api/openstack/common.py:270 +#: nova/api/openstack/common.py:271 msgid "Image metadata limit exceeded" msgstr "" -#: nova/api/openstack/common.py:278 +#: nova/api/openstack/common.py:279 msgid "Image metadata key cannot be blank" msgstr "" -#: nova/api/openstack/common.py:281 +#: nova/api/openstack/common.py:282 msgid "Image metadata key too long" msgstr "" -#: nova/api/openstack/common.py:284 +#: nova/api/openstack/common.py:285 msgid "Invalid image metadata" msgstr "" -#: nova/api/openstack/common.py:335 +#: nova/api/openstack/common.py:336 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "" -#: nova/api/openstack/common.py:338 +#: nova/api/openstack/common.py:339 #, python-format msgid "Instance is in an invalid state for '%(action)s'" msgstr "" -#: nova/api/openstack/common.py:418 +#: nova/api/openstack/common.py:419 msgid "Rejecting snapshot request, snapshots currently disabled" msgstr "" -#: nova/api/openstack/common.py:420 +#: 
nova/api/openstack/common.py:421 msgid "Instance snapshots are not permitted at this time." msgstr "" -#: nova/api/openstack/extensions.py:198 +#: nova/api/openstack/extensions.py:196 #, python-format msgid "Loaded extension: %s" msgstr "" -#: nova/api/openstack/extensions.py:237 +#: nova/api/openstack/extensions.py:235 #, python-format msgid "Ext name: %s" msgstr "" -#: nova/api/openstack/extensions.py:238 +#: nova/api/openstack/extensions.py:236 #, python-format msgid "Ext alias: %s" msgstr "" -#: nova/api/openstack/extensions.py:239 +#: nova/api/openstack/extensions.py:237 #, python-format msgid "Ext description: %s" msgstr "" -#: nova/api/openstack/extensions.py:241 +#: nova/api/openstack/extensions.py:239 #, python-format msgid "Ext namespace: %s" msgstr "" -#: nova/api/openstack/extensions.py:242 +#: nova/api/openstack/extensions.py:240 #, python-format msgid "Ext updated: %s" msgstr "" -#: nova/api/openstack/extensions.py:244 +#: nova/api/openstack/extensions.py:242 #, python-format msgid "Exception loading extension: %s" msgstr "" -#: nova/api/openstack/extensions.py:258 +#: nova/api/openstack/extensions.py:256 #, python-format msgid "Loading extension %s" msgstr "" -#: nova/api/openstack/extensions.py:267 +#: nova/api/openstack/extensions.py:265 #, python-format msgid "Calling extension factory %s" msgstr "" -#: nova/api/openstack/extensions.py:279 +#: nova/api/openstack/extensions.py:277 #, python-format msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: nova/api/openstack/extensions.py:360 +#: nova/api/openstack/extensions.py:358 #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: nova/api/openstack/extensions.py:384 +#: nova/api/openstack/extensions.py:382 #, python-format msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" @@ -2015,7 +2015,7 @@ msgid "cannot understand JSON" msgstr "" #: nova/api/openstack/wsgi.py:223 -#: nova/api/openstack/compute/contrib/hosts.py:85 +#: 
nova/api/openstack/compute/contrib/hosts.py:83 msgid "cannot understand XML" msgstr "" @@ -2064,7 +2064,7 @@ msgstr "" #: nova/api/openstack/compute/server_metadata.py:76 #: nova/api/openstack/compute/server_metadata.py:101 #: nova/api/openstack/compute/server_metadata.py:126 -#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#: nova/api/openstack/compute/contrib/admin_actions.py:212 msgid "Malformed request body" msgstr "" @@ -2098,7 +2098,7 @@ msgstr "" msgid "subclasses must implement construct()!" msgstr "" -#: nova/api/openstack/compute/extensions.py:30 +#: nova/api/openstack/compute/extensions.py:31 msgid "Initializing extension manager." msgstr "" @@ -2117,37 +2117,37 @@ msgstr "" msgid "Invalid minDisk filter [%s]" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:40 -#: nova/api/openstack/compute/images.py:146 -#: nova/api/openstack/compute/images.py:161 +#: nova/api/openstack/compute/image_metadata.py:37 +#: nova/api/openstack/compute/images.py:145 +#: nova/api/openstack/compute/images.py:160 msgid "Image not found." 
msgstr "" -#: nova/api/openstack/compute/image_metadata.py:80 +#: nova/api/openstack/compute/image_metadata.py:77 msgid "Incorrect request body format" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:84 +#: nova/api/openstack/compute/image_metadata.py:81 #: nova/api/openstack/compute/server_metadata.py:80 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:79 msgid "Request body and URI mismatch" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:87 +#: nova/api/openstack/compute/image_metadata.py:84 #: nova/api/openstack/compute/server_metadata.py:84 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:82 msgid "Request body contains too many items" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:113 +#: nova/api/openstack/compute/image_metadata.py:110 msgid "Invalid metadata key" msgstr "" -#: nova/api/openstack/compute/ips.py:74 +#: nova/api/openstack/compute/ips.py:72 msgid "Instance does not exist" msgstr "" -#: nova/api/openstack/compute/ips.py:97 +#: nova/api/openstack/compute/ips.py:95 msgid "Instance is not a member of specified network" msgstr "" @@ -2173,318 +2173,318 @@ msgstr "" msgid "Metadata item was not found" msgstr "" -#: nova/api/openstack/compute/servers.py:445 -#: nova/api/openstack/compute/servers.py:457 -#: nova/api/openstack/compute/servers.py:552 -#: nova/api/openstack/compute/servers.py:720 -#: nova/api/openstack/compute/servers.py:981 -#: nova/api/openstack/compute/servers.py:1084 -#: nova/api/openstack/compute/servers.py:1234 +#: nova/api/openstack/compute/servers.py:447 +#: nova/api/openstack/compute/servers.py:459 +#: nova/api/openstack/compute/servers.py:554 +#: nova/api/openstack/compute/servers.py:722 +#: nova/api/openstack/compute/servers.py:983 +#: nova/api/openstack/compute/servers.py:1086 +#: nova/api/openstack/compute/servers.py:1236 msgid "Instance could not be found" msgstr "" -#: nova/api/openstack/compute/servers.py:496 +#: nova/api/openstack/compute/servers.py:498 msgid "Invalid 
changes-since value" msgstr "" -#: nova/api/openstack/compute/servers.py:515 +#: nova/api/openstack/compute/servers.py:517 msgid "Only administrators may list deleted instances" msgstr "" -#: nova/api/openstack/compute/servers.py:559 +#: nova/api/openstack/compute/servers.py:561 msgid "Server name is not a string or unicode" msgstr "" -#: nova/api/openstack/compute/servers.py:563 +#: nova/api/openstack/compute/servers.py:565 msgid "Server name is an empty string" msgstr "" -#: nova/api/openstack/compute/servers.py:567 +#: nova/api/openstack/compute/servers.py:569 msgid "Server name must be less than 256 characters." msgstr "" -#: nova/api/openstack/compute/servers.py:584 +#: nova/api/openstack/compute/servers.py:586 #, python-format msgid "Bad personality format: missing %s" msgstr "" -#: nova/api/openstack/compute/servers.py:587 +#: nova/api/openstack/compute/servers.py:589 msgid "Bad personality format" msgstr "" -#: nova/api/openstack/compute/servers.py:591 +#: nova/api/openstack/compute/servers.py:593 #, python-format msgid "Personality content for %s cannot be decoded" msgstr "" -#: nova/api/openstack/compute/servers.py:622 +#: nova/api/openstack/compute/servers.py:624 msgid "Unknown argment : port" msgstr "" -#: nova/api/openstack/compute/servers.py:625 +#: nova/api/openstack/compute/servers.py:627 #, python-format msgid "Bad port format: port uuid is not in proper format (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:635 +#: nova/api/openstack/compute/servers.py:637 #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:645 +#: nova/api/openstack/compute/servers.py:647 #, python-format msgid "Invalid fixed IP address (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:658 +#: nova/api/openstack/compute/servers.py:660 #, python-format msgid "Duplicate networks (%s) are not allowed" msgstr "" -#: nova/api/openstack/compute/servers.py:664 +#: 
nova/api/openstack/compute/servers.py:666 #, python-format msgid "Bad network format: missing %s" msgstr "" -#: nova/api/openstack/compute/servers.py:667 +#: nova/api/openstack/compute/servers.py:669 msgid "Bad networks format" msgstr "" -#: nova/api/openstack/compute/servers.py:693 +#: nova/api/openstack/compute/servers.py:695 msgid "Userdata content cannot be decoded" msgstr "" -#: nova/api/openstack/compute/servers.py:700 +#: nova/api/openstack/compute/servers.py:702 msgid "accessIPv4 is not proper IPv4 format" msgstr "" -#: nova/api/openstack/compute/servers.py:707 +#: nova/api/openstack/compute/servers.py:709 msgid "accessIPv6 is not proper IPv6 format" msgstr "" -#: nova/api/openstack/compute/servers.py:736 +#: nova/api/openstack/compute/servers.py:738 msgid "Server name is not defined" msgstr "" -#: nova/api/openstack/compute/servers.py:785 -#: nova/api/openstack/compute/servers.py:891 +#: nova/api/openstack/compute/servers.py:787 +#: nova/api/openstack/compute/servers.py:893 msgid "Invalid flavorRef provided." 
msgstr "" -#: nova/api/openstack/compute/servers.py:825 +#: nova/api/openstack/compute/servers.py:827 msgid "min_count must be an integer value" msgstr "" -#: nova/api/openstack/compute/servers.py:828 +#: nova/api/openstack/compute/servers.py:830 msgid "min_count must be > 0" msgstr "" -#: nova/api/openstack/compute/servers.py:833 +#: nova/api/openstack/compute/servers.py:835 msgid "max_count must be an integer value" msgstr "" -#: nova/api/openstack/compute/servers.py:836 +#: nova/api/openstack/compute/servers.py:838 msgid "max_count must be > 0" msgstr "" -#: nova/api/openstack/compute/servers.py:839 +#: nova/api/openstack/compute/servers.py:841 msgid "min_count must be <= max_count" msgstr "" -#: nova/api/openstack/compute/servers.py:888 +#: nova/api/openstack/compute/servers.py:890 msgid "Can not find requested image" msgstr "" -#: nova/api/openstack/compute/servers.py:894 +#: nova/api/openstack/compute/servers.py:896 msgid "Invalid key_name provided." msgstr "" -#: nova/api/openstack/compute/servers.py:973 +#: nova/api/openstack/compute/servers.py:975 msgid "HostId cannot be updated." msgstr "" -#: nova/api/openstack/compute/servers.py:999 -#: nova/api/openstack/compute/servers.py:1019 +#: nova/api/openstack/compute/servers.py:1001 +#: nova/api/openstack/compute/servers.py:1021 msgid "Instance has not been resized." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1005 +#: nova/api/openstack/compute/servers.py:1007 #, python-format msgid "Error in confirm-resize %s" msgstr "" -#: nova/api/openstack/compute/servers.py:1025 +#: nova/api/openstack/compute/servers.py:1027 #, python-format msgid "Error in revert-resize %s" msgstr "" -#: nova/api/openstack/compute/servers.py:1038 +#: nova/api/openstack/compute/servers.py:1040 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "" -#: nova/api/openstack/compute/servers.py:1042 +#: nova/api/openstack/compute/servers.py:1044 msgid "Missing argument 'type' for reboot" msgstr "" -#: nova/api/openstack/compute/servers.py:1055 +#: nova/api/openstack/compute/servers.py:1057 #, python-format msgid "Error in reboot %s" msgstr "" -#: nova/api/openstack/compute/servers.py:1067 +#: nova/api/openstack/compute/servers.py:1069 msgid "Unable to locate requested flavor." msgstr "" -#: nova/api/openstack/compute/servers.py:1070 +#: nova/api/openstack/compute/servers.py:1072 msgid "Resize requires a flavor change." msgstr "" -#: nova/api/openstack/compute/servers.py:1094 +#: nova/api/openstack/compute/servers.py:1096 msgid "Missing imageRef attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1103 +#: nova/api/openstack/compute/servers.py:1105 msgid "Invalid imageRef provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1112 +#: nova/api/openstack/compute/servers.py:1114 msgid "Missing flavorRef attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1125 +#: nova/api/openstack/compute/servers.py:1127 msgid "No adminPass was specified" msgstr "" -#: nova/api/openstack/compute/servers.py:1129 -#: nova/api/openstack/compute/servers.py:1331 +#: nova/api/openstack/compute/servers.py:1131 +#: nova/api/openstack/compute/servers.py:1333 msgid "Invalid adminPass" msgstr "" -#: nova/api/openstack/compute/servers.py:1140 +#: nova/api/openstack/compute/servers.py:1142 msgid "Unable to parse metadata key/value pairs." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1153 +#: nova/api/openstack/compute/servers.py:1155 msgid "Resize request has invalid 'flavorRef' attribute." msgstr "" -#: nova/api/openstack/compute/servers.py:1156 +#: nova/api/openstack/compute/servers.py:1158 msgid "Resize requests require 'flavorRef' attribute." msgstr "" -#: nova/api/openstack/compute/servers.py:1174 +#: nova/api/openstack/compute/servers.py:1176 #: nova/api/openstack/compute/contrib/aggregates.py:142 #: nova/api/openstack/compute/contrib/keypairs.py:78 -#: nova/api/openstack/compute/contrib/networks.py:75 +#: nova/api/openstack/compute/contrib/networks.py:73 msgid "Invalid request body" msgstr "" -#: nova/api/openstack/compute/servers.py:1179 +#: nova/api/openstack/compute/servers.py:1181 msgid "Could not parse imageRef from request." msgstr "" -#: nova/api/openstack/compute/servers.py:1241 +#: nova/api/openstack/compute/servers.py:1243 msgid "Cannot find image for rebuild" msgstr "" -#: nova/api/openstack/compute/servers.py:1274 +#: nova/api/openstack/compute/servers.py:1276 msgid "createImage entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1358 +#: nova/api/openstack/compute/servers.py:1360 #, python-format msgid "Removing options '%(unk_opt_str)s' from query" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:64 +#: nova/api/openstack/compute/contrib/admin_actions.py:61 #, python-format msgid "Compute.api::pause %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:81 +#: nova/api/openstack/compute/contrib/admin_actions.py:78 #, python-format msgid "Compute.api::unpause %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:98 +#: nova/api/openstack/compute/contrib/admin_actions.py:95 #, python-format msgid "compute.api::suspend %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:115 +#: nova/api/openstack/compute/contrib/admin_actions.py:112 #, python-format msgid "compute.api::resume 
%s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:131 +#: nova/api/openstack/compute/contrib/admin_actions.py:128 #, python-format msgid "Error in migrate %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:145 +#: nova/api/openstack/compute/contrib/admin_actions.py:142 #, python-format msgid "Compute.api::reset_network %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:158 -#: nova/api/openstack/compute/contrib/admin_actions.py:174 -#: nova/api/openstack/compute/contrib/admin_actions.py:190 -#: nova/api/openstack/compute/contrib/admin_actions.py:312 +#: nova/api/openstack/compute/contrib/admin_actions.py:155 +#: nova/api/openstack/compute/contrib/admin_actions.py:171 +#: nova/api/openstack/compute/contrib/admin_actions.py:187 +#: nova/api/openstack/compute/contrib/admin_actions.py:309 #: nova/api/openstack/compute/contrib/multinic.py:41 -#: nova/api/openstack/compute/contrib/rescue.py:44 +#: nova/api/openstack/compute/contrib/rescue.py:45 msgid "Server not found" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:161 +#: nova/api/openstack/compute/contrib/admin_actions.py:158 #, python-format msgid "Compute.api::inject_network_info %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:177 +#: nova/api/openstack/compute/contrib/admin_actions.py:174 #, python-format msgid "Compute.api::lock %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:193 +#: nova/api/openstack/compute/contrib/admin_actions.py:190 #, python-format msgid "Compute.api::unlock %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:223 +#: nova/api/openstack/compute/contrib/admin_actions.py:220 #, python-format msgid "createBackup entity requires %s attribute" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:227 +#: nova/api/openstack/compute/contrib/admin_actions.py:224 msgid "Malformed createBackup entity" msgstr "" -#: 
nova/api/openstack/compute/contrib/admin_actions.py:233 +#: nova/api/openstack/compute/contrib/admin_actions.py:230 msgid "createBackup attribute 'rotation' must be an integer" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:248 +#: nova/api/openstack/compute/contrib/admin_actions.py:245 #: nova/api/openstack/compute/contrib/console_output.py:47 #: nova/api/openstack/compute/contrib/server_diagnostics.py:47 #: nova/api/openstack/compute/contrib/server_start_stop.py:38 msgid "Instance not found" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:276 +#: nova/api/openstack/compute/contrib/admin_actions.py:273 msgid "host and block_migration must be specified." msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:284 +#: nova/api/openstack/compute/contrib/admin_actions.py:281 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:302 +#: nova/api/openstack/compute/contrib/admin_actions.py:299 #, python-format msgid "Desired state must be specified. Valid states are: %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:315 +#: nova/api/openstack/compute/contrib/admin_actions.py:312 #, python-format msgid "Compute.api::resetState %s" msgstr "" @@ -2533,7 +2533,7 @@ msgstr "" msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "" -#: nova/api/openstack/compute/contrib/certificates.py:76 +#: nova/api/openstack/compute/contrib/certificates.py:74 msgid "Only root certificate can be retrieved." msgstr "" @@ -2598,7 +2598,7 @@ msgstr "" #: nova/api/openstack/compute/contrib/floating_ips.py:234 #: nova/api/openstack/compute/contrib/floating_ips.py:290 -#: nova/api/openstack/compute/contrib/security_groups.py:417 +#: nova/api/openstack/compute/contrib/security_groups.py:415 msgid "Missing parameter dict" msgstr "" @@ -2632,41 +2632,41 @@ msgstr "" msgid "Error. 
Unable to associate floating ip" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:124 +#: nova/api/openstack/compute/contrib/hosts.py:122 #, python-format msgid "Host '%s' could not be found." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:153 +#: nova/api/openstack/compute/contrib/hosts.py:151 #, python-format msgid "Invalid status: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:157 +#: nova/api/openstack/compute/contrib/hosts.py:155 #, python-format msgid "Invalid mode: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:161 +#: nova/api/openstack/compute/contrib/hosts.py:159 #, python-format msgid "Invalid update setting: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:179 +#: nova/api/openstack/compute/contrib/hosts.py:177 #, python-format msgid "Putting host %(host)s in maintenance mode %(mode)s." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:190 +#: nova/api/openstack/compute/contrib/hosts.py:188 #, python-format msgid "Setting host %(host)s to %(state)s." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:237 +#: nova/api/openstack/compute/contrib/hosts.py:235 msgid "Describe-resource is admin only functionality" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:245 +#: nova/api/openstack/compute/contrib/hosts.py:243 msgid "Host not found" msgstr "" @@ -2686,7 +2686,7 @@ msgstr "" msgid "No hypervisor matching '%s' could be found." 
msgstr "" -#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:54 +#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:55 #, python-format msgid "Invalid timestamp for date %s" msgstr "" @@ -2704,55 +2704,55 @@ msgstr "" msgid "Unable to find address %r" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:72 +#: nova/api/openstack/compute/contrib/networks.py:70 #, python-format msgid "Network does not have %s action" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:80 +#: nova/api/openstack/compute/contrib/networks.py:78 #, python-format msgid "Disassociating network with id %s" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:84 -#: nova/api/openstack/compute/contrib/networks.py:101 -#: nova/api/openstack/compute/contrib/networks.py:111 +#: nova/api/openstack/compute/contrib/networks.py:82 +#: nova/api/openstack/compute/contrib/networks.py:99 +#: nova/api/openstack/compute/contrib/networks.py:109 msgid "Network not found" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:97 +#: nova/api/openstack/compute/contrib/networks.py:95 #, python-format msgid "Showing network with id %s" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:107 +#: nova/api/openstack/compute/contrib/networks.py:105 #, python-format msgid "Deleting network with id %s" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:122 +#: nova/api/openstack/compute/contrib/networks.py:120 msgid "Missing network in body" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:126 +#: nova/api/openstack/compute/contrib/networks.py:124 msgid "Network label is required" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:130 +#: nova/api/openstack/compute/contrib/networks.py:128 msgid "Network cidr or cidr_v6 is required" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:132 +#: nova/api/openstack/compute/contrib/networks.py:130 #, python-format msgid "Creating network with label %s" msgstr "" 
-#: nova/api/openstack/compute/contrib/networks.py:148 +#: nova/api/openstack/compute/contrib/networks.py:146 #, python-format msgid "Associating network %(network)s with project %(project)s" msgstr "" -#: nova/api/openstack/compute/contrib/networks.py:156 +#: nova/api/openstack/compute/contrib/networks.py:154 #, python-format msgid "Cannot associate network %(network)s with project %(project)s: %(message)s" msgstr "" @@ -2765,24 +2765,24 @@ msgstr "" msgid "Malformed scheduler_hints attribute" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:224 +#: nova/api/openstack/compute/contrib/security_groups.py:222 msgid "Security group id should be integer" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:333 +#: nova/api/openstack/compute/contrib/security_groups.py:331 msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:339 +#: nova/api/openstack/compute/contrib/security_groups.py:337 #, python-format msgid "This rule already exists in group %s" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:420 +#: nova/api/openstack/compute/contrib/security_groups.py:418 msgid "Security group not specified" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:424 +#: nova/api/openstack/compute/contrib/security_groups.py:422 msgid "Security group name cannot be empty" msgstr "" @@ -2794,38 +2794,38 @@ msgstr "" msgid "stop instance" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:75 +#: nova/api/openstack/compute/contrib/volumes.py:73 #, python-format msgid "vol=%s" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:187 +#: nova/api/openstack/compute/contrib/volumes.py:185 #, python-format msgid "Delete volume with id: %s" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:352 -#: nova/api/openstack/compute/contrib/volumes.py:432 +#: nova/api/openstack/compute/contrib/volumes.py:350 +#: 
nova/api/openstack/compute/contrib/volumes.py:430 #, python-format msgid "Instance %s is not attached." msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:383 +#: nova/api/openstack/compute/contrib/volumes.py:381 #, python-format msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:546 +#: nova/api/openstack/compute/contrib/volumes.py:544 #, python-format msgid "Delete snapshot with id: %s" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:589 +#: nova/api/openstack/compute/contrib/volumes.py:587 #, python-format msgid "Create snapshot from volume %s" msgstr "" -#: nova/api/openstack/compute/contrib/volumes.py:593 +#: nova/api/openstack/compute/contrib/volumes.py:591 #, python-format msgid "Invalid value '%s' for force. " msgstr "" @@ -2834,23 +2834,23 @@ msgstr "" msgid "Instance has had its instance_type removed from the DB" msgstr "" -#: nova/cloudpipe/pipelib.py:44 +#: nova/cloudpipe/pipelib.py:45 msgid "Instance type for vpn instances" msgstr "" -#: nova/cloudpipe/pipelib.py:47 +#: nova/cloudpipe/pipelib.py:48 msgid "Template for cloudpipe instance boot script" msgstr "" -#: nova/cloudpipe/pipelib.py:50 +#: nova/cloudpipe/pipelib.py:51 msgid "Network to push into openvpn config" msgstr "" -#: nova/cloudpipe/pipelib.py:53 +#: nova/cloudpipe/pipelib.py:54 msgid "Netmask to push into openvpn config" msgstr "" -#: nova/cloudpipe/pipelib.py:106 +#: nova/cloudpipe/pipelib.py:109 #, python-format msgid "Launching VPN for %s" msgstr "" @@ -2863,200 +2863,200 @@ msgstr "" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: nova/compute/api.py:221 +#: nova/compute/api.py:224 msgid "Cannot run any more instances of this type." msgstr "" -#: nova/compute/api.py:228 +#: nova/compute/api.py:231 #, python-format msgid "Can only run %s more instances of this type." 
msgstr "" -#: nova/compute/api.py:237 +#: nova/compute/api.py:240 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s " "instances. %(msg)s" msgstr "" -#: nova/compute/api.py:257 +#: nova/compute/api.py:260 #, python-format msgid "" "Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " "properties" msgstr "" -#: nova/compute/api.py:267 +#: nova/compute/api.py:270 msgid "Metadata property key blank" msgstr "" -#: nova/compute/api.py:271 +#: nova/compute/api.py:274 msgid "Metadata property key greater than 255 characters" msgstr "" -#: nova/compute/api.py:275 +#: nova/compute/api.py:278 msgid "Metadata property value greater than 255 characters" msgstr "" -#: nova/compute/api.py:499 +#: nova/compute/api.py:502 #, python-format msgid "Going to run %s instances..." msgstr "" -#: nova/compute/api.py:570 +#: nova/compute/api.py:574 #, python-format msgid "bdm %s" msgstr "" -#: nova/compute/api.py:597 +#: nova/compute/api.py:601 #, python-format msgid "block_device_mapping %s" msgstr "" -#: nova/compute/api.py:829 +#: nova/compute/api.py:833 msgid "Going to try to soft delete instance" msgstr "" -#: nova/compute/api.py:846 +#: nova/compute/api.py:850 msgid "No host for instance, deleting immediately" msgstr "" -#: nova/compute/api.py:946 +#: nova/compute/api.py:950 msgid "host for instance is down, deleting from database" msgstr "" -#: nova/compute/api.py:990 +#: nova/compute/api.py:994 msgid "Going to try to terminate instance" msgstr "" -#: nova/compute/api.py:1030 +#: nova/compute/api.py:1034 msgid "Going to try to stop instance" msgstr "" -#: nova/compute/api.py:1044 +#: nova/compute/api.py:1048 msgid "Going to try to start instance" msgstr "" -#: nova/compute/api.py:1108 +#: nova/compute/api.py:1112 #, python-format msgid "Searching by: %s" msgstr "" -#: nova/compute/api.py:1243 +#: nova/compute/api.py:1247 #, python-format msgid "Image type not recognized %s" msgstr "" -#: nova/compute/api.py:1352 +#: 
nova/compute/api.py:1356 #, python-format msgid "snapshot for %s" msgstr "" -#: nova/compute/api.py:1674 +#: nova/compute/api.py:1678 msgid "flavor_id is None. Assuming migration." msgstr "" -#: nova/compute/api.py:1683 +#: nova/compute/api.py:1687 #, python-format msgid "" "Old instance type %(current_instance_type_name)s, new instance type " "%(new_instance_type_name)s" msgstr "" -#: nova/compute/api.py:1725 +#: nova/compute/api.py:1729 #, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s" msgstr "" -#: nova/compute/api.py:1897 +#: nova/compute/api.py:1901 msgid "Locking" msgstr "" -#: nova/compute/api.py:1905 +#: nova/compute/api.py:1909 msgid "Unlocking" msgstr "" -#: nova/compute/api.py:1973 +#: nova/compute/api.py:1977 msgid "Volume must be attached in order to detach." msgstr "" -#: nova/compute/api.py:2058 +#: nova/compute/api.py:2062 #, python-format msgid "Going to try to live migrate instance to %s" msgstr "" -#: nova/compute/api.py:2207 +#: nova/compute/api.py:2211 msgid "Keypair name contains unsafe characters" msgstr "" -#: nova/compute/api.py:2211 +#: nova/compute/api.py:2215 msgid "Keypair name must be between 1 and 255 characters long" msgstr "" -#: nova/compute/api.py:2312 +#: nova/compute/api.py:2316 #, python-format msgid "Security group %s is not a string or unicode" msgstr "" -#: nova/compute/api.py:2315 +#: nova/compute/api.py:2319 #, python-format msgid "Security group %s cannot be empty." msgstr "" -#: nova/compute/api.py:2323 +#: nova/compute/api.py:2327 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " "limited to '%(allowed)'." msgstr "" -#: nova/compute/api.py:2329 +#: nova/compute/api.py:2333 #, python-format msgid "Security group %s should not be greater than 255 characters." msgstr "" -#: nova/compute/api.py:2349 +#: nova/compute/api.py:2353 msgid "Quota exceeded, too many security groups." 
msgstr "" -#: nova/compute/api.py:2352 +#: nova/compute/api.py:2356 #, python-format msgid "Create Security Group %s" msgstr "" -#: nova/compute/api.py:2359 +#: nova/compute/api.py:2363 #, python-format msgid "Security group %s already exists" msgstr "" -#: nova/compute/api.py:2424 +#: nova/compute/api.py:2428 msgid "Security group is still in use" msgstr "" -#: nova/compute/api.py:2432 +#: nova/compute/api.py:2436 msgid "Failed to update usages deallocating security group" msgstr "" -#: nova/compute/api.py:2435 +#: nova/compute/api.py:2439 #, python-format msgid "Delete security group %s" msgstr "" -#: nova/compute/api.py:2692 +#: nova/compute/api.py:2696 #, python-format msgid "Rule (%s) not found" msgstr "" -#: nova/compute/api.py:2701 +#: nova/compute/api.py:2705 msgid "Quota exceeded, too many security group rules." msgstr "" -#: nova/compute/api.py:2704 +#: nova/compute/api.py:2708 #, python-format msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/api.py:2715 +#: nova/compute/api.py:2719 #, python-format msgid "Revoke security group ingress %s" msgstr "" @@ -3136,7 +3136,7 @@ msgstr "" msgid "Error: %s" msgstr "" -#: nova/compute/manager.py:597 nova/compute/manager.py:1740 +#: nova/compute/manager.py:597 nova/compute/manager.py:1743 msgid "Error trying to reschedule" msgstr "" @@ -3225,8 +3225,8 @@ msgstr "" msgid "Ignoring volume cleanup failure due to %s" msgstr "" -#: nova/compute/manager.py:995 nova/compute/manager.py:1909 -#: nova/compute/manager.py:3106 +#: nova/compute/manager.py:995 nova/compute/manager.py:1912 +#: nova/compute/manager.py:3112 #, python-format msgid "%s. Setting instance vm_state to ERROR" msgstr "" @@ -3324,264 +3324,264 @@ msgstr "" msgid "Changing instance metadata according to %(diff)r" msgstr "" -#: nova/compute/manager.py:1677 +#: nova/compute/manager.py:1680 msgid "destination same as source!" 
msgstr "" -#: nova/compute/manager.py:1696 +#: nova/compute/manager.py:1699 msgid "Migrating" msgstr "" -#: nova/compute/manager.py:1906 +#: nova/compute/manager.py:1909 #, python-format msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s" msgstr "" -#: nova/compute/manager.py:1962 +#: nova/compute/manager.py:1965 msgid "Pausing" msgstr "" -#: nova/compute/manager.py:1979 +#: nova/compute/manager.py:1982 msgid "Unpausing" msgstr "" -#: nova/compute/manager.py:2017 +#: nova/compute/manager.py:2020 msgid "Retrieving diagnostics" msgstr "" -#: nova/compute/manager.py:2047 +#: nova/compute/manager.py:2050 msgid "Resuming" msgstr "" -#: nova/compute/manager.py:2063 +#: nova/compute/manager.py:2066 msgid "Reset network" msgstr "" -#: nova/compute/manager.py:2068 +#: nova/compute/manager.py:2071 msgid "Inject network info" msgstr "" -#: nova/compute/manager.py:2071 +#: nova/compute/manager.py:2074 #, python-format msgid "network_info to inject: |%s|" msgstr "" -#: nova/compute/manager.py:2088 +#: nova/compute/manager.py:2091 msgid "Get console output" msgstr "" -#: nova/compute/manager.py:2113 +#: nova/compute/manager.py:2116 msgid "Getting vnc console" msgstr "" -#: nova/compute/manager.py:2141 +#: nova/compute/manager.py:2144 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:2185 +#: nova/compute/manager.py:2188 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:2194 +#: nova/compute/manager.py:2197 #, python-format msgid "" "Failed to connect to volume %(volume_id)s while attaching at " "%(mountpoint)s" msgstr "" -#: nova/compute/manager.py:2209 +#: nova/compute/manager.py:2212 #, python-format msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:2238 +#: nova/compute/manager.py:2241 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "" -#: 
nova/compute/manager.py:2242 +#: nova/compute/manager.py:2245 msgid "Detaching volume from unknown instance" msgstr "" -#: nova/compute/manager.py:2255 +#: nova/compute/manager.py:2258 #, python-format msgid "Faild to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:2299 +#: nova/compute/manager.py:2302 #, python-format msgid "Host %(host)s not found" msgstr "" -#: nova/compute/manager.py:2359 +#: nova/compute/manager.py:2362 msgid "Instance has no volume." msgstr "" -#: nova/compute/manager.py:2419 +#: nova/compute/manager.py:2422 #, python-format msgid "Pre live migration failed at %(dest)s" msgstr "" -#: nova/compute/manager.py:2445 +#: nova/compute/manager.py:2448 msgid "_post_live_migration() is started.." msgstr "" -#: nova/compute/manager.py:2478 +#: nova/compute/manager.py:2481 msgid "No floating_ip found" msgstr "" -#: nova/compute/manager.py:2486 +#: nova/compute/manager.py:2489 msgid "No floating_ip found." msgstr "" -#: nova/compute/manager.py:2488 +#: nova/compute/manager.py:2491 #, python-format msgid "" "Live migration: Unexpected error: cannot inherit floating ip.\n" "%(e)s" msgstr "" -#: nova/compute/manager.py:2514 +#: nova/compute/manager.py:2517 #, python-format msgid "Migrating instance to %(dest)s finished successfully." msgstr "" -#: nova/compute/manager.py:2516 +#: nova/compute/manager.py:2519 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." 
msgstr "" -#: nova/compute/manager.py:2530 +#: nova/compute/manager.py:2533 msgid "Post operation of migration started" msgstr "" -#: nova/compute/manager.py:2661 +#: nova/compute/manager.py:2664 msgid "Updated the info_cache for instance" msgstr "" -#: nova/compute/manager.py:2687 +#: nova/compute/manager.py:2693 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" msgstr "" -#: nova/compute/manager.py:2692 +#: nova/compute/manager.py:2698 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "" -#: nova/compute/manager.py:2701 +#: nova/compute/manager.py:2707 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" msgstr "" -#: nova/compute/manager.py:2708 +#: nova/compute/manager.py:2714 #, python-format msgid "Instance %(instance_uuid)s not found" msgstr "" -#: nova/compute/manager.py:2712 +#: nova/compute/manager.py:2718 msgid "In ERROR state" msgstr "" -#: nova/compute/manager.py:2719 +#: nova/compute/manager.py:2725 #, python-format msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None" msgstr "" -#: nova/compute/manager.py:2727 +#: nova/compute/manager.py:2733 #, python-format msgid "Error auto-confirming resize: %(e)s. Will retry later." msgstr "" -#: nova/compute/manager.py:2744 +#: nova/compute/manager.py:2750 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." 
msgstr "" -#: nova/compute/manager.py:2762 +#: nova/compute/manager.py:2768 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:2785 +#: nova/compute/manager.py:2791 msgid "Updating bandwidth usage cache" msgstr "" -#: nova/compute/manager.py:2850 +#: nova/compute/manager.py:2856 msgid "Updating host status" msgstr "" -#: nova/compute/manager.py:2876 +#: nova/compute/manager.py:2882 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." msgstr "" -#: nova/compute/manager.py:2882 nova/compute/manager.py:2920 +#: nova/compute/manager.py:2888 nova/compute/manager.py:2926 msgid "During sync_power_state the instance has a pending task. Skip." msgstr "" -#: nova/compute/manager.py:2907 +#: nova/compute/manager.py:2913 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" msgstr "" -#: nova/compute/manager.py:2944 +#: nova/compute/manager.py:2950 msgid "Instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:2956 nova/compute/manager.py:2967 -#: nova/compute/manager.py:2981 +#: nova/compute/manager.py:2962 nova/compute/manager.py:2973 +#: nova/compute/manager.py:2987 msgid "error during stop() in sync_power_state." msgstr "" -#: nova/compute/manager.py:2961 +#: nova/compute/manager.py:2967 msgid "Instance is paused or suspended unexpectedly. Calling the stop API." msgstr "" -#: nova/compute/manager.py:2974 +#: nova/compute/manager.py:2980 msgid "Instance is not stopped. Calling the stop API." msgstr "" -#: nova/compute/manager.py:2990 +#: nova/compute/manager.py:2996 msgid "Instance is not (soft-)deleted." msgstr "" -#: nova/compute/manager.py:2998 +#: nova/compute/manager.py:3004 msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." 
msgstr "" -#: nova/compute/manager.py:3011 +#: nova/compute/manager.py:3017 msgid "Reclaiming deleted instance" msgstr "" -#: nova/compute/manager.py:3060 +#: nova/compute/manager.py:3066 #, python-format msgid "" "Detected instance with name label '%(name)s' which is marked as DELETED " "but still present on host." msgstr "" -#: nova/compute/manager.py:3067 +#: nova/compute/manager.py:3073 #, python-format msgid "" "Destroying instance with name label '%(name)s' which is marked as DELETED" " but still present on host." msgstr "" -#: nova/compute/manager.py:3074 +#: nova/compute/manager.py:3080 #, python-format msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" msgstr "" @@ -3754,16 +3754,16 @@ msgstr "" msgid "Using %(prefix)s instead of %(req_prefix)s" msgstr "" -#: nova/console/manager.py:81 nova/console/vmrc_manager.py:71 +#: nova/console/manager.py:81 nova/console/vmrc_manager.py:63 msgid "Adding console" msgstr "" -#: nova/console/manager.py:102 nova/console/vmrc_manager.py:123 +#: nova/console/manager.py:102 nova/console/vmrc_manager.py:115 #, python-format msgid "Tried to remove non-existent console %(console_id)s." msgstr "" -#: nova/console/vmrc_manager.py:126 +#: nova/console/vmrc_manager.py:118 #, python-format msgid "Removing console %(console_id)s." 
msgstr "" @@ -3808,50 +3808,50 @@ msgstr "" msgid "Checking Token: %(token)s, %(token_valid)s)" msgstr "" -#: nova/db/sqlalchemy/api.py:206 +#: nova/db/sqlalchemy/api.py:208 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:2796 +#: nova/db/sqlalchemy/api.py:2792 #, python-format msgid "Change will make usage less than 0 for the following resources: %(unders)s" msgstr "" -#: nova/db/sqlalchemy/api.py:4697 +#: nova/db/sqlalchemy/api.py:4693 msgid "Backend exists" msgstr "" -#: nova/db/sqlalchemy/api.py:4717 nova/db/sqlalchemy/api.py:4743 +#: nova/db/sqlalchemy/api.py:4713 nova/db/sqlalchemy/api.py:4739 #, python-format msgid "No backend config with id %(sm_backend_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:4755 +#: nova/db/sqlalchemy/api.py:4751 #, python-format msgid "No backend config with sr uuid %(sr_uuid)s" msgstr "" -#: nova/db/sqlalchemy/api.py:4789 +#: nova/db/sqlalchemy/api.py:4785 msgid "Flavor exists" msgstr "" -#: nova/db/sqlalchemy/api.py:4804 +#: nova/db/sqlalchemy/api.py:4800 #, python-format msgid "%(sm_flavor_id) flavor not found" msgstr "" -#: nova/db/sqlalchemy/api.py:4823 +#: nova/db/sqlalchemy/api.py:4819 #, python-format msgid "No sm_flavor called %(sm_flavor_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:4840 +#: nova/db/sqlalchemy/api.py:4836 #, python-format msgid "No sm_flavor called %(sm_flavor_label)s" msgstr "" -#: nova/db/sqlalchemy/api.py:4878 +#: nova/db/sqlalchemy/api.py:4874 #, python-format msgid "No sm_volume with id %(volume_id)s" msgstr "" @@ -3864,7 +3864,7 @@ msgstr "" msgid "Upgrade DB using Essex release first." msgstr "" -#: nova/db/sqlalchemy/session.py:174 +#: nova/db/sqlalchemy/session.py:316 #, python-format msgid "SQL connection failed. %s attempts left." 
msgstr "" @@ -4108,69 +4108,69 @@ msgstr "" msgid "Unplugged gateway interface '%s'" msgstr "" -#: nova/network/manager.py:284 +#: nova/network/manager.py:285 #, python-format msgid "Fixed ip %(fixed_ip_id)s not found" msgstr "" -#: nova/network/manager.py:293 nova/network/manager.py:552 +#: nova/network/manager.py:294 nova/network/manager.py:553 #, python-format msgid "Interface %(interface)s not found" msgstr "" -#: nova/network/manager.py:308 +#: nova/network/manager.py:309 #, python-format msgid "floating IP allocation for instance |%s|" msgstr "" -#: nova/network/manager.py:372 +#: nova/network/manager.py:373 msgid "Floating IP is not associated. Ignore." msgstr "" -#: nova/network/manager.py:390 +#: nova/network/manager.py:391 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "" -#: nova/network/manager.py:394 +#: nova/network/manager.py:395 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "" -#: nova/network/manager.py:415 +#: nova/network/manager.py:416 #, python-format msgid "Quota exceeded for %(pid)s, tried to allocate floating IP" msgstr "" -#: nova/network/manager.py:476 +#: nova/network/manager.py:477 msgid "Failed to update usages deallocating floating IP" msgstr "" -#: nova/network/manager.py:673 +#: nova/network/manager.py:674 #, python-format msgid "Starting migration network for instance %(instance_uuid)s" msgstr "" -#: nova/network/manager.py:680 +#: nova/network/manager.py:681 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notmigrate it " msgstr "" -#: nova/network/manager.py:706 +#: nova/network/manager.py:707 #, python-format msgid "Finishing migration network for instance %(instance_uuid)s" msgstr "" -#: nova/network/manager.py:714 +#: nova/network/manager.py:715 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notsetup it." 
msgstr "" -#: nova/network/manager.py:761 +#: nova/network/manager.py:762 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -4178,39 +4178,39 @@ msgid "" "ignored." msgstr "" -#: nova/network/manager.py:807 +#: nova/network/manager.py:808 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "" -#: nova/network/manager.py:817 +#: nova/network/manager.py:818 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "" -#: nova/network/manager.py:931 +#: nova/network/manager.py:932 #, python-format msgid "Disassociated %s stale fixed ip(s)" msgstr "" -#: nova/network/manager.py:935 +#: nova/network/manager.py:936 msgid "setting network host" msgstr "" -#: nova/network/manager.py:1049 +#: nova/network/manager.py:1050 msgid "network allocations" msgstr "" -#: nova/network/manager.py:1054 +#: nova/network/manager.py:1055 #, python-format msgid "networks retrieved for instance: |%(networks)s|" msgstr "" -#: nova/network/manager.py:1084 +#: nova/network/manager.py:1085 msgid "network deallocation for instance" msgstr "" -#: nova/network/manager.py:1307 +#: nova/network/manager.py:1308 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -4218,89 +4218,89 @@ msgid "" "created." msgstr "" -#: nova/network/manager.py:1388 +#: nova/network/manager.py:1389 #, python-format msgid "Unable to release %s because vif doesn't exist." 
msgstr "" -#: nova/network/manager.py:1409 +#: nova/network/manager.py:1410 #, python-format msgid "Leased IP |%(address)s|" msgstr "" -#: nova/network/manager.py:1413 +#: nova/network/manager.py:1414 #, python-format msgid "IP %s leased that is not associated" msgstr "" -#: nova/network/manager.py:1421 +#: nova/network/manager.py:1422 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "" -#: nova/network/manager.py:1426 +#: nova/network/manager.py:1427 #, python-format msgid "Released IP |%(address)s|" msgstr "" -#: nova/network/manager.py:1430 +#: nova/network/manager.py:1431 #, python-format msgid "IP %s released that is not associated" msgstr "" -#: nova/network/manager.py:1433 +#: nova/network/manager.py:1434 #, python-format msgid "IP %s released that was not leased" msgstr "" -#: nova/network/manager.py:1452 +#: nova/network/manager.py:1453 #, python-format msgid "%s must be an integer" msgstr "" -#: nova/network/manager.py:1476 +#: nova/network/manager.py:1477 msgid "Maximum allowed length for 'label' is 255." msgstr "" -#: nova/network/manager.py:1496 +#: nova/network/manager.py:1497 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " "network_size flag." msgstr "" -#: nova/network/manager.py:1577 +#: nova/network/manager.py:1578 msgid "cidr already in use" msgstr "" -#: nova/network/manager.py:1580 +#: nova/network/manager.py:1581 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "" -#: nova/network/manager.py:1591 +#: nova/network/manager.py:1592 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " "(%(smaller)s)" msgstr "" -#: nova/network/manager.py:1650 +#: nova/network/manager.py:1651 msgid "Network already exists!" 
msgstr "" -#: nova/network/manager.py:1670 +#: nova/network/manager.py:1671 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "" -#: nova/network/manager.py:2136 +#: nova/network/manager.py:2137 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" msgstr "" -#: nova/network/manager.py:2143 +#: nova/network/manager.py:2144 #, python-format msgid "" "The network range is not big enough to fit %(num_networks)s. Network size" @@ -4333,42 +4333,42 @@ msgstr "" msgid "_get_auth_token() failed" msgstr "" -#: nova/network/quantumv2/api.py:98 +#: nova/network/quantumv2/api.py:105 #, python-format msgid "allocate_for_instance() for %s" msgstr "" -#: nova/network/quantumv2/api.py:101 +#: nova/network/quantumv2/api.py:108 #, python-format msgid "empty project id for instance %s" msgstr "" -#: nova/network/quantumv2/api.py:154 +#: nova/network/quantumv2/api.py:161 #, python-format msgid "Fail to delete port %(portid)s with failure: %(exception)s" msgstr "" -#: nova/network/quantumv2/api.py:166 +#: nova/network/quantumv2/api.py:173 #, python-format msgid "deallocate_for_instance() for %s" msgstr "" -#: nova/network/quantumv2/api.py:175 +#: nova/network/quantumv2/api.py:182 #, python-format msgid "Failed to delete quantum port %(portid)s " msgstr "" -#: nova/network/quantumv2/api.py:185 +#: nova/network/quantumv2/api.py:192 #, python-format msgid "get_instance_nw_info() for %s" msgstr "" -#: nova/network/quantumv2/api.py:200 +#: nova/network/quantumv2/api.py:207 #, python-format msgid "validate_networks() for %s" msgstr "" -#: nova/network/quantumv2/api.py:452 +#: nova/network/quantumv2/api.py:459 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" @@ -4764,42 +4764,42 @@ msgstr "" msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: nova/scheduler/chance.py:49 +#: nova/scheduler/chance.py:50 msgid "Is the appropriate service running?" 
msgstr "" -#: nova/scheduler/chance.py:54 +#: nova/scheduler/chance.py:55 msgid "Could not find another compute" msgstr "" -#: nova/scheduler/driver.py:64 +#: nova/scheduler/driver.py:66 msgid "Exception during scheduler.run_instance" msgstr "" -#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:184 +#: nova/scheduler/driver.py:70 nova/scheduler/manager.py:185 #, python-format msgid "Setting instance to %(state)s state." msgstr "" -#: nova/scheduler/driver.py:110 +#: nova/scheduler/driver.py:112 #, python-format msgid "Casted '%(method)s' to compute '%(host)s'" msgstr "" -#: nova/scheduler/driver.py:125 +#: nova/scheduler/driver.py:127 #, python-format msgid "Casted '%(method)s' to %(topic)s '%(host)s'" msgstr "" -#: nova/scheduler/driver.py:173 +#: nova/scheduler/driver.py:175 msgid "Driver must implement schedule_prep_resize" msgstr "" -#: nova/scheduler/driver.py:181 +#: nova/scheduler/driver.py:183 msgid "Driver must implement schedule_run_instance" msgstr "" -#: nova/scheduler/driver.py:313 +#: nova/scheduler/driver.py:315 #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " @@ -4836,51 +4836,51 @@ msgstr "" msgid "Weighted %(weighted_host)s" msgstr "" -#: nova/scheduler/host_manager.py:246 +#: nova/scheduler/host_manager.py:247 #, python-format msgid "Host filter fails for ignored host %(host)s" msgstr "" -#: nova/scheduler/host_manager.py:253 +#: nova/scheduler/host_manager.py:254 #, python-format msgid "Host filter fails for non-forced host %(host)s" msgstr "" -#: nova/scheduler/host_manager.py:259 +#: nova/scheduler/host_manager.py:260 #, python-format msgid "Host filter function %(func)s failed for %(host)s" msgstr "" -#: nova/scheduler/host_manager.py:265 +#: nova/scheduler/host_manager.py:266 #, python-format msgid "Host filter passes for %(host)s" msgstr "" -#: nova/scheduler/host_manager.py:328 +#: nova/scheduler/host_manager.py:329 #, python-format msgid "Received %(service_name)s service update from %(host)s." 
msgstr "" -#: nova/scheduler/host_manager.py:351 +#: nova/scheduler/host_manager.py:352 msgid "host_manager only implemented for 'compute'" msgstr "" -#: nova/scheduler/host_manager.py:359 +#: nova/scheduler/host_manager.py:360 #, python-format msgid "No service for compute ID %s" msgstr "" -#: nova/scheduler/manager.py:168 +#: nova/scheduler/manager.py:169 #, python-format msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: nova/scheduler/scheduler_options.py:69 +#: nova/scheduler/scheduler_options.py:70 #, python-format msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: nova/scheduler/scheduler_options.py:78 +#: nova/scheduler/scheduler_options.py:79 #, python-format msgid "Could not decode scheduler options: '%(e)s'" msgstr "" @@ -4892,21 +4892,21 @@ msgstr "" msgid "%(host_state)s fails instance_type extra_specs requirements" msgstr "" -#: nova/scheduler/filters/compute_filter.py:38 +#: nova/scheduler/filters/compute_filter.py:39 #, python-format msgid "%(host_state)s is disabled or has not been heard from in a while" msgstr "" -#: nova/scheduler/filters/compute_filter.py:42 +#: nova/scheduler/filters/compute_filter.py:43 #, python-format msgid "%(host_state)s is disabled via capabilities" msgstr "" -#: nova/scheduler/filters/core_filter.py:45 +#: nova/scheduler/filters/core_filter.py:46 msgid "VCPUs not set; assuming CPU collection broken" msgstr "" -#: nova/scheduler/filters/disk_filter.py:47 +#: nova/scheduler/filters/disk_filter.py:48 #, python-format msgid "" "%(host_state)s does not have %(requested_disk)s MB usable disk, it only " @@ -4939,21 +4939,21 @@ msgstr "" msgid "%(host_state)s does not support requested instance_properties" msgstr "" -#: nova/scheduler/filters/io_ops_filter.py:42 +#: nova/scheduler/filters/io_ops_filter.py:43 #, python-format msgid "" "%(host_state)s fails I/O ops check: Max IOs per host is set to " "%(max_io_ops)s" msgstr "" -#: nova/scheduler/filters/num_instances_filter.py:39 +#: 
nova/scheduler/filters/num_instances_filter.py:40 #, python-format msgid "" "%(host_state)s fails num_instances check: Max instances per host is set " "to %(max_instances)s" msgstr "" -#: nova/scheduler/filters/ram_filter.py:46 +#: nova/scheduler/filters/ram_filter.py:47 #, python-format msgid "" "%(host_state)s does not have %(requested_ram)s MB usable ram, it only has" @@ -4965,7 +4965,7 @@ msgstr "" msgid "Previously tried hosts: %(hosts)s. (host=%(host)s)" msgstr "" -#: nova/scheduler/filters/trusted_filter.py:201 +#: nova/scheduler/filters/trusted_filter.py:202 #, python-format msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)" msgstr "" @@ -5065,12 +5065,12 @@ msgstr "" msgid "uuid" msgstr "" -#: nova/tests/test_xenapi.py:722 +#: nova/tests/test_xenapi.py:724 #, python-format msgid "Creating files in %s to simulate guest agent" msgstr "" -#: nova/tests/test_xenapi.py:733 +#: nova/tests/test_xenapi.py:735 #, python-format msgid "Removing simulated guest agent files in %s" msgstr "" @@ -5152,28 +5152,28 @@ msgstr "" msgid "test_snapshot_detail: resp_dict=%s" msgstr "" -#: nova/tests/compute/test_compute.py:633 -#: nova/tests/compute/test_compute.py:651 -#: nova/tests/compute/test_compute.py:687 -#: nova/tests/compute/test_compute.py:712 -#: nova/tests/compute/test_compute.py:2387 +#: nova/tests/compute/test_compute.py:619 +#: nova/tests/compute/test_compute.py:637 +#: nova/tests/compute/test_compute.py:673 +#: nova/tests/compute/test_compute.py:698 +#: nova/tests/compute/test_compute.py:2373 #, python-format msgid "Running instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:639 -#: nova/tests/compute/test_compute.py:674 -#: nova/tests/compute/test_compute.py:700 -#: nova/tests/compute/test_compute.py:730 +#: nova/tests/compute/test_compute.py:625 +#: nova/tests/compute/test_compute.py:660 +#: nova/tests/compute/test_compute.py:686 +#: nova/tests/compute/test_compute.py:716 #, python-format msgid "After terminating instances: %s" msgstr "" -#: 
nova/tests/compute/test_compute.py:1107 +#: nova/tests/compute/test_compute.py:1093 msgid "Internal error" msgstr "" -#: nova/tests/compute/test_compute.py:2398 +#: nova/tests/compute/test_compute.py:2384 #, python-format msgid "After force-killing instances: %s" msgstr "" @@ -5522,7 +5522,7 @@ msgid "instance %s: finished toXML method" msgstr "" #: nova/virt/baremetal/driver.py:559 nova/virt/hyperv/hostops.py:43 -#: nova/virt/libvirt/driver.py:1986 +#: nova/virt/libvirt/driver.py:1988 msgid "" "Cannot get the number of cpu, because this function is not implemented " "for this platform. This error can be safely ignored for now." @@ -5538,7 +5538,7 @@ msgid "Updating!" msgstr "" #: nova/virt/baremetal/driver.py:726 nova/virt/hyperv/hostops.py:141 -#: nova/virt/libvirt/driver.py:3028 nova/virt/xenapi/host.py:149 +#: nova/virt/libvirt/driver.py:3030 nova/virt/xenapi/host.py:149 msgid "Updating host stats" msgstr "" @@ -6139,7 +6139,7 @@ msgstr "" msgid "Failed to remove volume from VM %s" msgstr "" -#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:603 +#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:605 msgid "Could not determine iscsi initiator name" msgstr "" @@ -6257,219 +6257,219 @@ msgstr "" msgid "The ISCSI initiator name can't be found. Choosing the default one" msgstr "" -#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1462 +#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1464 #: nova/virt/xenapi/vm_utils.py:476 #, python-format msgid "block_device_list %s" msgstr "" -#: nova/virt/libvirt/driver.py:332 +#: nova/virt/libvirt/driver.py:334 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:338 +#: nova/virt/libvirt/driver.py:340 #, python-format msgid "Connecting to libvirt: %s" msgstr "" -#: nova/virt/libvirt/driver.py:359 +#: nova/virt/libvirt/driver.py:361 msgid "Connection to libvirt broke" msgstr "" -#: nova/virt/libvirt/driver.py:381 nova/virt/libvirt/driver.py:384 +#: nova/virt/libvirt/driver.py:383 nova/virt/libvirt/driver.py:386 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" -#: nova/virt/libvirt/driver.py:466 +#: nova/virt/libvirt/driver.py:468 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:480 +#: nova/virt/libvirt/driver.py:482 msgid "During wait destroy, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:485 +#: nova/virt/libvirt/driver.py:487 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:507 +#: nova/virt/libvirt/driver.py:509 msgid "Error from libvirt during undefineFlags. Retrying with undefine" msgstr "" -#: nova/virt/libvirt/driver.py:522 +#: nova/virt/libvirt/driver.py:524 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:535 +#: nova/virt/libvirt/driver.py:537 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:552 +#: nova/virt/libvirt/driver.py:554 #, python-format msgid "Deleting instance files %(target)s" msgstr "" -#: nova/virt/libvirt/driver.py:566 +#: nova/virt/libvirt/driver.py:568 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:728 +#: nova/virt/libvirt/driver.py:730 msgid "During detach_volume, instance disappeared." 
msgstr "" -#: nova/virt/libvirt/driver.py:738 +#: nova/virt/libvirt/driver.py:740 msgid "attaching LXC block device" msgstr "" -#: nova/virt/libvirt/driver.py:751 +#: nova/virt/libvirt/driver.py:753 msgid "detaching LXC block device" msgstr "" -#: nova/virt/libvirt/driver.py:883 +#: nova/virt/libvirt/driver.py:885 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:887 +#: nova/virt/libvirt/driver.py:889 msgid "Failed to soft reboot instance." msgstr "" -#: nova/virt/libvirt/driver.py:919 +#: nova/virt/libvirt/driver.py:921 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:954 +#: nova/virt/libvirt/driver.py:956 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:1084 +#: nova/virt/libvirt/driver.py:1086 msgid "Instance is running" msgstr "" -#: nova/virt/libvirt/driver.py:1091 nova/virt/powervm/operator.py:253 +#: nova/virt/libvirt/driver.py:1093 nova/virt/powervm/operator.py:253 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:1107 +#: nova/virt/libvirt/driver.py:1109 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:1153 +#: nova/virt/libvirt/driver.py:1155 msgid "Guest does not have a console available" msgstr "" -#: nova/virt/libvirt/driver.py:1197 +#: nova/virt/libvirt/driver.py:1199 #, python-format msgid "Path '%(path)s' supports direct I/O" msgstr "" -#: nova/virt/libvirt/driver.py:1201 +#: nova/virt/libvirt/driver.py:1203 #, python-format msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:1205 nova/virt/libvirt/driver.py:1209 +#: nova/virt/libvirt/driver.py:1207 nova/virt/libvirt/driver.py:1211 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:1275 +#: nova/virt/libvirt/driver.py:1277 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:1401 +#: nova/virt/libvirt/driver.py:1403 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:1411 +#: nova/virt/libvirt/driver.py:1413 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:1425 +#: nova/virt/libvirt/driver.py:1427 #, python-format msgid "Injecting %(injection)s into image %(img_id)s" msgstr "" -#: nova/virt/libvirt/driver.py:1435 +#: nova/virt/libvirt/driver.py:1437 #, python-format msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:1509 +#: nova/virt/libvirt/driver.py:1511 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" msgstr "" -#: nova/virt/libvirt/driver.py:1515 +#: nova/virt/libvirt/driver.py:1517 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" -#: nova/virt/libvirt/driver.py:1519 +#: nova/virt/libvirt/driver.py:1521 msgid "A CPU model name 
should not be set when a host CPU model is requested" msgstr "" -#: nova/virt/libvirt/driver.py:1523 +#: nova/virt/libvirt/driver.py:1525 #, python-format msgid "CPU mode '%(mode)s' model '%(model)s' was chosen" msgstr "" -#: nova/virt/libvirt/driver.py:1539 +#: nova/virt/libvirt/driver.py:1541 msgid "" "Passthrough of the host CPU was requested but this libvirt version does " "not support this feature" msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1833 msgid "Starting toXML method" msgstr "" -#: nova/virt/libvirt/driver.py:1835 +#: nova/virt/libvirt/driver.py:1837 msgid "Finished toXML method" msgstr "" -#: nova/virt/libvirt/driver.py:1852 +#: nova/virt/libvirt/driver.py:1854 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:2104 +#: nova/virt/libvirt/driver.py:2106 msgid "libvirt version is too old (does not support getVersion)" msgstr "" -#: nova/virt/libvirt/driver.py:2291 +#: nova/virt/libvirt/driver.py:2293 msgid "Block migration can not be used with shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:2299 +#: nova/virt/libvirt/driver.py:2301 msgid "Live migration can not be used without shared storage." 
msgstr "" -#: nova/virt/libvirt/driver.py:2334 +#: nova/virt/libvirt/driver.py:2336 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2354 +#: nova/virt/libvirt/driver.py:2356 #, python-format msgid "" "Instance launched has CPU info:\n" "%s" msgstr "" -#: nova/virt/libvirt/driver.py:2366 +#: nova/virt/libvirt/driver.py:2368 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -6479,51 +6479,51 @@ msgid "" "Refer to %(u)s" msgstr "" -#: nova/virt/libvirt/driver.py:2383 +#: nova/virt/libvirt/driver.py:2385 #, python-format msgid "" "Creating tmpfile %s to notify to other compute nodes that they should " "mount the same storage." msgstr "" -#: nova/virt/libvirt/driver.py:2431 +#: nova/virt/libvirt/driver.py:2433 #, python-format msgid "The firewall filter for %s does not exist" msgstr "" -#: nova/virt/libvirt/driver.py:2501 +#: nova/virt/libvirt/driver.py:2503 #, python-format msgid "Live Migration failure: %(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:2545 +#: nova/virt/libvirt/driver.py:2547 #, python-format msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." msgstr "" -#: nova/virt/libvirt/driver.py:2672 +#: nova/virt/libvirt/driver.py:2674 #, python-format msgid "skipping %(path)s since it looks like volume" msgstr "" -#: nova/virt/libvirt/driver.py:2721 +#: nova/virt/libvirt/driver.py:2723 #, python-format msgid "Getting disk size of %(i_name)s: %(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:2783 +#: nova/virt/libvirt/driver.py:2785 msgid "Starting migrate_disk_and_power_off" msgstr "" -#: nova/virt/libvirt/driver.py:2842 +#: nova/virt/libvirt/driver.py:2844 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2849 +#: nova/virt/libvirt/driver.py:2851 msgid "Starting finish_migration" msgstr "" -#: nova/virt/libvirt/driver.py:2900 +#: nova/virt/libvirt/driver.py:2902 msgid "Starting finish_revert_migration" msgstr "" @@ -6564,127 +6564,127 @@ msgstr "" msgid "Unknown image_type=%s" msgstr "" -#: nova/virt/libvirt/imagecache.py:164 +#: nova/virt/libvirt/imagecache.py:166 #, python-format msgid "%s is a valid instance name" msgstr "" -#: nova/virt/libvirt/imagecache.py:167 +#: nova/virt/libvirt/imagecache.py:169 #, python-format msgid "%s has a disk file" msgstr "" -#: nova/virt/libvirt/imagecache.py:169 +#: nova/virt/libvirt/imagecache.py:171 #, python-format msgid "Instance %(instance)s is backed by %(backing)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:182 +#: nova/virt/libvirt/imagecache.py:184 #, python-format msgid "" "Instance %(instance)s is using a backing file %(backing)s which does not " "appear in the image service" msgstr "" -#: nova/virt/libvirt/imagecache.py:234 +#: nova/virt/libvirt/imagecache.py:239 #, python-format msgid "%(id)s (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:244 +#: nova/virt/libvirt/imagecache.py:249 #, python-format msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" msgstr "" -#: nova/virt/libvirt/imagecache.py:263 +#: nova/virt/libvirt/imagecache.py:268 #, python-format msgid "Cannot remove %(base_file)s, it does not exist" msgstr "" -#: nova/virt/libvirt/imagecache.py:275 +#: nova/virt/libvirt/imagecache.py:280 #, python-format msgid "Base file too young to remove: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:278 +#: nova/virt/libvirt/imagecache.py:283 #, python-format msgid "Removing base file: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:285 +#: nova/virt/libvirt/imagecache.py:290 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:296 +#: 
nova/virt/libvirt/imagecache.py:301 #, python-format msgid "%(id)s (%(base_file)s): checking" msgstr "" -#: nova/virt/libvirt/imagecache.py:315 +#: nova/virt/libvirt/imagecache.py:320 #, python-format msgid "" "%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " "on other nodes" msgstr "" -#: nova/virt/libvirt/imagecache.py:327 +#: nova/virt/libvirt/imagecache.py:332 #, python-format msgid "" "%(id)s (%(base_file)s): warning -- an absent base file is in use! " "instances: %(instance_list)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:335 +#: nova/virt/libvirt/imagecache.py:340 #, python-format msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" msgstr "" -#: nova/virt/libvirt/imagecache.py:345 +#: nova/virt/libvirt/imagecache.py:350 #, python-format msgid "%(id)s (%(base_file)s): image is not in use" msgstr "" -#: nova/virt/libvirt/imagecache.py:351 +#: nova/virt/libvirt/imagecache.py:356 #, python-format msgid "%(id)s (%(base_file)s): image is in use" msgstr "" -#: nova/virt/libvirt/imagecache.py:374 +#: nova/virt/libvirt/imagecache.py:379 #, python-format msgid "Skipping verification, no base directory at %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:378 +#: nova/virt/libvirt/imagecache.py:383 msgid "Verify base images" msgstr "" -#: nova/virt/libvirt/imagecache.py:385 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "Image id %(id)s yields fingerprint %(fingerprint)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:403 +#: nova/virt/libvirt/imagecache.py:408 #, python-format msgid "Unknown base file: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:408 +#: nova/virt/libvirt/imagecache.py:413 #, python-format msgid "Active base files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:411 +#: nova/virt/libvirt/imagecache.py:416 #, python-format msgid "Corrupt base files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:415 +#: nova/virt/libvirt/imagecache.py:420 #, python-format msgid "Removable base 
files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:423 +#: nova/virt/libvirt/imagecache.py:428 msgid "Verification complete" msgstr "" @@ -6693,14 +6693,14 @@ msgstr "" msgid "LVM snapshots not implemented" msgstr "" -#: nova/virt/libvirt/utils.py:131 +#: nova/virt/libvirt/utils.py:134 #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db " "available, but %(size)db required by volume %(lv)s." msgstr "" -#: nova/virt/libvirt/utils.py:140 +#: nova/virt/libvirt/utils.py:143 #, python-format msgid "" "Volume group %(vg)s will not be able to hold sparse volume %(lv)s. " @@ -6708,68 +6708,68 @@ msgid "" "%(free_space)db." msgstr "" -#: nova/virt/libvirt/utils.py:187 +#: nova/virt/libvirt/utils.py:190 #, python-format msgid "Path %s must be LVM logical volume" msgstr "" -#: nova/virt/libvirt/utils.py:406 +#: nova/virt/libvirt/utils.py:409 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" -#: nova/virt/libvirt/utils.py:495 +#: nova/virt/libvirt/utils.py:498 #, python-format msgid "Reading image info file: %s" msgstr "" -#: nova/virt/libvirt/utils.py:499 +#: nova/virt/libvirt/utils.py:502 #, python-format msgid "Read: %s" msgstr "" -#: nova/virt/libvirt/utils.py:505 +#: nova/virt/libvirt/utils.py:508 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/utils.py:529 +#: nova/virt/libvirt/utils.py:532 #, python-format msgid "Writing image info file: %s" msgstr "" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:533 #, python-format msgid "Wrote: %s" msgstr "" -#: nova/virt/libvirt/vif.py:95 +#: nova/virt/libvirt/vif.py:97 #, python-format msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" msgstr "" -#: nova/virt/libvirt/vif.py:105 +#: nova/virt/libvirt/vif.py:107 #, python-format msgid "Ensuring bridge %s" msgstr "" -#: nova/virt/libvirt/vif.py:181 nova/virt/libvirt/vif.py:247 -#: nova/virt/libvirt/vif.py:307 +#: 
nova/virt/libvirt/vif.py:183 nova/virt/libvirt/vif.py:249 +#: nova/virt/libvirt/vif.py:309 msgid "Failed while unplugging vif" msgstr "" -#: nova/virt/libvirt/volume.py:174 +#: nova/virt/libvirt/volume.py:176 #, python-format msgid "iSCSI device not found at %s" msgstr "" -#: nova/virt/libvirt/volume.py:177 +#: nova/virt/libvirt/volume.py:179 #, python-format msgid "" "ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " "Try number: %(tries)s" msgstr "" -#: nova/virt/libvirt/volume.py:189 +#: nova/virt/libvirt/volume.py:191 #, python-format msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" msgstr "" @@ -7339,103 +7339,103 @@ msgstr "" msgid "Got image size of %(size)s for the image %(image)s" msgstr "" -#: nova/virt/xenapi/agent.py:80 nova/virt/xenapi/vmops.py:1471 +#: nova/virt/xenapi/agent.py:85 nova/virt/xenapi/vmops.py:1491 #, python-format msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1475 +#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1495 #, python-format msgid "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " "args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1480 +#: nova/virt/xenapi/agent.py:94 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:99 +#: nova/virt/xenapi/agent.py:104 #, python-format msgid "" "The agent call to %(method)s returned an invalid response: %(ret)r. 
" "path=%(path)s; args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:109 +#: nova/virt/xenapi/agent.py:114 #, python-format msgid "Failed to query agent version: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:127 +#: nova/virt/xenapi/agent.py:132 msgid "Querying agent version" msgstr "" -#: nova/virt/xenapi/agent.py:141 +#: nova/virt/xenapi/agent.py:146 msgid "Reached maximum time attempting to query agent version" msgstr "" -#: nova/virt/xenapi/agent.py:149 +#: nova/virt/xenapi/agent.py:154 #, python-format msgid "Updating agent to %s" msgstr "" -#: nova/virt/xenapi/agent.py:157 +#: nova/virt/xenapi/agent.py:162 #, python-format msgid "Failed to update agent: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:171 +#: nova/virt/xenapi/agent.py:176 msgid "Setting admin password" msgstr "" -#: nova/virt/xenapi/agent.py:182 +#: nova/virt/xenapi/agent.py:187 #, python-format msgid "Failed to exchange keys: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:202 +#: nova/virt/xenapi/agent.py:207 #, python-format msgid "Failed to update password: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:209 +#: nova/virt/xenapi/agent.py:214 #, python-format msgid "Injecting file path: %r" msgstr "" -#: nova/virt/xenapi/agent.py:222 +#: nova/virt/xenapi/agent.py:227 #, python-format msgid "Failed to inject file: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:229 +#: nova/virt/xenapi/agent.py:234 msgid "Resetting network" msgstr "" -#: nova/virt/xenapi/agent.py:235 +#: nova/virt/xenapi/agent.py:240 #, python-format msgid "Failed to reset network: %(resp)r" msgstr "" -#: nova/virt/xenapi/agent.py:255 +#: nova/virt/xenapi/agent.py:263 msgid "" "XenServer tools installed in this image are capable of network injection." 
" Networking files will not bemanipulated" msgstr "" -#: nova/virt/xenapi/agent.py:263 +#: nova/virt/xenapi/agent.py:271 msgid "" "XenServer tools are present in this image but are not capable of network " "injection" msgstr "" -#: nova/virt/xenapi/agent.py:267 +#: nova/virt/xenapi/agent.py:275 msgid "XenServer tools are not installed in this image" msgstr "" -#: nova/virt/xenapi/agent.py:319 +#: nova/virt/xenapi/agent.py:327 #, python-format msgid "OpenSSL error: %s" msgstr "" @@ -7451,24 +7451,24 @@ msgstr "" msgid "Failure while cleaning up attached VDIs" msgstr "" -#: nova/virt/xenapi/driver.py:363 +#: nova/virt/xenapi/driver.py:360 #, python-format msgid "Could not determine key: %s" msgstr "" -#: nova/virt/xenapi/driver.py:574 +#: nova/virt/xenapi/driver.py:571 msgid "Host startup on XenServer is not supported." msgstr "" -#: nova/virt/xenapi/driver.py:626 +#: nova/virt/xenapi/driver.py:623 msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" msgstr "" -#: nova/virt/xenapi/driver.py:664 +#: nova/virt/xenapi/driver.py:661 msgid "Host is member of a pool, but DB says otherwise" msgstr "" -#: nova/virt/xenapi/driver.py:748 nova/virt/xenapi/driver.py:762 +#: nova/virt/xenapi/driver.py:745 nova/virt/xenapi/driver.py:759 #, python-format msgid "Got exception: %s" msgstr "" @@ -8004,189 +8004,193 @@ msgstr "" msgid "This domU must be running on the host specified by xenapi_connection_url" msgstr "" -#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:664 +#: nova/virt/xenapi/vmops.py:128 nova/virt/xenapi/vmops.py:674 #, python-format msgid "Updating progress to %(progress)d" msgstr "" -#: nova/virt/xenapi/vmops.py:229 +#: nova/virt/xenapi/vmops.py:169 +msgid "Error: Agent is disabled" +msgstr "" + +#: nova/virt/xenapi/vmops.py:237 msgid "Starting instance" msgstr "" -#: nova/virt/xenapi/vmops.py:297 +#: nova/virt/xenapi/vmops.py:305 msgid "Removing kernel/ramdisk files from dom0" msgstr "" -#: nova/virt/xenapi/vmops.py:369 +#: 
nova/virt/xenapi/vmops.py:377 #, python-format msgid "Block device information present: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:400 +#: nova/virt/xenapi/vmops.py:408 msgid "Failed to spawn, rolling back" msgstr "" -#: nova/virt/xenapi/vmops.py:473 +#: nova/virt/xenapi/vmops.py:481 msgid "Detected ISO image type, creating blank VM for install" msgstr "" -#: nova/virt/xenapi/vmops.py:490 +#: nova/virt/xenapi/vmops.py:498 msgid "Auto configuring disk, attempting to resize partition..." msgstr "" -#: nova/virt/xenapi/vmops.py:516 +#: nova/virt/xenapi/vmops.py:524 msgid "Starting VM" msgstr "" -#: nova/virt/xenapi/vmops.py:523 +#: nova/virt/xenapi/vmops.py:530 +msgid "Waiting for instance state to become running" +msgstr "" + +#: nova/virt/xenapi/vmops.py:544 #, python-format msgid "" "Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " "%(version)s" msgstr "" -#: nova/virt/xenapi/vmops.py:526 +#: nova/virt/xenapi/vmops.py:547 #, python-format msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" msgstr "" -#: nova/virt/xenapi/vmops.py:533 -msgid "Waiting for instance state to become running" -msgstr "" - -#: nova/virt/xenapi/vmops.py:548 +#: nova/virt/xenapi/vmops.py:558 #, python-format msgid "Instance agent version: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:575 +#: nova/virt/xenapi/vmops.py:585 msgid "Setting VCPU weight" msgstr "" -#: nova/virt/xenapi/vmops.py:583 +#: nova/virt/xenapi/vmops.py:593 #, python-format msgid "Could not find VM with name %s" msgstr "" -#: nova/virt/xenapi/vmops.py:633 +#: nova/virt/xenapi/vmops.py:643 msgid "Finished snapshot and upload for VM" msgstr "" -#: nova/virt/xenapi/vmops.py:637 +#: nova/virt/xenapi/vmops.py:647 #, python-format msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d" msgstr "" -#: nova/virt/xenapi/vmops.py:645 +#: nova/virt/xenapi/vmops.py:655 msgid "Failed to transfer vhd to new host" msgstr "" -#: nova/virt/xenapi/vmops.py:682 +#: nova/virt/xenapi/vmops.py:692 #, 
python-format msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" msgstr "" -#: nova/virt/xenapi/vmops.py:806 +#: nova/virt/xenapi/vmops.py:816 #, python-format msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" msgstr "" -#: nova/virt/xenapi/vmops.py:811 +#: nova/virt/xenapi/vmops.py:821 msgid "Resize complete" msgstr "" -#: nova/virt/xenapi/vmops.py:855 +#: nova/virt/xenapi/vmops.py:865 msgid "Starting halted instance found during reboot" msgstr "" -#: nova/virt/xenapi/vmops.py:940 +#: nova/virt/xenapi/vmops.py:956 msgid "Unable to find root VBD/VDI for VM" msgstr "" -#: nova/virt/xenapi/vmops.py:966 +#: nova/virt/xenapi/vmops.py:982 msgid "Destroying VDIs" msgstr "" -#: nova/virt/xenapi/vmops.py:993 +#: nova/virt/xenapi/vmops.py:1009 msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" msgstr "" -#: nova/virt/xenapi/vmops.py:1000 +#: nova/virt/xenapi/vmops.py:1016 msgid "instance has a kernel or ramdisk but not both" msgstr "" -#: nova/virt/xenapi/vmops.py:1007 +#: nova/virt/xenapi/vmops.py:1023 msgid "kernel/ramdisk files removed" msgstr "" -#: nova/virt/xenapi/vmops.py:1033 +#: nova/virt/xenapi/vmops.py:1049 msgid "Destroying VM" msgstr "" -#: nova/virt/xenapi/vmops.py:1059 +#: nova/virt/xenapi/vmops.py:1075 msgid "VM is not present, skipping destroy..." msgstr "" -#: nova/virt/xenapi/vmops.py:1110 +#: nova/virt/xenapi/vmops.py:1126 #, python-format msgid "Instance is already in Rescue Mode: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:1144 +#: nova/virt/xenapi/vmops.py:1160 msgid "VM is not present, skipping soft delete..." 
msgstr "" -#: nova/virt/xenapi/vmops.py:1192 +#: nova/virt/xenapi/vmops.py:1209 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" -#: nova/virt/xenapi/vmops.py:1196 +#: nova/virt/xenapi/vmops.py:1213 msgid "Automatically hard rebooting" msgstr "" -#: nova/virt/xenapi/vmops.py:1295 +#: nova/virt/xenapi/vmops.py:1312 msgid "Fetching VM ref while BUILDING failed" msgstr "" -#: nova/virt/xenapi/vmops.py:1378 +#: nova/virt/xenapi/vmops.py:1395 msgid "Injecting network info to xenstore" msgstr "" -#: nova/virt/xenapi/vmops.py:1397 +#: nova/virt/xenapi/vmops.py:1414 msgid "Creating vifs" msgstr "" -#: nova/virt/xenapi/vmops.py:1406 +#: nova/virt/xenapi/vmops.py:1423 #, python-format msgid "Creating VIF for network %(network_ref)s" msgstr "" -#: nova/virt/xenapi/vmops.py:1409 +#: nova/virt/xenapi/vmops.py:1426 #, python-format msgid "Created VIF %(vif_ref)s, network %(network_ref)s" msgstr "" -#: nova/virt/xenapi/vmops.py:1434 +#: nova/virt/xenapi/vmops.py:1454 msgid "Injecting hostname to xenstore" msgstr "" -#: nova/virt/xenapi/vmops.py:1530 +#: nova/virt/xenapi/vmops.py:1550 #, python-format msgid "" "Destination host:%(hostname)s must be in the same aggregate as the source" " server" msgstr "" -#: nova/virt/xenapi/vmops.py:1562 +#: nova/virt/xenapi/vmops.py:1582 msgid "Migrate Receive failed" msgstr "" -#: nova/virt/xenapi/vmops.py:1610 +#: nova/virt/xenapi/vmops.py:1630 msgid "VM.assert_can_migratefailed" msgstr "" -#: nova/virt/xenapi/vmops.py:1646 +#: nova/virt/xenapi/vmops.py:1666 msgid "Migrate Send failed" msgstr "" diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py index 2cc607684..03f7120a0 100644 --- a/nova/network/quantumv2/api.py +++ b/nova/network/quantumv2/api.py @@ -17,6 +17,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 from nova.compute import api as compute_api +from nova import config from nova.db import base from nova import exception from nova import flags @@ -51,7 +52,8 
@@ quantum_opts = [ 'quantum in admin context'), ] -flags.DECLARE('default_floating_pool', 'nova.network.manager') +CONF = config.CONF +CONF.import_opt('default_floating_pool', 'nova.network.manager') FLAGS = flags.FLAGS FLAGS.register_opts(quantum_opts) @@ -90,6 +92,11 @@ class API(base.Base): search_opts['id'] = net_ids nets += quantum.list_networks(**search_opts).get('networks', []) + _ensure_requested_network_ordering( + lambda x: x['id'], + nets, + net_ids) + return nets def allocate_for_instance(self, context, instance, **kwargs): @@ -532,6 +539,13 @@ class API(base.Base): if not networks: networks = self._get_available_networks(context, instance['project_id']) + else: + # ensure ports are in preferred network order + _ensure_requested_network_ordering( + lambda x: x['network_id'], + ports, + [n['id'] for n in networks]) + nw_info = network_model.NetworkInfo() for port in ports: network_name = None @@ -645,3 +659,9 @@ class API(base.Base): def create_public_dns_domain(self, context, domain, project=None): """Create a private DNS domain with optional nova project.""" raise NotImplementedError() + + +def _ensure_requested_network_ordering(accessor, unordered, preferred): + """Sort a list with respect to the preferred network ordering.""" + if preferred: + unordered.sort(key=lambda i: preferred.index(accessor(i))) diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 6d6288d83..1a608da29 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -23,11 +23,12 @@ Chance (Random) Scheduler implementation import random +from nova import config from nova import exception from nova import flags from nova.scheduler import driver -FLAGS = flags.FLAGS +CONF = config.CONF class ChanceScheduler(driver.Scheduler): @@ -65,7 +66,7 @@ class ChanceScheduler(driver.Scheduler): for num, instance_uuid in enumerate(instance_uuids): request_spec['instance_properties']['launch_index'] = num try: - host = self._schedule(context, FLAGS.compute_topic, + 
host = self._schedule(context, CONF.compute_topic, request_spec, filter_properties) updated_instance = driver.instance_update_db(context, instance_uuid) @@ -88,7 +89,7 @@ class ChanceScheduler(driver.Scheduler): filter_properties, instance, instance_type, reservations): """Select a target for resize.""" - host = self._schedule(context, FLAGS.compute_topic, request_spec, + host = self._schedule(context, CONF.compute_topic, request_spec, filter_properties) self.compute_rpcapi.prep_resize(context, image, instance, instance_type, host, reservations) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 012ad09ed..f93268906 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova import config from nova import db from nova import exception from nova import flags @@ -52,11 +53,12 @@ scheduler_driver_opts = [ help='Maximum number of attempts to schedule an instance'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(scheduler_driver_opts) +CONF = config.CONF +CONF.register_opts(scheduler_driver_opts) -flags.DECLARE('instances_path', 'nova.compute.manager') -flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver') +CONF = config.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') +CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver') def handle_schedule_error(context, ex, instance_uuid, request_spec): @@ -105,7 +107,7 @@ def cast_to_compute_host(context, host, method, **kwargs): instance_update_db(context, instance_uuid) rpc.cast(context, - rpc.queue_get_for(context, FLAGS.compute_topic, host), + rpc.queue_get_for(context, CONF.compute_topic, host), {"method": method, "args": kwargs}) LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals()) @@ -113,7 +115,7 @@ def cast_to_compute_host(context, host, method, **kwargs): def 
cast_to_host(context, topic, host, method, **kwargs): """Generic cast to host""" - topic_mapping = {FLAGS.compute_topic: cast_to_compute_host} + topic_mapping = {CONF.compute_topic: cast_to_compute_host} func = topic_mapping.get(topic) if func: @@ -149,7 +151,7 @@ class Scheduler(object): def __init__(self): self.host_manager = importutils.import_object( - FLAGS.scheduler_host_manager) + CONF.scheduler_host_manager) self.compute_api = compute_api.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 78bd49a96..c43e48876 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -21,6 +21,7 @@ Weighing Functions. import operator +from nova import config from nova import exception from nova import flags from nova.openstack.common import importutils @@ -30,8 +31,7 @@ from nova.scheduler import driver from nova.scheduler import least_cost from nova.scheduler import scheduler_options - -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) @@ -61,7 +61,7 @@ class FilterScheduler(driver.Scheduler): notifier.notify(context, notifier.publisher_id("scheduler"), 'scheduler.run_instance.start', notifier.INFO, payload) - weighted_hosts = self._schedule(context, FLAGS.compute_topic, + weighted_hosts = self._schedule(context, CONF.compute_topic, request_spec, filter_properties, instance_uuids) @@ -108,7 +108,7 @@ class FilterScheduler(driver.Scheduler): the prep_resize operation to it. 
""" - hosts = self._schedule(context, FLAGS.compute_topic, request_spec, + hosts = self._schedule(context, CONF.compute_topic, request_spec, filter_properties, [instance['uuid']]) if not hosts: raise exception.NoValidHost(reason="") @@ -187,7 +187,7 @@ class FilterScheduler(driver.Scheduler): filter_properties['os_type'] = os_type def _max_attempts(self): - max_attempts = FLAGS.scheduler_max_attempts + max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: raise exception.NovaException(_("Invalid value for " "'scheduler_max_attempts', must be >= 1")) @@ -226,7 +226,7 @@ class FilterScheduler(driver.Scheduler): ordered by their fitness. """ elevated = context.elevated() - if topic != FLAGS.compute_topic: + if topic != CONF.compute_topic: msg = _("Scheduler only understands Compute nodes (for now)") raise NotImplementedError(msg) @@ -306,12 +306,12 @@ class FilterScheduler(driver.Scheduler): """ if topic is None: # Schedulers only support compute right now. - topic = FLAGS.compute_topic + topic = CONF.compute_topic if topic in self.cost_function_cache: return self.cost_function_cache[topic] cost_fns = [] - for cost_fn_str in FLAGS.least_cost_functions: + for cost_fn_str in CONF.least_cost_functions: if '.' in cost_fn_str: short_name = cost_fn_str.split('.')[-1] else: @@ -333,7 +333,7 @@ class FilterScheduler(driver.Scheduler): try: flag_name = "%s_weight" % cost_fn.__name__ - weight = getattr(FLAGS, flag_name) + weight = getattr(CONF, flag_name) except AttributeError: raise exception.SchedulerWeightFlagNotFound( flag_name=flag_name) diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py index 4af71c7b4..202f8232a 100644 --- a/nova/scheduler/filters/compute_filter.py +++ b/nova/scheduler/filters/compute_filter.py @@ -13,12 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import config from nova import flags from nova.openstack.common import log as logging from nova.scheduler import filters from nova import utils -FLAGS = flags.FLAGS +CONF = config.CONF LOG = logging.getLogger(__name__) @@ -29,7 +30,7 @@ class ComputeFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): """Returns True for only active compute nodes""" instance_type = filter_properties.get('instance_type') - if host_state.topic != FLAGS.compute_topic or not instance_type: + if host_state.topic != CONF.compute_topic or not instance_type: return True capabilities = host_state.capabilities service = host_state.service diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py index 0c4a67dcc..9c93df930 100644 --- a/nova/scheduler/filters/core_filter.py +++ b/nova/scheduler/filters/core_filter.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -27,8 +28,8 @@ cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio', default=16.0, help='Virtual CPU to Physical CPU allocation ratio') -FLAGS = flags.FLAGS -FLAGS.register_opt(cpu_allocation_ratio_opt) +CONF = config.CONF +CONF.register_opt(cpu_allocation_ratio_opt) class CoreFilter(filters.BaseHostFilter): @@ -37,7 +38,7 @@ class CoreFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): """Return True if host has sufficient CPU cores.""" instance_type = filter_properties.get('instance_type') - if host_state.topic != FLAGS.compute_topic or not instance_type: + if host_state.topic != CONF.compute_topic or not instance_type: return True if not host_state.vcpus_total: @@ -46,7 +47,7 @@ class CoreFilter(filters.BaseHostFilter): return True instance_vcpus = instance_type['vcpus'] - vcpus_total = host_state.vcpus_total * 
FLAGS.cpu_allocation_ratio + vcpus_total = host_state.vcpus_total * CONF.cpu_allocation_ratio # Only provide a VCPU limit to compute if the virt driver is reporting # an accurate count of installed VCPUs. (XenServer driver does not) diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py index 88b8c3377..358583b8a 100644 --- a/nova/scheduler/filters/disk_filter.py +++ b/nova/scheduler/filters/disk_filter.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -23,8 +24,8 @@ LOG = logging.getLogger(__name__) disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0, help="virtual disk to physical disk allocation ratio") -FLAGS = flags.FLAGS -FLAGS.register_opt(disk_allocation_ratio_opt) +CONF = config.CONF +CONF.register_opt(disk_allocation_ratio_opt) class DiskFilter(filters.BaseHostFilter): @@ -39,7 +40,7 @@ class DiskFilter(filters.BaseHostFilter): free_disk_mb = host_state.free_disk_mb total_usable_disk_mb = host_state.total_usable_disk_gb * 1024 - disk_mb_limit = total_usable_disk_mb * FLAGS.disk_allocation_ratio + disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio used_disk_mb = total_usable_disk_mb - free_disk_mb usable_disk_mb = disk_mb_limit - used_disk_mb diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py index c2e0205a3..1b40bae62 100644 --- a/nova/scheduler/filters/io_ops_filter.py +++ b/nova/scheduler/filters/io_ops_filter.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -24,8 +25,8 @@ max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host", default=8, help="Ignore hosts that have too many builds/resizes/snaps/migrations") -FLAGS = flags.FLAGS -FLAGS.register_opt(max_io_ops_per_host_opt) +CONF = config.CONF +CONF.register_opt(max_io_ops_per_host_opt) class IoOpsFilter(filters.BaseHostFilter): @@ -36,7 +37,7 @@ class IoOpsFilter(filters.BaseHostFilter): compute node statistics to decide whether to filter. """ num_io_ops = host_state.num_io_ops - max_io_ops = FLAGS.max_io_ops_per_host + max_io_ops = CONF.max_io_ops_per_host passes = num_io_ops < max_io_ops if not passes: LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host " diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py index 0d67b7b80..d1d16b80b 100644 --- a/nova/scheduler/filters/isolated_hosts_filter.py +++ b/nova/scheduler/filters/isolated_hosts_filter.py @@ -13,12 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. 
- +from nova import config from nova import flags from nova.scheduler import filters - -FLAGS = flags.FLAGS +CONF = config.CONF class IsolatedHostsFilter(filters.BaseHostFilter): @@ -28,6 +27,6 @@ class IsolatedHostsFilter(filters.BaseHostFilter): spec = filter_properties.get('request_spec', {}) props = spec.get('instance_properties', {}) image_ref = props.get('image_ref') - image_isolated = image_ref in FLAGS.isolated_images - host_isolated = host_state.host in FLAGS.isolated_hosts + image_isolated = image_ref in CONF.isolated_images + host_isolated = host_state.host in CONF.isolated_hosts return image_isolated == host_isolated diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py index e96539c0c..17c7ebc22 100644 --- a/nova/scheduler/filters/num_instances_filter.py +++ b/nova/scheduler/filters/num_instances_filter.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -24,8 +25,8 @@ max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host", default=50, help="Ignore hosts that have too many instances") -FLAGS = flags.FLAGS -FLAGS.register_opt(max_instances_per_host_opt) +CONF = config.CONF +CONF.register_opt(max_instances_per_host_opt) class NumInstancesFilter(filters.BaseHostFilter): @@ -33,7 +34,7 @@ class NumInstancesFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): num_instances = host_state.num_instances - max_instances = FLAGS.max_instances_per_host + max_instances = CONF.max_instances_per_host passes = num_instances < max_instances if not passes: LOG.debug(_("%(host_state)s fails num_instances check: Max " diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py index 22ba0252c..85370dc2c 100644 --- 
a/nova/scheduler/filters/ram_filter.py +++ b/nova/scheduler/filters/ram_filter.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -25,8 +26,8 @@ ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio", default=1.5, help="virtual ram to physical ram allocation ratio") -FLAGS = flags.FLAGS -FLAGS.register_opt(ram_allocation_ratio_opt) +CONF = config.CONF +CONF.register_opt(ram_allocation_ratio_opt) class RamFilter(filters.BaseHostFilter): @@ -39,7 +40,7 @@ class RamFilter(filters.BaseHostFilter): free_ram_mb = host_state.free_ram_mb total_usable_ram_mb = host_state.total_usable_ram_mb - memory_mb_limit = total_usable_ram_mb * FLAGS.ram_allocation_ratio + memory_mb_limit = total_usable_ram_mb * CONF.ram_allocation_ratio used_ram_mb = total_usable_ram_mb - free_ram_mb usable_ram = memory_mb_limit - used_ram_mb if not usable_ram >= requested_ram: diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index 69968a766..4fd0488d9 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -48,6 +48,7 @@ import httplib import socket import ssl +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import jsonutils @@ -81,10 +82,10 @@ trusted_opts = [ help='attestation authorization blob - must change'), ] -FLAGS = flags.FLAGS +CONF = config.CONF trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters') -FLAGS.register_group(trust_group) -FLAGS.register_opts(trusted_opts, group='trusted_computing') +CONF.register_group(trust_group) +CONF.register_opts(trusted_opts, group=trust_group) class HTTPSClientAuthConnection(httplib.HTTPSConnection): @@ -124,13 +125,13 @@ class 
AttestationService(httplib.HTTPSConnection): # Provide access wrapper to attestation server to get integrity report. def __init__(self): - self.api_url = FLAGS.trusted_computing.attestation_api_url - self.host = FLAGS.trusted_computing.attestation_server - self.port = FLAGS.trusted_computing.attestation_port - self.auth_blob = FLAGS.trusted_computing.attestation_auth_blob + self.api_url = CONF.trusted_computing.attestation_api_url + self.host = CONF.trusted_computing.attestation_server + self.port = CONF.trusted_computing.attestation_port + self.auth_blob = CONF.trusted_computing.attestation_auth_blob self.key_file = None self.cert_file = None - self.ca_file = FLAGS.trusted_computing.attestation_server_ca_file + self.ca_file = CONF.trusted_computing.attestation_server_ca_file self.request_count = 100 def _do_request(self, method, action_url, body, headers): diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index fc9f3c8cc..91e16ad34 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -21,6 +21,7 @@ import UserDict from nova.compute import task_states from nova.compute import vm_states +from nova import config from nova import db from nova import exception from nova import flags @@ -49,8 +50,8 @@ host_manager_opts = [ 'when not specified in the request.'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(host_manager_opts) +CONF = config.CONF +CONF.register_opts(host_manager_opts) LOG = logging.getLogger(__name__) @@ -281,7 +282,7 @@ class HostManager(object): self.service_states = {} # { <host> : { <service> : { cap k : v }}} self.host_state_map = {} self.filter_classes = filters.get_filter_classes( - FLAGS.scheduler_available_filters) + CONF.scheduler_available_filters) def _choose_host_filters(self, filters): """Since the caller may specify which filters to use we need @@ -290,7 +291,7 @@ class HostManager(object): of acceptable filters. 
""" if filters is None: - filters = FLAGS.scheduler_default_filters + filters = CONF.scheduler_default_filters if not isinstance(filters, (list, tuple)): filters = [filters] good_filters = [] @@ -347,7 +348,7 @@ class HostManager(object): with the instance (in case the InstanceType changed since the instance was created).""" - if topic != FLAGS.compute_topic: + if topic != CONF.compute_topic: raise NotImplementedError(_( "host_manager only implemented for 'compute'")) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index bccdd0a4f..d3eaee735 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,6 +22,7 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -45,8 +46,8 @@ least_cost_opts = [ 'e.g. spread-first'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(least_cost_opts) +CONF = config.CONF +CONF.register_opts(least_cost_opts) # TODO(sirp): Once we have enough of these rules, we can break them out into a # cost_functions.py file (perhaps in a least_cost_scheduler directory) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 531c54726..77f5a0259 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -26,6 +26,7 @@ import sys from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova import config import nova.context from nova import db from nova import exception @@ -46,8 +47,8 @@ scheduler_driver_opt = cfg.StrOpt('scheduler_driver', default='nova.scheduler.filter_scheduler.FilterScheduler', help='Default driver to use for the scheduler') -FLAGS = flags.FLAGS -FLAGS.register_opt(scheduler_driver_opt) +CONF = config.CONF +CONF.register_opt(scheduler_driver_opt) QUOTAS = quota.QUOTAS @@ 
-59,7 +60,7 @@ class SchedulerManager(manager.Manager): def __init__(self, scheduler_driver=None, *args, **kwargs): if not scheduler_driver: - scheduler_driver = FLAGS.scheduler_driver + scheduler_driver = CONF.scheduler_driver self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py index 13e3c0e1a..7c68bb12a 100644 --- a/nova/scheduler/multi.py +++ b/nova/scheduler/multi.py @@ -27,6 +27,7 @@ schedule requests to compute nodes but provide their own manager and topic. https://bugs.launchpad.net/nova/+bug/1009681 """ +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import importutils @@ -43,8 +44,8 @@ multi_scheduler_opts = [ help='Default driver to use for scheduling calls'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(multi_scheduler_opts) +CONF = config.CONF +CONF.register_opts(multi_scheduler_opts) class MultiScheduler(driver.Scheduler): @@ -58,9 +59,9 @@ class MultiScheduler(driver.Scheduler): def __init__(self): super(MultiScheduler, self).__init__() compute_driver = importutils.import_object( - FLAGS.compute_scheduler_driver) + CONF.compute_scheduler_driver) default_driver = importutils.import_object( - FLAGS.default_scheduler_driver) + CONF.default_scheduler_driver) self.drivers = {'compute': compute_driver, 'default': default_driver} diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py index b41668733..cbb6712db 100644 --- a/nova/scheduler/rpcapi.py +++ b/nova/scheduler/rpcapi.py @@ -18,12 +18,12 @@ Client side of the scheduler manager RPC API. 
""" +from nova import config from nova import flags from nova.openstack.common import jsonutils import nova.openstack.common.rpc.proxy - -FLAGS = flags.FLAGS +CONF = config.CONF class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy): @@ -60,7 +60,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy): BASE_RPC_API_VERSION = '2.0' def __init__(self): - super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic, + super(SchedulerAPI, self).__init__(topic=CONF.scheduler_topic, default_version=self.BASE_RPC_API_VERSION) def run_instance(self, ctxt, request_spec, admin_password, diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py index 7acf2f750..e8be0070b 100644 --- a/nova/scheduler/scheduler_options.py +++ b/nova/scheduler/scheduler_options.py @@ -26,6 +26,7 @@ import datetime import json import os +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -37,8 +38,8 @@ scheduler_json_config_location_opt = cfg.StrOpt( default='', help='Absolute path to scheduler configuration JSON file.') -FLAGS = flags.FLAGS -FLAGS.register_opt(scheduler_json_config_location_opt) +CONF = config.CONF +CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) @@ -86,7 +87,7 @@ class SchedulerOptions(object): def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: - filename = FLAGS.scheduler_json_config_location + filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index d8bc34883..7c01fdcc2 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -36,6 +36,7 @@ from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as 
compute_utils from nova.compute import vm_states +from nova import config from nova import context from nova import db from nova import exception @@ -66,7 +67,8 @@ from nova.volume import cinder QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS -flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') +CONF = config.CONF +CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') FAKE_IMAGE_REF = 'fake-image-ref' @@ -2411,6 +2413,35 @@ class ComputeTestCase(BaseTestCase): NotImplementedError('test'), exc_info) + def test_add_instance_fault_with_remote_error(self): + exc_info = None + instance_uuid = str(utils.gen_uuid()) + + def fake_db_fault_create(ctxt, values): + self.assertTrue(values['details'].startswith('Remote error')) + self.assertTrue('raise rpc_common.RemoteError' + in values['details']) + del values['details'] + + expected = { + 'code': 500, + 'instance_uuid': instance_uuid, + 'message': 'My Test Message' + } + self.assertEquals(expected, values) + + try: + raise rpc_common.RemoteError('test', 'My Test Message') + except rpc_common.RemoteError as exc: + exc_info = sys.exc_info() + + self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) + + ctxt = context.get_admin_context() + compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid, + exc, + exc_info) + def test_add_instance_fault_user_error(self): exc_info = None instance_uuid = str(utils.gen_uuid()) @@ -5220,6 +5251,23 @@ class ComputePolicyTestCase(BaseTestCase): self.compute_api.get_instance_faults, self.context, instances) + def test_force_host_fail(self): + rules = {"compute:create": [], + "compute:create:forced_host": [["role:fake"]]} + self._set_rules(rules) + + self.assertRaises(exception.PolicyNotAuthorized, + self.compute_api.create, self.context, None, '1', + availability_zone='1:1') + + def test_force_host_pass(self): + rules = {"compute:create": [], + "compute:create:forced_host": []} + self._set_rules(rules) + + 
self.compute_api.create(self.context, None, '1', + availability_zone='1:1') + class ComputeHostAPITestCase(BaseTestCase): def setUp(self): diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py index f1e1edc54..2b50edf05 100644 --- a/nova/tests/console/test_console.py +++ b/nova/tests/console/test_console.py @@ -18,6 +18,7 @@ """Tests For Console proxy.""" +from nova import config from nova.console import api as console_api from nova.console import rpcapi as console_rpcapi from nova import context @@ -29,7 +30,8 @@ from nova.openstack.common import rpc from nova import test FLAGS = flags.FLAGS -flags.DECLARE('console_driver', 'nova.console.manager') +CONF = config.CONF +CONF.import_opt('console_driver', 'nova.console.manager') class ConsoleTestCase(test.TestCase): diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index d1c166ba1..f8661e434 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -16,16 +16,17 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import config from nova import flags -FLAGS = flags.FLAGS +CONF = config.CONF -flags.DECLARE('scheduler_driver', 'nova.scheduler.manager') -flags.DECLARE('fake_network', 'nova.network.manager') -flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -flags.DECLARE('network_size', 'nova.network.manager') -flags.DECLARE('num_networks', 'nova.network.manager') -flags.DECLARE('policy_file', 'nova.policy') +CONF.import_opt('scheduler_driver', 'nova.scheduler.manager') +CONF.import_opt('fake_network', 'nova.network.manager') +CONF.import_opt('iscsi_num_targets', 'nova.volume.driver') +CONF.import_opt('network_size', 'nova.network.manager') +CONF.import_opt('num_networks', 'nova.network.manager') +CONF.import_opt('policy_file', 'nova.policy') def set_defaults(conf): diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py index edb477b70..a8f29e012 100644 --- a/nova/tests/network/test_quantumv2.py +++ b/nova/tests/network/test_quantumv2.py @@ -351,35 +351,37 @@ class TestQuantumv2(test.TestCase): self.moxed_client.show_port(port_id).AndReturn( {'port': {'id': 'my_portid1', 'network_id': 'my_netid1'}}) - req_net_ids.append('my_netid1') ports['my_netid1'] = self.port_data1[0] id = 'my_netid1' else: fixed_ips[id] = fixed_ip req_net_ids.append(id) + expected_network_order = req_net_ids + else: + expected_network_order = [n['id'] for n in nets] search_ids = [net['id'] for net in nets if net['id'] in req_net_ids] mox_list_network_params = dict(tenant_id=self.instance['project_id'], shared=False) if search_ids: - mox_list_network_params['id'] = search_ids + mox_list_network_params['id'] = mox.SameElementsAs(search_ids) self.moxed_client.list_networks( **mox_list_network_params).AndReturn({'networks': nets}) mox_list_network_params = dict(shared=True) if search_ids: - mox_list_network_params['id'] = search_ids + mox_list_network_params['id'] = mox.SameElementsAs(search_ids) self.moxed_client.list_networks( 
**mox_list_network_params).AndReturn({'networks': []}) - for network in nets: + for net_id in expected_network_order: port_req_body = { 'port': { 'device_id': self.instance['uuid'], 'device_owner': 'compute:nova', }, } - port = ports.get(network['id'], None) + port = ports.get(net_id, None) if port: port_id = port['id'] self.moxed_client.update_port(port_id, @@ -387,10 +389,10 @@ class TestQuantumv2(test.TestCase): ).AndReturn( {'port': port}) else: - fixed_ip = fixed_ips.get(network['id']) + fixed_ip = fixed_ips.get(net_id) if fixed_ip: port_req_body['port']['fixed_ip'] = fixed_ip - port_req_body['port']['network_id'] = network['id'] + port_req_body['port']['network_id'] = net_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = \ self.instance['project_id'] @@ -410,8 +412,9 @@ class TestQuantumv2(test.TestCase): def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network - requested_networks = [(net['id'], None, None) - for net in (self.nets3[0], self.nets3[-1])] + requested_networks = [ + (net['id'], None, None) + for net in (self.nets3[1], self.nets3[0], self.nets3[2])] self._allocate_for_instance(net_idx=3, requested_networks=requested_networks) @@ -902,3 +905,33 @@ class TestQuantumv2(test.TestCase): self.mox.ReplayAll() api.disassociate_floating_ip(self.context, self.instance, address) + + +class TestQuantumv2ModuleMethods(test.TestCase): + def test_ensure_requested_network_ordering_no_preference(self): + l = [1, 2, 3] + + quantumapi._ensure_requested_network_ordering( + lambda x: x, + l, + None) + + def test_ensure_requested_network_ordering_no_preference(self): + l = [{'id': 3}, {'id': 1}, {'id': 2}] + + quantumapi._ensure_requested_network_ordering( + lambda x: x['id'], + l, + None) + + self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}]) + + def test_ensure_requested_network_ordering_with_preference(self): + l = [{'id': 3}, {'id': 1}, {'id': 2}] + + 
quantumapi._ensure_requested_network_ordering( + lambda x: x['id'], + l, + [1, 2, 3]) + + self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}]) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 15dec56b9..ad94f6550 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -22,6 +22,7 @@ from nova import flags from nova.openstack.common import cfg from nova import test +CONF = config.CONF FLAGS = flags.FLAGS FLAGS.register_opt(cfg.StrOpt('flags_unittest', default='foo', @@ -30,16 +31,16 @@ FLAGS.register_opt(cfg.StrOpt('flags_unittest', class FlagsTestCase(test.TestCase): def test_declare(self): - self.assert_('answer' not in FLAGS) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assert_('answer' in FLAGS) - self.assertEqual(FLAGS.answer, 42) + self.assert_('answer' not in CONF) + CONF.import_opt('answer', 'nova.tests.declare_flags') + self.assert_('answer' in CONF) + self.assertEqual(CONF.answer, 42) # Make sure we don't overwrite anything - FLAGS.set_override('answer', 256) - self.assertEqual(FLAGS.answer, 256) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assertEqual(FLAGS.answer, 256) + CONF.set_override('answer', 256) + self.assertEqual(CONF.answer, 256) + CONF.import_opt('answer', 'nova.tests.declare_flags') + self.assertEqual(CONF.answer, 256) def test_getopt_non_interspersed_args(self): self.assert_('runtime_answer' not in FLAGS) diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py index f2c33e623..8203277ae 100644 --- a/nova/tests/test_imagecache.py +++ b/nova/tests/test_imagecache.py @@ -379,6 +379,29 @@ class ImageCacheManagerTestCase(test.TestCase): res = image_cache_manager._verify_checksum(img, fname) self.assertTrue(res) + def test_verify_checksum_disabled(self): + img = {'container_format': 'ami', 'id': '42'} + + self.flags(checksum_base_images=False) + + with self._intercept_log_messages() as stream: + with utils.tempdir() as tmpdir: + 
self.flags(instances_path=tmpdir) + self.flags(image_info_filename_pattern=('$instances_path/' + '%(image)s.info')) + fname, info_fname, testdata = self._make_checksum(tmpdir) + + # Checksum is valid + f = open(info_fname, 'w') + csum = hashlib.sha1() + csum.update(testdata) + f.write('{"sha1": "%s"}\n' % csum.hexdigest()) + f.close() + + image_cache_manager = imagecache.ImageCacheManager() + res = image_cache_manager._verify_checksum(img, fname) + self.assertTrue(res is None) + def test_verify_checksum_invalid_json(self): img = {'container_format': 'ami', 'id': '42'} @@ -653,6 +676,7 @@ class ImageCacheManagerTestCase(test.TestCase): self.assertEquals(image_cache_manager.corrupt_base_files, []) def test_handle_base_image_checksum_fails(self): + self.flags(checksum_base_images=True) self.stubs.Set(virtutils, 'chown', lambda x, y: None) img = '123' diff --git a/nova/utils.py b/nova/utils.py index d97c2ce3f..4dd0f161f 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -1044,7 +1044,7 @@ def generate_mac_address(): # properly: 0xfa. # Discussion: https://bugs.launchpad.net/nova/+bug/921838 mac = [0xfa, 0x16, 0x3e, - random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) @@ -1164,9 +1164,13 @@ def mkfs(fs, path, label=None): else: args = ['mkfs', '-t', fs] #add -F to force no interactive excute on non-block device. 
- if fs in ['ext3', 'ext4']: + if fs in ('ext3', 'ext4'): args.extend(['-F']) if label: - args.extend(['-n', label]) + if fs in ('msdos', 'vfat'): + label_opt = '-n' + else: + label_opt = '-L' + args.extend([label_opt, label]) args.append(path) execute(*args) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 3104fafd3..3f0e2ec53 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -60,6 +60,7 @@ from nova import block_device from nova.compute import instance_types from nova.compute import power_state from nova.compute import vm_mode +from nova import config from nova import context as nova_context from nova import exception from nova import flags @@ -75,7 +76,7 @@ from nova.virt import configdrive from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import firewall -from nova.virt.libvirt import config +from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import firewall as libvirt_firewall from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache @@ -187,8 +188,9 @@ libvirt_opts = [ FLAGS = flags.FLAGS FLAGS.register_opts(libvirt_opts) -flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') -flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc') +CONF = config.CONF +CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') +CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( libvirt_firewall.__name__, @@ -1467,7 +1469,7 @@ class LibvirtDriver(driver.ComputeDriver): the capabilities of the host""" xmlstr = self._conn.getCapabilities() - caps = config.LibvirtConfigCaps() + caps = vconfig.LibvirtConfigCaps() caps.parse_str(xmlstr) return caps @@ -1478,7 +1480,7 @@ class LibvirtDriver(driver.ComputeDriver): caps = self.get_host_capabilities() hostcpu = caps.host.cpu - guestcpu = config.LibvirtConfigGuestCPU() + guestcpu = vconfig.LibvirtConfigGuestCPU() guestcpu.model = 
hostcpu.model guestcpu.vendor = hostcpu.vendor @@ -1487,7 +1489,7 @@ class LibvirtDriver(driver.ComputeDriver): guestcpu.match = "exact" for hostfeat in hostcpu.features: - guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name) + guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name) guestfeat.policy = "require" return guestcpu @@ -1527,11 +1529,11 @@ class LibvirtDriver(driver.ComputeDriver): # updated to be at least this new, we can kill off the elif # blocks here if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION): - cpu = config.LibvirtConfigGuestCPU() + cpu = vconfig.LibvirtConfigGuestCPU() cpu.mode = mode cpu.model = model elif mode == "custom": - cpu = config.LibvirtConfigGuestCPU() + cpu = vconfig.LibvirtConfigGuestCPU() cpu.model = model elif mode == "host-model": cpu = self.get_host_cpu_for_guest() @@ -1552,7 +1554,7 @@ class LibvirtDriver(driver.ComputeDriver): block_device_info) if FLAGS.libvirt_type == "lxc": - fs = config.LibvirtConfigGuestFilesys() + fs = vconfig.LibvirtConfigGuestFilesys() fs.source_type = "mount" fs.source_dir = os.path.join(FLAGS.instances_path, instance['name'], @@ -1658,7 +1660,7 @@ class LibvirtDriver(driver.ComputeDriver): devices.append(cfg) if configdrive.enabled_for(instance): - diskconfig = config.LibvirtConfigGuestDisk() + diskconfig = vconfig.LibvirtConfigGuestDisk() diskconfig.source_type = "file" diskconfig.driver_format = "raw" diskconfig.driver_cache = self.disk_cachemode @@ -1681,9 +1683,10 @@ class LibvirtDriver(driver.ComputeDriver): """ # FIXME(vish): stick this in db inst_type_id = instance['instance_type_id'] - inst_type = instance_types.get_instance_type(inst_type_id) + inst_type = instance_types.get_instance_type(inst_type_id, + inactive=True) - guest = config.LibvirtConfigGuest() + guest = vconfig.LibvirtConfigGuest() guest.virt_type = FLAGS.libvirt_type guest.name = instance['name'] guest.uuid = instance['uuid'] @@ -1760,18 +1763,18 @@ class LibvirtDriver(driver.ComputeDriver): if 
FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml": guest.acpi = True - clk = config.LibvirtConfigGuestClock() + clk = vconfig.LibvirtConfigGuestClock() clk.offset = "utc" guest.set_clock(clk) if FLAGS.libvirt_type == "kvm": # TODO(berrange) One day this should be per-guest # OS type configurable - tmpit = config.LibvirtConfigGuestTimer() + tmpit = vconfig.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "delay" - tmrtc = config.LibvirtConfigGuestTimer() + tmrtc = vconfig.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "catchup" @@ -1796,29 +1799,29 @@ class LibvirtDriver(driver.ComputeDriver): # client app is connected. Thus we can't get away # with a single type=pty console. Instead we have # to configure two separate consoles. - consolelog = config.LibvirtConfigGuestSerial() + consolelog = vconfig.LibvirtConfigGuestSerial() consolelog.type = "file" consolelog.source_path = os.path.join(FLAGS.instances_path, instance['name'], "console.log") guest.add_device(consolelog) - consolepty = config.LibvirtConfigGuestSerial() + consolepty = vconfig.LibvirtConfigGuestSerial() consolepty.type = "pty" guest.add_device(consolepty) else: - consolepty = config.LibvirtConfigGuestConsole() + consolepty = vconfig.LibvirtConfigGuestConsole() consolepty.type = "pty" guest.add_device(consolepty) if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM: - tablet = config.LibvirtConfigGuestInput() + tablet = vconfig.LibvirtConfigGuestInput() tablet.type = "tablet" tablet.bus = "usb" guest.add_device(tablet) - graphics = config.LibvirtConfigGuestGraphics() + graphics = vconfig.LibvirtConfigGuestGraphics() graphics.type = "vnc" graphics.keymap = FLAGS.vnc_keymap graphics.listen = FLAGS.vncserver_listen @@ -2352,7 +2355,7 @@ class LibvirtDriver(driver.ComputeDriver): """ info = jsonutils.loads(cpu_info) LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) - cpu = 
config.LibvirtConfigCPU() + cpu = vconfig.LibvirtConfigCPU() cpu.arch = info['arch'] cpu.model = info['model'] cpu.vendor = info['vendor'] diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py index dd4635123..721587512 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -29,6 +29,7 @@ import time from nova.compute import task_states from nova.compute import vm_states +from nova import config from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -55,8 +56,9 @@ imagecache_opts = [ help='Write a checksum for files in _base to disk'), ] -flags.DECLARE('instances_path', 'nova.compute.manager') -flags.DECLARE('base_dir_name', 'nova.compute.manager') +CONF = config.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') +CONF.import_opt('base_dir_name', 'nova.compute.manager') FLAGS = flags.FLAGS FLAGS.register_opts(imagecache_opts) @@ -224,6 +226,9 @@ class ImageCacheManager(object): handle manually when it occurs. """ + if not FLAGS.checksum_base_images: + return None + stored_checksum = read_stored_checksum(base_file) if stored_checksum: f = open(base_file, 'r') @@ -249,7 +254,7 @@ class ImageCacheManager(object): # NOTE(mikal): If the checksum file is missing, then we should # create one. We don't create checksums when we download images # from glance because that would delay VM startup. 
- if FLAGS.checksum_base_images and create_if_missing: + if create_if_missing: write_stored_checksum(base_file) return None diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 5da0aa6fb..856efec4f 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -25,6 +25,8 @@ import os import re from lxml import etree + +from nova import config from nova import exception from nova import flags from nova.openstack.common import cfg @@ -45,8 +47,9 @@ util_opts = [ 'non-standard locations') ] -flags.DECLARE('instances_path', 'nova.compute.manager') -flags.DECLARE('base_dir_name', 'nova.compute.manager') +CONF = config.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') +CONF.import_opt('base_dir_name', 'nova.compute.manager') FLAGS = flags.FLAGS FLAGS.register_opts(util_opts) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index b4eee39b3..d6edd1ad8 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -19,6 +19,7 @@ """VIF drivers for libvirt.""" +from nova import config from nova import exception from nova import flags from nova.network import linux_net @@ -28,7 +29,7 @@ from nova import utils from nova.virt import netutils from nova.virt import vif -from nova.virt.libvirt import config +from nova.virt.libvirt import config as vconfig LOG = logging.getLogger(__name__) @@ -43,7 +44,8 @@ libvirt_vif_opts = [ FLAGS = flags.FLAGS FLAGS.register_opts(libvirt_vif_opts) -flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver') +CONF = config.CONF +CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver') LINUX_DEV_LEN = 14 @@ -56,7 +58,7 @@ class LibvirtBridgeDriver(vif.VIFDriver): mac_id = mapping['mac'].replace(':', '') - conf = config.LibvirtConfigGuestInterface() + conf = vconfig.LibvirtConfigGuestInterface() conf.net_type = "bridge" conf.mac_addr = mapping['mac'] conf.source_dev = network['bridge'] @@ -161,7 +163,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver): 
self.create_ovs_vif_port(dev, iface_id, mapping['mac'], instance['uuid']) - conf = config.LibvirtConfigGuestInterface() + conf = vconfig.LibvirtConfigGuestInterface() if FLAGS.libvirt_use_virtio_for_bridges: conf.model = "virtio" @@ -255,7 +257,7 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver): """ Pass data required to create OVS virtual port element""" network, mapping = vif - conf = config.LibvirtConfigGuestInterface() + conf = vconfig.LibvirtConfigGuestInterface() conf.net_type = "bridge" conf.source_dev = FLAGS.libvirt_ovs_bridge @@ -286,7 +288,7 @@ class QuantumLinuxBridgeVIFDriver(vif.VIFDriver): if FLAGS.libvirt_type != 'xen': linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev) - conf = config.LibvirtConfigGuestInterface() + conf = vconfig.LibvirtConfigGuestInterface() if FLAGS.libvirt_use_virtio_for_bridges: conf.model = 'virtio' diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index fd10f431b..7020c2518 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -20,17 +20,19 @@ import os import time +from nova import config from nova import exception from nova import flags from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova import utils -from nova.virt.libvirt import config +from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as virtutils LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS -flags.DECLARE('num_iscsi_scan_tries', 'nova.volume.driver') +CONF = config.CONF +CONF.import_opt('num_iscsi_scan_tries', 'nova.volume.driver') class LibvirtVolumeDriver(object): @@ -40,7 +42,7 @@ class LibvirtVolumeDriver(object): def connect_volume(self, connection_info, mount_device): """Connect the volume. 
Returns xml for libvirt.""" - conf = config.LibvirtConfigGuestDisk() + conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "block" conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True) conf.driver_format = "raw" @@ -60,7 +62,7 @@ class LibvirtFakeVolumeDriver(LibvirtVolumeDriver): """Driver to attach Network volumes to libvirt.""" def connect_volume(self, connection_info, mount_device): - conf = config.LibvirtConfigGuestDisk() + conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.driver_name = "qemu" conf.driver_format = "raw" @@ -77,7 +79,7 @@ class LibvirtNetVolumeDriver(LibvirtVolumeDriver): """Driver to attach Network volumes to libvirt.""" def connect_volume(self, connection_info, mount_device): - conf = config.LibvirtConfigGuestDisk() + conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False) conf.driver_format = "raw" diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py index 671703ed1..05b642b8f 100644 --- a/nova/virt/netutils.py +++ b/nova/virt/netutils.py @@ -23,12 +23,13 @@ import netaddr +from nova import config from nova import flags FLAGS = flags.FLAGS - -flags.DECLARE('injected_network_template', 'nova.virt.disk.api') +CONF = config.CONF +CONF.import_opt('injected_network_template', 'nova.virt.disk.api') Template = None diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index 0c17dccff..605c95cfd 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -49,6 +49,11 @@ xenapi_agent_opts = [ 'configuration is not injected into the image. ' 'Used if compute_driver=xenapi.XenAPIDriver and ' ' flat_injected=True'), + cfg.StrOpt('xenapi_disable_agent', + default=False, + help='Disable XenAPI agent. 
Reduces the amount of time ' + 'it takes nova to detect that a VM has started, when ' + 'that VM does not have the agent installed'), ] FLAGS = flags.FLAGS @@ -244,6 +249,9 @@ def find_guest_agent(base_dir): tries to locate a guest agent at the path specificed by agent_rel_path """ + if FLAGS.xenapi_disable_agent: + return False + agent_rel_path = FLAGS.xenapi_agent_path agent_path = os.path.join(base_dir, agent_rel_path) if os.path.isfile(agent_path): diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index a928bf861..4d032e891 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -181,13 +181,7 @@ class XenAPIDriver(driver.ComputeDriver): """Finish reverting a resize, powering back on the instance""" # NOTE(vish): Xen currently does not use network info. self._vmops.finish_revert_migration(instance) - block_device_mapping = driver.block_device_info_get_mapping( - block_device_info) - for vol in block_device_mapping: - connection_info = vol['connection_info'] - mount_device = vol['mount_device'].rpartition("/")[2] - self.attach_volume(connection_info, - instance['name'], mount_device) + self._attach_mapped_block_devices(instance, block_device_info) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, @@ -195,6 +189,9 @@ class XenAPIDriver(driver.ComputeDriver): """Completes a resize, turning on the migrated instance""" self._vmops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance) + self._attach_mapped_block_devices(instance, block_device_info) + + def _attach_mapped_block_devices(self, instance, block_device_info): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7aa4a20ce..915c45243 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -30,6 +30,7 @@ from 
nova.compute import api as compute from nova.compute import power_state from nova.compute import vm_mode from nova.compute import vm_states +from nova import config from nova import context as nova_context from nova import db from nova import exception @@ -63,7 +64,8 @@ xenapi_vmops_opts = [ FLAGS = flags.FLAGS FLAGS.register_opts(xenapi_vmops_opts) -flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc') +CONF = config.CONF +CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( firewall.__name__, @@ -157,8 +159,14 @@ class VMOps(object): self.vif_driver = vif_impl(xenapi_session=self._session) self.default_root_dev = '/dev/sda' + @property + def agent_enabled(self): + return not FLAGS.xenapi_disable_agent + def _get_agent(self, instance, vm_ref): - return xapi_agent.XenAPIBasedAgent(self._session, instance, vm_ref) + if self.agent_enabled: + return xapi_agent.XenAPIBasedAgent(self._session, instance, vm_ref) + raise exception.NovaException(_("Error: Agent is disabled")) def list_instances(self): """List VM instances.""" @@ -517,17 +525,6 @@ class VMOps(object): self._start(instance, vm_ref) ctx = nova_context.get_admin_context() - agent_build = db.agent_build_get_by_triple(ctx, 'xen', - instance['os_type'], instance['architecture']) - if agent_build: - LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' - '/%(architecture)s is %(version)s') % agent_build) - else: - LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' - '/%(architecture)s') % { - 'hypervisor': 'xen', - 'os': instance['os_type'], - 'architecture': instance['architecture']}) # Wait for boot to finish LOG.debug(_('Waiting for instance state to become running'), @@ -540,34 +537,47 @@ class VMOps(object): greenthread.sleep(0.5) - # Update agent, if necessary - # This also waits until the agent starts - agent = self._get_agent(instance, vm_ref) - version = agent.get_agent_version() - if version: - LOG.info(_('Instance agent version: %s'), 
version, - instance=instance) - - if (version and agent_build and - cmp_version(version, agent_build['version']) < 0): - agent.agent_update(agent_build) - - # if the guest agent is not available, configure the - # instance, but skip the admin password configuration - no_agent = version is None - - # Inject files, if necessary - if injected_files: - # Inject any files, if specified - for path, contents in injected_files: - agent.inject_file(path, contents) - - # Set admin password, if necessary - if admin_password and not no_agent: - agent.set_admin_password(admin_password) - - # Reset network config - agent.resetnetwork() + if self.agent_enabled: + agent_build = db.agent_build_get_by_triple( + ctx, 'xen', instance['os_type'], instance['architecture']) + if agent_build: + LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + '/%(architecture)s is %(version)s') % agent_build) + else: + LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + '/%(architecture)s') % { + 'hypervisor': 'xen', + 'os': instance['os_type'], + 'architecture': instance['architecture']}) + + # Update agent, if necessary + # This also waits until the agent starts + agent = self._get_agent(instance, vm_ref) + version = agent.get_agent_version() + if version: + LOG.info(_('Instance agent version: %s'), version, + instance=instance) + + if (version and agent_build and + cmp_version(version, agent_build['version']) < 0): + agent.agent_update(agent_build) + + # if the guest agent is not available, configure the + # instance, but skip the admin password configuration + no_agent = version is None + + # Inject files, if necessary + if injected_files: + # Inject any files, if specified + for path, contents in injected_files: + agent.inject_file(path, contents) + + # Set admin password, if necessary + if admin_password and not no_agent: + agent.set_admin_password(admin_password) + + # Reset network config + agent.resetnetwork() # Set VCPU weight vcpu_weight = 
instance['instance_type']['vcpu_weight'] @@ -860,15 +870,21 @@ class VMOps(object): def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - agent = self._get_agent(instance, vm_ref) - agent.set_admin_password(new_pass) + if self.agent_enabled: + vm_ref = self._get_vm_opaque_ref(instance) + agent = self._get_agent(instance, vm_ref) + agent.set_admin_password(new_pass) + else: + raise NotImplementedError() def inject_file(self, instance, path, contents): """Write a file to the VM instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - agent = self._get_agent(instance, vm_ref) - agent.inject_file(path, contents) + if self.agent_enabled: + vm_ref = self._get_vm_opaque_ref(instance) + agent = self._get_agent(instance, vm_ref) + agent.inject_file(path, contents) + else: + raise NotImplementedError() @staticmethod def _sanitize_xenstore_key(key): @@ -1422,9 +1438,12 @@ class VMOps(object): def reset_network(self, instance): """Calls resetnetwork method in agent.""" - vm_ref = self._get_vm_opaque_ref(instance) - agent = self._get_agent(instance, vm_ref) - agent.resetnetwork() + if self.agent_enabled: + vm_ref = self._get_vm_opaque_ref(instance) + agent = self._get_agent(instance, vm_ref) + agent.resetnetwork() + else: + raise NotImplementedError() def inject_hostname(self, instance, vm_ref, hostname): """Inject the hostname of the instance into the xenstore.""" diff --git a/tools/pip-requires b/tools/pip-requires index 922f1a24a..a214d7bc2 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -19,5 +19,6 @@ Babel>=0.9.6 iso8601>=0.1.4 httplib2 setuptools_git>=0.4 +python-cinderclient python-quantumclient>=2.1 python-glanceclient>=0.5.0,<2 diff --git a/tools/test-requires b/tools/test-requires index 536d4deee..fc56d3c87 100644 --- a/tools/test-requires +++ b/tools/test-requires @@ -11,5 +11,4 @@ pep8==1.2 pylint==0.25.2 sphinx>=1.1.2 feedparser -python-cinderclient 
MySQL-python diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py index f04adc74e..10f9c1ffe 100755 --- a/tools/xenserver/vm_vdi_cleaner.py +++ b/tools/xenserver/vm_vdi_cleaner.py @@ -27,17 +27,17 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) +from nova import config from nova import context from nova import db from nova import exception from nova import flags -from nova.openstack.common import cfg from nova.openstack.common import timeutils from nova.virt.xenapi import driver as xenapi_driver -CONF = cfg.CONF -flags.DECLARE("resize_confirm_window", "nova.compute.manager") +CONF = config.CONF +CONF.import_opt("resize_confirm_window", "nova.compute.manager") ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances", |