summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
Diffstat (limited to 'nova')
-rw-r--r--nova/api/ec2/__init__.py20
-rw-r--r--nova/api/ec2/admin.py39
-rw-r--r--nova/api/ec2/apirequest.py6
-rw-r--r--nova/api/ec2/cloud.py50
-rw-r--r--nova/api/openstack/__init__.py3
-rw-r--r--nova/api/openstack/servers.py28
-rw-r--r--nova/auth/ldapdriver.py11
-rw-r--r--nova/auth/manager.py69
-rw-r--r--nova/compute/api.py11
-rw-r--r--nova/compute/manager.py32
-rw-r--r--nova/compute/monitor.py10
-rw-r--r--nova/db/sqlalchemy/api.py34
-rw-r--r--nova/exception.py5
-rw-r--r--nova/fakerabbit.py11
-rw-r--r--nova/flags.py2
-rw-r--r--nova/network/manager.py15
-rw-r--r--nova/objectstore/handler.py43
-rw-r--r--nova/rpc.py19
-rw-r--r--nova/scheduler/manager.py2
-rw-r--r--nova/service.py4
-rw-r--r--nova/tests/test_cloud.py10
-rw-r--r--nova/tests/test_rpc.py3
-rw-r--r--nova/tests/test_virt.py2
-rw-r--r--nova/utils.py8
-rw-r--r--nova/virt/hyperv.py85
-rw-r--r--nova/virt/images.py2
-rw-r--r--nova/virt/libvirt_conn.py65
-rw-r--r--nova/virt/xenapi/fake.py7
-rw-r--r--nova/virt/xenapi/vm_utils.py60
-rw-r--r--nova/virt/xenapi/vmops.py81
-rw-r--r--nova/virt/xenapi/volume_utils.py18
-rw-r--r--nova/virt/xenapi/volumeops.py25
-rw-r--r--nova/virt/xenapi_conn.py13
-rw-r--r--nova/volume/api.py7
-rw-r--r--nova/volume/manager.py7
-rw-r--r--nova/wsgi.py3
36 files changed, 473 insertions, 337 deletions
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 238cb0f38..9938b23f8 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -131,9 +131,11 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- LOG.warn(_('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.'),
- access_key, failures, FLAGS.lockout_minutes)
+ lock_mins = FLAGS.lockout_minutes
+ msg = _('Access key %(access_key)s has had %(failures)d'
+ ' failed authentications and will be locked out'
+ ' for %(lock_mins)d minutes.') % locals()
+ LOG.warn(msg)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@@ -179,8 +181,10 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
- LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
- project.name, context=req.environ['ec2.context'])
+ uname = user.name
+ pname = project.name
+ msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals()
+ LOG.audit(msg, context=req.environ['ec2.context'])
return self.application
@@ -206,7 +210,7 @@ class Requestify(wsgi.Middleware):
LOG.debug(_('action: %s'), action)
for key, value in args.items():
- LOG.debug(_('arg: %s\t\tval: %s'), key, value)
+ LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
# Success!
api_request = apirequest.APIRequest(self.controller, action, args)
@@ -277,8 +281,8 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
- LOG.audit(_("Unauthorized request for controller=%s "
- "and action=%s"), controller, action, context=context)
+ LOG.audit(_('Unauthorized request for controller=%(controller)s '
+ 'and action=%(action)s') % locals(), context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 758b612e8..78ff1b3e0 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -111,19 +111,23 @@ class AdminController(object):
"""Add or remove a role for a user and project."""
if operation == 'add':
if project:
- LOG.audit(_("Adding role %s to user %s for project %s"), role,
- user, project, context=context)
+ msg = _("Adding role %(role)s to user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
- context=context)
+ msg = _("Adding sitewide role %(role)s to"
+ " user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
if project:
- LOG.audit(_("Removing role %s from user %s for project %s"),
- role, user, project, context=context)
+ msg = _("Removing role %(role)s from user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- user, context=context)
+ msg = _("Removing sitewide role %(role)s"
+ " from user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
@@ -139,8 +143,9 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
- LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
- project, context=context)
+ msg = _("Getting x509 for user: %(name)s"
+ " on project: %(project)s") % locals()
+ LOG.audit(msg, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -156,8 +161,9 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
- LOG.audit(_("Create project %s managed by %s"), name, manager_user,
- context=context)
+ msg = _("Create project %(name)s managed by"
+ " %(manager_user)s") % locals()
+ LOG.audit(msg, context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -181,12 +187,13 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
- LOG.audit(_("Adding user %s to project %s"), user, project,
- context=context)
+ msg = _("Adding user %(user)s to project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
- LOG.audit(_("Removing user %s from project %s"), user, project,
- context=context)
+ msg = _("Removing user %(user)s from"
+ " project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 78576470a..d8a2b5f53 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -93,8 +93,10 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
- _error = _('Unsupported API request: controller = %s,'
- 'action = %s') % (self.controller, self.action)
+ controller = self.controller
+ action = self.action
+ _error = _('Unsupported API request: controller = %(controller)s,'
+ ' action = %(action)s') % locals()
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index f63ec9085..22b8c19cb 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -252,18 +252,18 @@ class CloudController(object):
regions = []
for region in FLAGS.region_list:
name, _sep, host = region.partition('=')
- endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)
+ FLAGS.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
- 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
FLAGS.ec2_host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)}]
+ FLAGS.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
@@ -529,11 +529,18 @@ class CloudController(object):
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
- volume_id = [ec2_id_to_id(x) for x in volume_id]
- volumes = self.volume_api.get_all(context)
- # NOTE(vish): volume_id is an optional list of volume ids to filter by.
- volumes = [self._format_volume(context, v) for v in volumes
- if volume_id is None or v['id'] in volume_id]
+ volumes = []
+ for ec2_id in volume_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ try:
+ volume = self.volume_api.get(context, internal_id)
+ volumes.append(volume)
+ except exception.NotFound:
+ raise exception.NotFound(_("Volume %s not found")
+ % ec2_id)
+ else:
+ volumes = self.volume_api.get_all(context)
+ volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
@@ -601,8 +608,9 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
- LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
- instance_id, device, context=context)
+ msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
+ " at %(device)s") % locals()
+ LOG.audit(msg, context=context)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
@@ -657,8 +665,15 @@ class CloudController(object):
reservations = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
- instance_id = [ec2_id_to_id(x) for x in instance_id]
- instances = [self.compute_api.get(context, x) for x in instance_id]
+ instances = []
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ try:
+ instance = self.compute_api.get(context, internal_id)
+ instances.append(instance)
+ except exception.NotFound:
+ raise exception.NotFound(_("Instance %s not found")
+ % ec2_id)
else:
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
@@ -751,8 +766,8 @@ class CloudController(object):
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
- LOG.audit(_("Associate address %s to instance %s"), public_ip,
- instance_id, context=context)
+ LOG.audit(_("Associate address %(public_ip)s to"
+ " instance %(instance_id)s") % locals(), context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
@@ -840,8 +855,9 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- LOG.audit(_("Registered image %s with id %s"), image_location,
- image_id, context=context)
+ msg = _("Registered image %(image_location)s with"
+ " id %(image_id)s") % locals()
+ LOG.audit(msg, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index f2caac483..c70bb39ed 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -38,9 +38,6 @@ from nova.api.openstack import shared_ip_groups
LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
-flags.DEFINE_string('os_krm_mapping_file',
- 'krm_mapping.json',
- 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 8cbcebed2..9d308ea24 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -124,17 +124,22 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, image_id):
- mapping_filename = FLAGS.os_krm_mapping_file
-
- with open(mapping_filename) as f:
- mapping = json.load(f)
- if image_id in mapping:
- return mapping[image_id]
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """
+ Machine images are associated with Kernels and Ramdisk images via
+ metadata stored in Glance as 'image_properties'
+ """
+ def lookup(param):
+ _image_id = image_id
+ try:
+ return image['properties'][param]
+ except KeyError:
+ raise exception.NotFound(
+ _("%(param)s property not found for image %(_image_id)s") %
+ locals())
- raise exception.NotFound(
- _("No entry for image '%s' in mapping file '%s'") %
- (image_id, mapping_filename))
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+ return lookup('kernel_id'), lookup('ramdisk_id')
def create(self, req):
""" Creates a new server for a given user """
@@ -146,7 +151,8 @@ class Controller(wsgi.Controller):
req.environ['nova.context'])[0]
image_id = common.get_image_id_from_image_hash(self._image_service,
req.environ['nova.context'], env['server']['imageId'])
- kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
+ req, image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index a6915ce03..e652f1caa 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -473,8 +473,8 @@ class LdapDriver(object):
raise exception.NotFound(_("The group at dn %s doesn't exist") %
group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate(_("User %s is already a member of "
- "the group %s") % (uid, group_dn))
+ raise exception.Duplicate(_("User %(uid)s is already a member of "
+ "the group %(group_dn)s") % locals())
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
@@ -585,10 +585,11 @@ class LdapDriver(object):
else:
return None
- @staticmethod
- def __dn_to_uid(dn):
+ def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
- return dn.split(',')[0].split('=')[1]
+ query = '(objectclass=novaUser)'
+ user = self.__find_object(dn, query)
+ return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 1652e24e1..450ab803a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -272,16 +272,22 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
- LOG.audit(_("failed authorization: no project named %s (user=%s)"),
- project_id, user.name)
+ pjid = project_id
+ uname = user.name
+ LOG.audit(_("failed authorization: no project named %(pjid)s"
+ " (user=%(uname)s)") % locals())
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
- LOG.audit(_("Failed authorization: user %s not admin and not "
- "member of project %s"), user.name, project.name)
- raise exception.NotFound(_('User %s is not a member of project %s')
- % (user.id, project.id))
+ uname = user.name
+ uid = user.id
+ pjname = project.name
+ pjid = project.id
+ LOG.audit(_("Failed authorization: user %(uname)s not admin"
+ " and not member of project %(pjname)s") % locals())
+ raise exception.NotFound(_('User %(uid)s is not a member of'
+ ' project %(pjid)s') % locals())
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@@ -408,14 +414,16 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Adding role %s to user %s in project %s"), role,
- User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Adding role %(role)s to user %(uid)s"
+ " in project %(pid)s") % locals())
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
+ % locals())
with self.driver() as drv:
- drv.add_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
@@ -434,14 +442,16 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Removing role %s from user %s on project %s"),
- role, User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Removing role %(role)s from user %(uid)s"
+ " on project %(pid)s") % locals())
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Removing sitewide role %(role)s"
+ " from user %(uid)s") % locals())
with self.driver() as drv:
- drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
@@ -502,8 +512,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
- LOG.audit(_("Created project %s with manager %s"), name,
- manager_user)
+ LOG.audit(_("Created project %(name)s with"
+ " manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
@@ -530,11 +540,12 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
- LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
- Project.safe_id(project))
+ Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
@@ -550,11 +561,11 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
- LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
- return drv.remove_from_project(User.safe_id(user),
- Project.safe_id(project))
+ return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
@@ -634,7 +645,10 @@ class AuthManager(object):
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
- LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ rvname = rv.name
+ rvadmin = rv.admin
+ LOG.audit(_("Created user %(rvname)s"
+ " (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
@@ -656,7 +670,8 @@ class AuthManager(object):
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
- LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
+ LOG.audit(_("Admin status set to %(admin)r"
+ " for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 6a3fe08b6..1d8b9d79f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,8 +92,9 @@ class API(base.Base):
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
- LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
- context.project_id, min_count)
+ pid = context.project_id
+ LOG.warn(_("Quota exceeeded for %(pid)s,"
+ " tried to run %(min_count)s instances") % locals())
raise quota.QuotaError(_("Instance quota exceeded. You can only "
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
@@ -183,8 +184,10 @@ class API(base.Base):
instance = self.update(context, instance_id, **updates)
instances.append(instance)
- LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
- context.project_id, context.user_id, instance_id)
+ pid = context.project_id
+ uid = context.user_id
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " instance %(instance_id)s") % locals())
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6f09ce674..376b1ed68 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -77,8 +77,8 @@ def checks_instance_lock(function):
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
- LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
- self, context, instance_id, context=context)
+ LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
+ " |%(instance_id)s|") % locals(), context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
@@ -278,11 +278,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id,
- instance_ref['state'],
- power_state.RUNNING,
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals(),
context=context)
self.db.instance_set_state(context,
@@ -307,9 +307,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id, instance_ref['state'], power_state.RUNNING)
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals())
self.driver.snapshot(instance_ref, image_id)
@@ -517,8 +519,8 @@ class ComputeManager(manager.Manager):
"""Attach a volume to an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint, context=context)
+ LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s"
+ " to %(mountpoint)s") % locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@@ -533,8 +535,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- LOG.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint, context=context)
+ LOG.exception(_("instance %(instance_id)s: attach failed"
+ " %(mountpoint)s, removing") % locals(), context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -548,9 +550,9 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
- LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
- volume_id, volume_ref['mountpoint'], instance_id,
- context=context)
+ mp = volume_ref['mountpoint']
+ LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
+ " on instance %(instance_id)s") % locals(), context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 14d0e8ca1..04e08a235 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -352,8 +352,9 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get blockstats for "%(disk)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rd, wr)
@@ -374,8 +375,9 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get ifstats for "%(interface)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rx, tx)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7b965f672..e28981b3d 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -247,7 +247,8 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No service for %s, %s') % (host, binary))
+ raise exception.NotFound(_('No service for %(host)s, %(binary)s')
+ % locals())
return result
@@ -935,8 +936,8 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('no keypair for user %s, name %s') %
- (user_id, name))
+ raise exception.NotFound(_('no keypair for user %(user_id)s,'
+ ' name %(name)s') % locals())
return result
@@ -1395,11 +1396,13 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
@@ -1537,8 +1540,8 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
- _('No security group named %s for project: %s')
- % (group_name, project_id))
+ _('No security group named %(group_name)s'
+ ' for project: %(project_id)s') % locals())
return result
@@ -1922,8 +1925,8 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
- raise exception.NotFound(_("No console pool with id %(pool_id)s") %
- {'pool_id': pool_id})
+ raise exception.NotFound(_("No console pool with id %(pool_id)s")
+ % locals())
return result
@@ -1939,12 +1942,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
- raise exception.NotFound(_('No console pool of type %(type)s '
+ raise exception.NotFound(_('No console pool of type %(console_type)s '
'for compute host %(compute_host)s '
- 'on proxy host %(host)s') %
- {'type': console_type,
- 'compute_host': compute_host,
- 'host': host})
+ 'on proxy host %(host)s') % locals())
return result
@@ -1982,9 +1982,7 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
first()
if not result:
raise exception.NotFound(_('No console for instance %(instance_id)s '
- 'in pool %(pool_id)s') %
- {'instance_id': instance_id,
- 'pool_id': pool_id})
+ 'in pool %(pool_id)s') % locals())
return result
@@ -2005,9 +2003,7 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
- idesc = (_("on instance %s") % instance_id) if instance_id else ""
+ idesc = (_("on instance %s") % instance_id) if instance_id else ""
raise exception.NotFound(_("No console with id %(console_id)s"
- " %(instance)s") %
- {'instance': idesc,
- 'console_id': console_id})
+ " %(idesc)s") % locals())
return result
diff --git a/nova/exception.py b/nova/exception.py
index f36ffaee1..f604fd63a 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -33,8 +33,9 @@ class ProcessExecutionError(IOError):
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
- message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
- % (description, cmd, exit_code, stdout, stderr)
+ message = _("%(description)s\nCommand: %(cmd)s\n"
+ "Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
+ "Stderr: %(stderr)r") % locals()
IOError.__init__(self, message)
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 7c2d7177b..dd82a9366 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -45,8 +45,9 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- LOG.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ nm = self.name
+ LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
+ ' %(message)s') % locals())
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
@@ -92,8 +93,8 @@ class Backend(base.BaseBackend):
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- LOG.debug(_('Binding %s to %s with key %s'),
- queue, exchange, routing_key)
+ LOG.debug(_('Binding %(queue)s to %(exchange)s with'
+ ' key %(routing_key)s') % locals())
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
def declare_consumer(self, queue, callback, *args, **kwargs):
@@ -117,7 +118,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- LOG.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %(queue)s: %(message)s') % locals())
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 81e2e36f9..43bc174d2 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -218,7 +218,7 @@ def _get_my_ip():
DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
- 'list of region=url pairs separated by commas')
+ 'list of region=fqdn pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 5d7589090..fe99f2612 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -83,7 +83,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
-flags.DEFINE_integer('cnt_vpn_clients', 5,
+flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
'Driver to use for network creation')
@@ -198,8 +198,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s leased to bad"
+ " mac %(inst_addr)s vs %(mac)s") % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@@ -218,8 +219,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s released from bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s released from"
+ " bad mac %(inst_addr)s vs %(mac)s") % locals())
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address,
context=context)
@@ -393,6 +395,7 @@ class FlatDHCPManager(FlatManager):
standalone service.
"""
super(FlatDHCPManager, self).init_host()
+ self.driver.init_host()
self.driver.metadata_forward()
def setup_compute_network(self, context, instance_id):
@@ -458,8 +461,8 @@ class VlanManager(NetworkManager):
standalone service.
"""
super(VlanManager, self).init_host()
- self.driver.metadata_forward()
self.driver.init_host()
+ self.driver.metadata_forward()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 43ed7ffe7..05ddace4b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -180,7 +180,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all() \
+ buckets = [b for b in bucket.Bucket.all()
if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@@ -268,12 +268,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
+ bname = self.bucket.name
+ nm = self.name
+ LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %s from bucket "
- "%s"), self.name, self.bucket.name,
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to get object %(nm)s"
+ " from bucket %(bname)s") % locals(),
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -289,12 +291,13 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
- "%s"),
- self.name, self.bucket.name, context=request.context)
+ LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
+ " bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -310,16 +313,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
-
- LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
context=request.context)
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete object "
- "%(object)s from bucket %(bucket)s") %
- {'object': self.name,
- 'bucket': self.bucket.name},
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
+ "bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -390,10 +391,10 @@ class ImagesResource(resource.Resource):
image_location = get_argument(request, 'image_location', u'')
image_path = os.path.join(FLAGS.images_path, image_id)
- if not image_path.startswith(FLAGS.images_path) or \
- os.path.exists(image_path):
+ if ((not image_path.startswith(FLAGS.images_path)) or
+ os.path.exists(image_path)):
LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
+ "%s"),
image_path, context=request.context)
raise exception.NotAuthorized()
@@ -427,8 +428,8 @@ class ImagesResource(resource.Resource):
if operation:
# operation implies publicity toggle
newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
- newstatus, context=request.context)
+ LOG.audit(_("Toggling publicity flag of image %(image_id)s"
+ " %(newstatus)r") % locals(), context=request.context)
image_object.set_public(newstatus)
else:
# other attributes imply update
diff --git a/nova/rpc.py b/nova/rpc.py
index bbfa71138..01fc6d44b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -89,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- LOG.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ fl_host = FLAGS.rabbit_host
+ fl_port = FLAGS.rabbit_port
+ fl_intv = FLAGS.rabbit_retry_interval
+ LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable. Trying again in %(fl_intv)d seconds.")
+ % locals())
self.failed_connection = True
if self.failed_connection:
LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
+ "after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
@@ -152,7 +153,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
- LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
+ LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@@ -167,7 +168,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
- LOG.debug(_('received %s') % (message_data))
+ LOG.debug(_('received %s') % message_data)
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@@ -180,7 +181,7 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
- LOG.warn(_('no method for message: %s') % (message_data))
+ LOG.warn(_('no method for message: %s') % message_data)
msg_reply(msg_id, _('No method for message: %s') % message_data)
return
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a4d6dd574..e9b47512e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
diff --git a/nova/service.py b/nova/service.py
index 91e00d3d1..2c30997f2 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -221,10 +221,10 @@ def serve(*services):
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
-
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ flag_get = FLAGS.get(flag, None)
+ logging.debug("%(flag)s : %(flag_get)s" % locals())
for x in services:
x.start()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 771b1fcc0..445cc6e8b 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -87,6 +87,16 @@ class CloudTestCase(test.TestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_regions(self):
+ """Makes sure describe regions runs without raising an exception"""
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ regions = FLAGS.region_list
+ FLAGS.region_list = ["one=test_host1", "two=test_host2"]
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+ FLAGS.region_list = regions
+
def test_describe_addresses(self):
"""Makes sure describe addresses runs without raising an exception"""
address = "10.10.10.10"
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 85593ab46..4820e04fb 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -86,7 +86,8 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
- LOG.debug(_("Nested received %s, %s"), queue, value)
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
ret = rpc.call(context,
queue,
{"method": "echo",
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f6800e3d9..0b9b847a0 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -256,7 +256,7 @@ class IptablesFirewallTestCase(test.TestCase):
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
- '# Completed on Tue Jan 18 23:47:56 2011'
+ '# Completed on Tue Jan 18 23:47:56 2011',
]
def test_static_filters(self):
diff --git a/nova/utils.py b/nova/utils.py
index 6d3ddd092..2f3bd2894 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -138,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- LOG.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -214,9 +214,11 @@ def get_my_linklocal(interface):
else:
return 'fe00::'
except IndexError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
+ LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s")
+ % locals())
except ProcessExecutionError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
+ LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s")
+ % locals())
except:
return 'fe00::'
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 30dc1c79b..5facb7aff 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -129,7 +129,7 @@ class HyperVConnection(object):
vm = self._lookup(instance.name)
if vm is not None:
raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -159,7 +159,7 @@ class HyperVConnection(object):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance['name']
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
- [], None, vs_gs_data.GetText_(1))[1:]
+ [], None, vs_gs_data.GetText_(1))[1:]
if ret_val == WMI_JOB_STATUS_STARTED:
success = self._check_job_status(job)
else:
@@ -184,40 +184,40 @@ class HyperVConnection(object):
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [memsetting.GetText_(1)])
+ vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = vcpus
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [procsetting.GetText_(1)])
+ vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(vhdfile)s') % locals())
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
- and r.Address == "0"]
+ and r.Address == "0"]
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
diskdrive = self._clone_wmi_obj(
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
#Set the IDE ctrller as parent.
diskdrive.Parent = ctrller[0].path_()
diskdrive.Address = 0
@@ -263,17 +263,18 @@ class HyperVConnection(object):
default_nic_data = [n for n in emulatednics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._clone_wmi_obj(
- 'Msvm_EmulatedEthernetPortSettingData',
- default_nic_data[0])
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
- vm_name)
- LOG.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -283,7 +284,7 @@ class HyperVConnection(object):
new_resources = self._add_virt_resource(new_nic_data, vm)
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
- vm_name)
+ vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
@@ -319,8 +320,10 @@ class HyperVConnection(object):
if job.JobState != WMI_JOB_STATE_COMPLETED:
LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ desc = job.Description
+ elap = job.ElapsedTime
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ % locals())
return True
def _find_external_network(self):
@@ -386,7 +389,9 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ instance_name = instance.name
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
+ % locals())
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -402,12 +407,14 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
- cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ state = str(HYPERV_POWER_STATE[info.EnabledState])
+ memusage = str(info.MemoryUsage)
+ numprocs = str(info.NumberOfProcessors)
+ uptime = str(info.UpTime)
+
+ LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
+ " mem=%(memusage)s, num_cpu=%(numprocs)s,"
+ " cpu_time=%(uptime)s") % locals())
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -441,22 +448,22 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
- req_state)
+ LOG.info(_("Successfully changed vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
else:
- LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
- req_state)
- raise Exception(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ msg = _("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s") % locals()
+ LOG.error(msg)
+ raise Exception(msg)
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm' %
- instance_name)
+ raise exception.NotFound('Cannot attach volume to missing %s vm'
+ % instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s ' %
- instance_name)
+ raise exception.NotFound('Cannot detach volume from missing %s '
+ % instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index ecf0e5efb..9c987e14d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
+    LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())
def _fetch_s3_image(image, path, user, project):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index d8c1bf48a..58272d6fe 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -236,8 +236,9 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- LOG.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ instance_name = instance['name']
+ LOG.info(_('instance %(instance_name)s: deleting instance files'
+ ' %(target)s') % locals())
if os.path.exists(target):
shutil.rmtree(target)
@@ -418,7 +419,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- LOG.info(_('cool, it\'s a device'))
+ LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -426,7 +427,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %r, fpath: %r'), data, fpath)
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -434,7 +435,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- LOG.info(_('Contents of file %s: %r'), fpath, contents)
+ LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
@exception.wrap_exception
@@ -621,21 +622,22 @@ class LibvirtConnection(object):
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
+ inst_name = inst['name']
+ img_id = inst.image_id
if key:
- LOG.info(_('instance %s: injecting key into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting key into'
+ ' image %(img_id)s') % locals())
if net:
- LOG.info(_('instance %s: injecting net into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting net into'
+ ' image %(img_id)s') % locals())
try:
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
+ ' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
@@ -643,9 +645,6 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -1229,6 +1228,7 @@ class IptablesFirewallDriver(FirewallDriver):
our_chains += [':nova-local - [0:0]']
our_rules += ['-A FORWARD -j nova-local']
+ our_rules += ['-A OUTPUT -j nova-local']
security_groups = {}
# Add our chains
@@ -1269,13 +1269,22 @@ class IptablesFirewallDriver(FirewallDriver):
if(ip_version == 4):
# Allow DHCP responses
dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
- (chain_name, dhcp_server)]
+ our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT ' % (chain_name, dhcp_server)]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidr = self._project_cidr_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
elif(ip_version == 6):
# Allow RA responses
ra_server = self._ra_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p icmpv6' %
- (chain_name, ra_server)]
+ our_rules += ['-A %s -s %s -p icmpv6 '
+ '-j ACCEPT' % (chain_name, ra_server)]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidrv6 = self._project_cidrv6_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' %
+ (chain_name, cidrv6)]
# If nothing matches, jump to the fallback chain
our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
@@ -1362,11 +1371,21 @@ class IptablesFirewallDriver(FirewallDriver):
instance['id'])
def _dhcp_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['gateway']
def _ra_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['ra_server']
+
+ def _project_cidr_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr']
+
+ def _project_cidrv6_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr_v6']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4bfaf4b57..e8352771c 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake")
def log_db_contents(msg=None):
- LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
@@ -331,7 +333,8 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- LOG.debug(_('Calling %s %s'), name, impl)
+ localname = name
+ LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
self._check_session(params)
return impl(*params)
return callit
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6a9c96fc6..4afd28dd8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -134,7 +134,8 @@ class VMHelper(HelperBase):
'pae': 'true', 'viridian': 'true'}
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
@@ -154,10 +155,11 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
- vdi_ref)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
@@ -209,11 +211,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
@@ -231,8 +233,9 @@ class VMHelper(HelperBase):
'other_config': {},
'sm_config': {},
'tags': []})
- LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
- name_label, virtual_size, read_only, sr_ref)
+ LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+ ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
+ % locals())
return vdi_ref
@classmethod
@@ -242,7 +245,8 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
+ LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
+ % locals())
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -255,8 +259,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %(template_vm_ref)s from'
+ ' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -269,8 +273,8 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as ID %s"),
- vdi_uuids, image_id)
+ logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+ " ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
@@ -310,7 +314,7 @@ class VMHelper(HelperBase):
meta, image_file = c.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+ LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
@@ -344,7 +348,7 @@ class VMHelper(HelperBase):
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, type):
url = images.image_url(image)
- LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
+ LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -499,7 +503,8 @@ def get_vhd_parent(session, vdi_rec):
parent_uuid = vdi_rec['sm_config']['vhd-parent']
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ vdi_uuid = vdi_rec['uuid']
+ LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
return parent_ref, parent_rec
else:
return None
@@ -540,16 +545,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
- msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
- % (attempts['counter'], max_attempts))
+ counter = attempts['counter']
+ msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
+ " %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- LOG.debug(_("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."), parent_uuid,
- original_parent_uuid)
+ LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
+ " %(original_parent_uuid)s, waiting for coalesce...")
+ % locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
@@ -567,8 +573,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%s) found for "
- "VM %s") % (num_vdis, vm_ref))
+ raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
@@ -708,8 +714,8 @@ def _write_partition(virtual_size, dev):
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
- LOG.debug(_('Writing partition table %d %d to %s...'),
- primary_first, primary_last, dest)
+ LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
+ ' to %(dest)s...') % locals())
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 6c2fd6a68..628a171fa 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -104,7 +104,9 @@ class VMOps(object):
network_ref, instance.mac_address)
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
+ % locals())
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -196,7 +198,8 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
- logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
+ % locals())
return
try:
@@ -252,41 +255,71 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def destroy(self, instance):
- """Destroy VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
-
- def _destroy(self, instance, vm, shutdown=True):
- """ Destroy VM instance """
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ def _shutdown(self, instance, vm):
+ """Shutdown an instance """
+ state = self.get_info(instance['name'])['state']
+ if state == power_state.SHUTDOWN:
+ LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
+ locals())
return
- # Get the VDIs related to the VM
+
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+
+ def _destroy_vdis(self, instance, vm):
+ """Destroys all VDIs associated with a VM """
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- if shutdown:
+
+ if not vdis:
+ return
+
+ for vdi in vdis:
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- # Disk clean-up
- if vdis:
- for vdi in vdis:
- try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- # VM Destroy
+ def _destroy_vm(self, instance, vm):
+ """Destroys a VM record """
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def destroy(self, instance):
+ """
+ Destroy VM instance
+
+ This is the method exposed by xenapi_conn.destroy(). The rest of the
+ destroy_* methods are internal.
+ """
+ vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """
+ Destroys VM instance by performing:
+
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+        3. Destroying the actual VM record
+ """
+ if vm is None:
+ # Don't complain, just return. This lets us clean up instances
+ # that have already disappeared from the underlying platform.
+ return
+
+ if shutdown:
+ self._shutdown(instance, vm)
+
+ self._destroy_vdis(instance, vm)
+ self._destroy_vm(instance, vm)
+
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0cd15b950..d5ebd29d5 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -71,7 +71,7 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -98,20 +98,20 @@ class VolumeHelper(HelperBase):
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+ ' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
- exc, pbd)
+ LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+ ' PBD %(pbd)s') % locals())
try:
session.get_xenapi().SR.forget(sr_ref)
LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
- sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when forgetting'
+ ' SR %(sr_ref)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -172,8 +172,8 @@ class VolumeHelper(HelperBase):
(volume_id is None) or \
(target_host is None) or \
(target_iqn is None):
- raise StorageError(_('Unable to obtain target information %s, %s')
- % (device_path, mountpoint))
+ raise StorageError(_('Unable to obtain target information'
+ ' %(device_path)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['deviceNumber'] = device_number
volume_info['volumeId'] = volume_id
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 189f968c6..d89a6f995 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -48,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- LOG.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
+ " %(mountpoint)s") % locals())
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -66,9 +66,8 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to create VDI on SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
vbd_ref = VMHelper.create_vbd(self._session,
@@ -78,9 +77,8 @@ class VolumeOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to use SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to use SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
task = self._session.call_xenapi('Async.VBD.plug',
@@ -92,8 +90,8 @@ class VolumeOps(object):
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- LOG.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s attached to'
+ ' instance %(instance_name)s') % locals())
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -103,7 +101,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
@@ -125,5 +124,5 @@ class VolumeOps(object):
LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- LOG.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s detached from'
+ ' instance %(instance_name)s') % locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 927f5905b..78f0d14b9 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -298,19 +298,14 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%s] %s status: success %s") % (
- name,
- task,
- result))
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- LOG.warn(_("Task [%s] %s status: %s %s") % (
- name,
- task,
- status,
- error_info))
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
diff --git a/nova/volume/api.py b/nova/volume/api.py
index ce4831cc3..0bcd8a3b0 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,10 +41,11 @@ class API(base.Base):
def create(self, context, size, name, description):
if quota.allowed_volumes(context, 1, size) < 1:
- LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
- context.project_id, size)
+ pid = context.project_id
+ LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
+ " %(size)sG volume") % locals())
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
- "create a volume of size %s") % size)
+ "create a volume of size %s") % size)
options = {
'size': size,
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 82e3521a8..6f8e25e19 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -103,9 +103,10 @@ class VolumeManager(manager.Manager):
volume_ref['host'] = self.host
try:
- LOG.debug(_("volume %s: creating lv of size %sG"),
- volume_ref['name'],
- volume_ref['size'])
+ vol_name = volume_ref['name']
+ vol_size = volume_ref['size']
+ LOG.debug(_("volume %(vol_name)s: creating lv of"
+ " size %(vol_size)sG") % locals())
self.driver.create_volume(volume_ref)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
diff --git a/nova/wsgi.py b/nova/wsgi.py
index a48bede9c..e01cc1e1e 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -64,7 +64,8 @@ class Server(object):
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
- logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
+ arg0 = sys.argv[0]
+ logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)