Diffstat (limited to 'nova')
-rw-r--r--  nova/adminclient.py  |  64
-rw-r--r--  nova/api/direct.py  |  12
-rw-r--r--  nova/api/ec2/__init__.py  |  76
-rw-r--r--  nova/api/ec2/admin.py  |  63
-rw-r--r--  nova/api/ec2/apirequest.py  |  14
-rw-r--r--  nova/api/ec2/cloud.py  |  108
-rw-r--r--  nova/api/openstack/__init__.py  |  12
-rw-r--r--  nova/api/openstack/auth.py  |  1
-rw-r--r--  nova/api/openstack/common.py  |  35
-rw-r--r--  nova/api/openstack/servers.py  |  72
-rw-r--r--  nova/api/openstack/zones.py  |  80
-rw-r--r--  nova/auth/dbdriver.py  |  4
-rw-r--r--  nova/auth/ldapdriver.py  |  91
-rw-r--r--  nova/auth/manager.py  |  69
-rw-r--r--  nova/auth/novarc.template  |  7
-rw-r--r--  nova/compute/api.py  |  86
-rw-r--r--  nova/compute/instance_types.py  |  4
-rw-r--r--  nova/compute/manager.py  |  131
-rw-r--r--  nova/compute/monitor.py  |  10
-rw-r--r--  nova/compute/power_state.py  |  4
-rw-r--r--  nova/console/manager.py  |  2
-rw-r--r--  nova/console/xvp.py  |  14
-rw-r--r--  nova/context.py  |  5
-rw-r--r--  nova/db/api.py  |  61
-rw-r--r--  nova/db/sqlalchemy/api.py  |  194
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/001_austin.py  |  36
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py  |  27
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py  |  51
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py  |  61
-rw-r--r--  nova/db/sqlalchemy/migration.py  |  25
-rw-r--r--  nova/db/sqlalchemy/models.py  |  18
-rw-r--r--  nova/db/sqlalchemy/session.py  |  13
-rw-r--r--  nova/exception.py  |  36
-rw-r--r--  nova/fakerabbit.py  |  11
-rw-r--r--  nova/flags.py  |  10
-rw-r--r--  nova/image/local.py  |  30
-rw-r--r--  nova/image/s3.py  |  21
-rw-r--r--  nova/log.py  |  36
-rw-r--r--  nova/network/linux_net.py  |  97
-rw-r--r--  nova/network/manager.py  |  66
-rw-r--r--  nova/objectstore/bucket.py  |  2
-rw-r--r--  nova/objectstore/handler.py  |  41
-rw-r--r--  nova/objectstore/image.py  |  15
-rw-r--r--  nova/rpc.py  |  36
-rw-r--r--  nova/scheduler/manager.py  |  2
-rw-r--r--  nova/scheduler/simple.py  |  12
-rw-r--r--  nova/service.py  |  9
-rw-r--r--  nova/test.py  |  3
-rw-r--r--  nova/tests/api/openstack/__init__.py  |  28
-rw-r--r--  nova/tests/api/openstack/test_common.py  |  161
-rw-r--r--  nova/tests/api/openstack/test_images.py  |  1
-rw-r--r--  nova/tests/api/openstack/test_servers.py  |  80
-rw-r--r--  nova/tests/api/openstack/test_zones.py  |  140
-rw-r--r--  nova/tests/db/nova.austin.sqlite  |  bin 0 -> 44032 bytes
-rw-r--r--  nova/tests/test_api.py  |  69
-rw-r--r--  nova/tests/test_cloud.py  |  10
-rw-r--r--  nova/tests/test_compute.py  |  63
-rw-r--r--  nova/tests/test_localization.py  |  100
-rw-r--r--  nova/tests/test_log.py  |  21
-rw-r--r--  nova/tests/test_rpc.py  |  3
-rw-r--r--  nova/tests/test_virt.py  |  74
-rw-r--r--  nova/tests/test_xenapi.py  |  17
-rw-r--r--  nova/twistd.py  |  6
-rw-r--r--  nova/utils.py  |  77
-rw-r--r--  nova/version.py  |  2
-rw-r--r--  nova/virt/fake.py  |  20
-rw-r--r--  nova/virt/hyperv.py  |  89
-rw-r--r--  nova/virt/images.py  |  5
-rw-r--r--  nova/virt/libvirt.xml.template  |  6
-rw-r--r--  nova/virt/libvirt_conn.py  |  145
-rw-r--r--  nova/virt/xenapi/fake.py  |  11
-rw-r--r--  nova/virt/xenapi/vm_utils.py  |  111
-rw-r--r--  nova/virt/xenapi/vmops.py  |  225
-rw-r--r--  nova/virt/xenapi/volume_utils.py  |  18
-rw-r--r--  nova/virt/xenapi/volumeops.py  |  25
-rw-r--r--  nova/virt/xenapi_conn.py  |  33
-rw-r--r--  nova/volume/api.py  |  11
-rw-r--r--  nova/volume/driver.py  |  43
-rw-r--r--  nova/volume/manager.py  |  38
-rw-r--r--  nova/volume/san.py  |  335
-rw-r--r--  nova/wsgi.py  |  5
81 files changed, 3031 insertions, 818 deletions
diff --git a/nova/adminclient.py b/nova/adminclient.py
index b2609c8c4..c614b274c 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -21,6 +21,7 @@ Nova User API client library.
import base64
import boto
+import boto.exception
import httplib
from boto.ec2.regioninfo import RegionInfo
@@ -190,6 +191,45 @@ class HostInfo(object):
setattr(self, name, value)
+class InstanceType(object):
+ """
+ Information about a Nova instance type, as parsed through SAX.
+
+ **Fields include**
+
+ * name
+ * vcpus
+ * disk_gb
+ * memory_mb
+ * flavor_id
+
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.name = None
+ self.vcpus = None
+ self.disk_gb = None
+ self.memory_mb = None
+ self.flavor_id = None
+
+ def __repr__(self):
+ return 'InstanceType:%s' % self.name
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == "memoryMb":
+ self.memory_mb = str(value)
+ elif name == "flavorId":
+ self.flavor_id = str(value)
+ elif name == "diskGb":
+ self.disk_gb = str(value)
+ else:
+ setattr(self, name, str(value))
+
+
class NovaAdminClient(object):
def __init__(
@@ -249,10 +289,14 @@ class NovaAdminClient(object):
def get_user(self, name):
"""Grab a single user by name."""
- user = self.apiconn.get_object('DescribeUser', {'Name': name},
- UserInfo)
- if user.username != None:
- return user
+ try:
+ return self.apiconn.get_object('DescribeUser',
+ {'Name': name},
+ UserInfo)
+ except boto.exception.BotoServerError, e:
+ if e.status == 400 and e.error_code == 'NotFound':
+ return None
+ raise
def has_user(self, username):
"""Determine if user exists."""
@@ -337,6 +381,13 @@ class NovaAdminClient(object):
'MemberUsers': member_users}
return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
+ def modify_project(self, projectname, manager_user=None, description=None):
+ """Modifies an existing project."""
+ params = {'Name': projectname,
+ 'ManagerUser': manager_user,
+ 'Description': description}
+ return self.apiconn.get_status('ModifyProject', params)
+
def delete_project(self, projectname):
"""Permanently deletes the specified project."""
return self.apiconn.get_object('DeregisterProject',
@@ -373,3 +424,8 @@ class NovaAdminClient(object):
def get_hosts(self):
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
+
+ def get_instance_types(self):
+ """Grabs the list of all users."""
+ return self.apiconn.get_list('DescribeInstanceTypes', {},
+ [('item', InstanceType)])
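
The new DescribeInstanceTypes call and the reworked get_user are easiest to see from the caller's side. A minimal sketch, assuming the client can be built with its default connection settings (endpoint and credentials normally come from the environment); it is an illustration, not part of the patch:

    from nova import adminclient

    admin = adminclient.NovaAdminClient()  # assumes default endpoint/credentials

    # get_user() now returns None when the API answers 400/'NotFound',
    # instead of returning a UserInfo whose username is None; any other
    # BotoServerError still propagates to the caller.
    if admin.get_user('nosuchuser') is None:
        print('no such user')

    # Each InstanceType is filled in by boto's SAX handler from the
    # DescribeInstanceTypes response.
    for t in admin.get_instance_types():
        print('%s: vcpus=%s memory_mb=%s disk_gb=%s flavor_id=%s'
              % (t.name, t.vcpus, t.memory_mb, t.disk_gb, t.flavor_id))
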
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 81b3ae202..208b6d086 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -142,9 +142,15 @@ class Reflection(object):
if argspec[2]:
args_out.insert(0, ('**%s' % argspec[2],))
+ if f.__doc__:
+ short_doc = f.__doc__.split('\n')[0]
+ doc = f.__doc__
+ else:
+ short_doc = doc = _('not available')
+
methods['/%s/%s' % (route, k)] = {
- 'short_doc': f.__doc__.split('\n')[0],
- 'doc': f.__doc__,
+ 'short_doc': short_doc,
+ 'doc': doc,
'name': k,
'args': list(reversed(args_out))}
@@ -196,6 +202,8 @@ class ServiceWrapper(wsgi.Controller):
# TODO(termie): do some basic normalization on methods
method = getattr(self.service_handle, action)
+ # NOTE(vish): make sure we have no unicode keys for py2.6.
+ params = dict([(str(k), v) for (k, v) in params.iteritems()])
result = method(context, **params)
if type(result) is dict or type(result) is list:
return self._serialize(result, req)
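
The NOTE(vish) normalization above matters because CPython 2.6 rejects unicode keyword names when a dict is expanded with **. A standalone illustration, outside of Nova:

    def create(context, name=None):
        return name

    params = {u'name': u'test'}       # JSON-decoded parameters arrive as unicode
    try:
        create('ctx', **params)       # TypeError on Python 2.6: keywords must be strings
    except TypeError:
        pass

    params = dict([(str(k), v) for (k, v) in params.items()])  # same fix as above
    print(create('ctx', **params))    # -> test
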
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 238cb0f38..5adc2c075 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,8 +20,6 @@ Starting point for routing EC2 requests.
"""
-import datetime
-import routes
import webob
import webob.dec
import webob.exc
@@ -33,6 +31,7 @@ from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
+from nova.api.ec2 import cloud
from nova.auth import manager
@@ -56,23 +55,20 @@ class RequestLogging(wsgi.Middleware):
@webob.dec.wsgify
def __call__(self, req):
+ start = utils.utcnow()
rv = req.get_response(self.application)
- self.log_request_completion(rv, req)
+ self.log_request_completion(rv, req, start)
return rv
- def log_request_completion(self, response, request):
+ def log_request_completion(self, response, request, start):
controller = request.environ.get('ec2.controller', None)
if controller:
controller = controller.__class__.__name__
action = request.environ.get('ec2.action', None)
ctxt = request.environ.get('ec2.context', None)
- seconds = 'X'
- microseconds = 'X'
- if ctxt:
- delta = datetime.datetime.utcnow() - \
- ctxt.timestamp
- seconds = delta.seconds
- microseconds = delta.microseconds
+ delta = utils.utcnow() - start
+ seconds = delta.seconds
+ microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
@@ -131,9 +127,11 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- LOG.warn(_('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.'),
- access_key, failures, FLAGS.lockout_minutes)
+ lock_mins = FLAGS.lockout_minutes
+ msg = _('Access key %(access_key)s has had %(failures)d'
+ ' failed authentications and will be locked out'
+ ' for %(lock_mins)d minutes.') % locals()
+ LOG.warn(msg)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@@ -168,7 +166,7 @@ class Authenticate(wsgi.Middleware):
req.path)
# Be explicit for what exceptions are 403, the rest bubble as 500
except (exception.NotFound, exception.NotAuthorized) as ex:
- LOG.audit(_("Authentication Failure: %s"), str(ex))
+ LOG.audit(_("Authentication Failure: %s"), unicode(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -179,8 +177,10 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
- LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
- project.name, context=req.environ['ec2.context'])
+ uname = user.name
+ pname = project.name
+ msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals()
+ LOG.audit(msg, context=req.environ['ec2.context'])
return self.application
@@ -206,10 +206,11 @@ class Requestify(wsgi.Middleware):
LOG.debug(_('action: %s'), action)
for key, value in args.items():
- LOG.debug(_('arg: %s\t\tval: %s'), key, value)
+ LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
# Success!
- api_request = apirequest.APIRequest(self.controller, action, args)
+ api_request = apirequest.APIRequest(self.controller, action,
+ req.params['Version'], args)
req.environ['ec2.request'] = api_request
req.environ['ec2.action_args'] = args
return self.application
@@ -227,7 +228,7 @@ class Authorizer(wsgi.Middleware):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
- 'DescribeAvailabilityzones': ['all'],
+ 'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
@@ -277,8 +278,8 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
- LOG.audit(_("Unauthorized request for controller=%s "
- "and action=%s"), controller, action, context=context)
+ LOG.audit(_('Unauthorized request for controller=%(controller)s '
+ 'and action=%(action)s') % locals(), context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@@ -289,7 +290,7 @@ class Authorizer(wsgi.Middleware):
return True
if 'none' in roles:
return False
- return any(context.project.has_role(context.user.id, role)
+ return any(context.project.has_role(context.user_id, role)
for role in roles)
@@ -309,18 +310,32 @@ class Executor(wsgi.Application):
result = None
try:
result = api_request.invoke(context)
+ except exception.InstanceNotFound as ex:
+ LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
+ context=context)
+ ec2_id = cloud.id_to_ec2_id(ex.instance_id)
+ message = _('Instance %s not found') % ec2_id
+ return self._error(req, context, type(ex).__name__, message)
+ except exception.VolumeNotFound as ex:
+ LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
+ context=context)
+ ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x')
+ message = _('Volume %s not found') % ec2_id
+ return self._error(req, context, type(ex).__name__, message)
except exception.NotFound as ex:
- LOG.info(_('NotFound raised: %s'), str(ex), context=context)
- return self._error(req, context, type(ex).__name__, str(ex))
+ LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
+ return self._error(req, context, type(ex).__name__, unicode(ex))
except exception.ApiError as ex:
- LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
+ LOG.exception(_('ApiError raised: %s'), unicode(ex),
+ context=context)
if ex.code:
- return self._error(req, context, ex.code, str(ex))
+ return self._error(req, context, ex.code, unicode(ex))
else:
- return self._error(req, context, type(ex).__name__, str(ex))
+ return self._error(req, context, type(ex).__name__,
+ unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
- LOG.exception(_('Unexpected error raised: %s'), str(ex),
+ LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
extra=extra, context=context)
return self._error(req,
context,
@@ -343,7 +358,8 @@ class Executor(wsgi.Application):
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>%s</RequestID></Response>' %
- (code, message, context.request_id))
+ (utils.utf8(code), utils.utf8(message),
+ utils.utf8(context.request_id)))
return resp
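
A pattern repeated throughout this change is replacing positional '%s' log arguments with named placeholders interpolated via locals(), so a translated message can reorder its variables instead of being pinned to the English word order. A small sketch with a stand-in gettext wrapper (values are invented):

    def _(msg):
        # Stand-in for the gettext wrapper; a real catalog could return a
        # translation that uses the same named placeholders in another order.
        return msg

    access_key = 'AKIAEXAMPLE'
    failures = 5
    lock_mins = 15
    msg = _('Access key %(access_key)s has had %(failures)d'
            ' failed authentications and will be locked out'
            ' for %(lock_mins)d minutes.') % locals()
    print(msg)
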
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 758b612e8..735951082 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -26,6 +26,7 @@ from nova import db
from nova import exception
from nova import log as logging
from nova.auth import manager
+from nova.compute import instance_types
LOG = logging.getLogger('nova.api.ec2.admin')
@@ -62,6 +63,14 @@ def host_dict(host):
return {}
+def instance_dict(name, inst):
+ return {'name': name,
+ 'memory_mb': inst['memory_mb'],
+ 'vcpus': inst['vcpus'],
+ 'disk_gb': inst['local_gb'],
+ 'flavor_id': inst['flavorid']}
+
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -70,6 +79,10 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
+ def describe_instance_types(self, _context, **_kwargs):
+ return {'instanceTypeSet': [instance_dict(n, v) for n, v in
+ instance_types.INSTANCE_TYPES.iteritems()]}
+
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
return user_dict(manager.AuthManager().get_user(name))
@@ -111,19 +124,23 @@ class AdminController(object):
"""Add or remove a role for a user and project."""
if operation == 'add':
if project:
- LOG.audit(_("Adding role %s to user %s for project %s"), role,
- user, project, context=context)
+ msg = _("Adding role %(role)s to user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
- context=context)
+ msg = _("Adding sitewide role %(role)s to"
+ " user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
if project:
- LOG.audit(_("Removing role %s from user %s for project %s"),
- role, user, project, context=context)
+ msg = _("Removing role %(role)s from user %(user)s"
+ " for project %(project)s") % locals()
+ LOG.audit(msg, context=context)
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- user, context=context)
+ msg = _("Removing sitewide role %(role)s"
+ " from user %(user)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
@@ -139,8 +156,9 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
- LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
- project, context=context)
+ msg = _("Getting x509 for user: %(name)s"
+ " on project: %(project)s") % locals()
+ LOG.audit(msg, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -156,8 +174,9 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
- LOG.audit(_("Create project %s managed by %s"), name, manager_user,
- context=context)
+ msg = _("Create project %(name)s managed by"
+ " %(manager_user)s") % locals()
+ LOG.audit(msg, context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -165,6 +184,17 @@ class AdminController(object):
description=None,
member_users=None))
+ def modify_project(self, context, name, manager_user, description=None,
+ **kwargs):
+ """Modifies a project"""
+ msg = _("Modify project: %(name)s managed by"
+ " %(manager_user)s") % locals()
+ LOG.audit(msg, context=context)
+ manager.AuthManager().modify_project(name,
+ manager_user=manager_user,
+ description=description)
+ return True
+
def deregister_project(self, context, name):
"""Permanently deletes a project."""
LOG.audit(_("Delete project: %s"), name, context=context)
@@ -181,12 +211,13 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
- LOG.audit(_("Adding user %s to project %s"), user, project,
- context=context)
+ msg = _("Adding user %(user)s to project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
- LOG.audit(_("Removing user %s from project %s"), user, project,
- context=context)
+ msg = _("Removing user %(user)s from"
+ " project %(project)s") % locals()
+ LOG.audit(msg, context=context)
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
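
The describe_instance_types response is just instance_dict() applied to every entry of instance_types.INSTANCE_TYPES. A standalone sketch with an invented m1.small entry (the numbers are illustrative, not taken from this change):

    def instance_dict(name, inst):
        return {'name': name,
                'memory_mb': inst['memory_mb'],
                'vcpus': inst['vcpus'],
                'disk_gb': inst['local_gb'],
                'flavor_id': inst['flavorid']}

    # Example data only; the real mapping lives in nova.compute.instance_types.
    INSTANCE_TYPES = {'m1.small': {'memory_mb': 2048, 'vcpus': 1,
                                   'local_gb': 20, 'flavorid': 2}}
    print({'instanceTypeSet': [instance_dict(n, v)
                               for n, v in INSTANCE_TYPES.iteritems()]})
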
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 78576470a..00b527d62 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -20,6 +20,7 @@
APIRequest class
"""
+import datetime
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
@@ -83,9 +84,10 @@ def _try_convert(value):
class APIRequest(object):
- def __init__(self, controller, action, args):
+ def __init__(self, controller, action, version, args):
self.controller = controller
self.action = action
+ self.version = version
self.args = args
def invoke(self, context):
@@ -93,8 +95,10 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
- _error = _('Unsupported API request: controller = %s,'
- 'action = %s') % (self.controller, self.action)
+ controller = self.controller
+ action = self.action
+ _error = _('Unsupported API request: controller = %(controller)s,'
+ ' action = %(action)s') % locals()
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
@@ -130,7 +134,7 @@ class APIRequest(object):
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
- 'http://ec2.amazonaws.com/doc/2009-11-30/')
+ 'http://ec2.amazonaws.com/doc/%s/' % self.version)
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
@@ -168,6 +172,8 @@ class APIRequest(object):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
+ elif isinstance(data, datetime.datetime):
+ data_el.appendChild(xml.createTextNode(data.isoformat()))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
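
Two behaviours introduced here are easy to check in isolation: the response namespace now echoes whatever Version parameter the client sent instead of the hard-coded 2009-11-30, and datetime values are serialized with isoformat(). The version string below is an example, not a value from this change:

    import datetime

    version = '2010-08-31'     # example client-supplied Version parameter
    print('http://ec2.amazonaws.com/doc/%s/' % version)
    # -> http://ec2.amazonaws.com/doc/2010-08-31/

    launched_at = datetime.datetime(2011, 2, 15, 12, 30, 0)
    print(launched_at.isoformat())
    # -> 2011-02-15T12:30:00
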
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 57d41ed67..882cdcfc9 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -59,7 +59,7 @@ def _gen_key(context, user_id, key_name):
# creation before creating key_pair
try:
db.key_pair_get(context, user_id, key_name)
- raise exception.Duplicate("The key_pair %s already exists"
+ raise exception.Duplicate(_("The key_pair %s already exists")
% key_name)
except exception.NotFound:
pass
@@ -133,7 +133,7 @@ class CloudController(object):
return result
def _get_availability_zone_by_host(self, context, host):
- services = db.service_get_all_by_host(context, host)
+ services = db.service_get_all_by_host(context.elevated(), host)
if len(services) > 0:
return services[0]['availability_zone']
return 'unknown zone'
@@ -252,18 +252,18 @@ class CloudController(object):
regions = []
for region in FLAGS.region_list:
name, _sep, host = region.partition('=')
- endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)
+ FLAGS.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
- 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
+ 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
FLAGS.ec2_host,
FLAGS.ec2_port,
- FLAGS.ec2_suffix)}]
+ FLAGS.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
@@ -282,7 +282,7 @@ class CloudController(object):
'description': 'fixme'}]}
def describe_key_pairs(self, context, key_name=None, **kwargs):
- key_pairs = db.key_pair_get_all_by_user(context, context.user.id)
+ key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
if not key_name is None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
@@ -290,7 +290,7 @@ class CloudController(object):
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
- if context.user.is_admin() or \
+ if context.is_admin or \
not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
@@ -301,7 +301,7 @@ class CloudController(object):
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
- data = _gen_key(context, context.user.id, key_name)
+ data = _gen_key(context, context.user_id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
'keyMaterial': data['private_key']}
@@ -310,7 +310,7 @@ class CloudController(object):
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
- db.key_pair_destroy(context, context.user.id, key_name)
+ db.key_pair_destroy(context, context.user_id, key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
@@ -318,7 +318,7 @@ class CloudController(object):
def describe_security_groups(self, context, group_name=None, **kwargs):
self.compute_api.ensure_default_security_group(context)
- if context.user.is_admin():
+ if context.is_admin:
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
@@ -327,7 +327,9 @@ class CloudController(object):
if not group_name is None:
groups = [g for g in groups if g.name in group_name]
- return {'securityGroupInfo': groups}
+ return {'securityGroupInfo':
+ list(sorted(groups,
+ key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
@@ -492,7 +494,7 @@ class CloudController(object):
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
- group = {'user_id': context.user.id,
+ group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
@@ -512,8 +514,11 @@ class CloudController(object):
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
- # instance_id is passed in as a list of instances
- ec2_id = instance_id[0]
+ # instance_id may be passed in as a list of instances
+ if type(instance_id) == list:
+ ec2_id = instance_id[0]
+ else:
+ ec2_id = instance_id
instance_id = ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
@@ -529,11 +534,14 @@ class CloudController(object):
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
- volume_id = [ec2_id_to_id(x) for x in volume_id]
- volumes = self.volume_api.get_all(context)
- # NOTE(vish): volume_id is an optional list of volume ids to filter by.
- volumes = [self._format_volume(context, v) for v in volumes
- if volume_id is None or v['id'] in volume_id]
+ volumes = []
+ for ec2_id in volume_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ volume = self.volume_api.get(context, internal_id)
+ volumes.append(volume)
+ else:
+ volumes = self.volume_api.get_all(context)
+ volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
@@ -601,8 +609,9 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
- LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
- instance_id, device, context=context)
+ msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
+ " at %(device)s") % locals()
+ LOG.audit(msg, context=context)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
@@ -657,12 +666,15 @@ class CloudController(object):
reservations = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
- instance_id = [ec2_id_to_id(x) for x in instance_id]
- instances = [self.compute_api.get(context, x) for x in instance_id]
+ instances = []
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, internal_id)
+ instances.append(instance)
else:
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
- if not context.user.is_admin():
+ if not context.is_admin:
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
@@ -690,7 +702,7 @@ class CloudController(object):
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
- if context.user.is_admin():
+ if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
@@ -707,7 +719,12 @@ class CloudController(object):
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
- r['groupSet'] = self._convert_to_set([], 'groups')
+ security_group_names = []
+ if instance.get('security_groups'):
+ for security_group in instance['security_groups']:
+ security_group_names.append(security_group['name'])
+ r['groupSet'] = self._convert_to_set(security_group_names,
+ 'groupId')
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
@@ -719,7 +736,7 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- if context.user.is_admin():
+ if context.is_admin:
iterator = db.floating_ip_get_all(context)
else:
iterator = db.floating_ip_get_all_by_project(context,
@@ -729,11 +746,11 @@ class CloudController(object):
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
- instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
+ instance_id = floating_ip_ref['fixed_ip']['instance']['id']
ec2_id = id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
- if context.user.is_admin():
+ if context.is_admin:
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
address_rv['instance_id'] = details
@@ -751,8 +768,8 @@ class CloudController(object):
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
- LOG.audit(_("Associate address %s to instance %s"), public_ip,
- instance_id, context=context)
+ LOG.audit(_("Associate address %(public_ip)s to"
+ " instance %(instance_id)s") % locals(), context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
@@ -824,11 +841,26 @@ class CloudController(object):
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
+ def _format_image(self, context, image):
+ """Convert from format defined by BaseImageService to S3 format."""
+ i = {}
+ i['imageId'] = image.get('id')
+ i['kernelId'] = image.get('kernel_id')
+ i['ramdiskId'] = image.get('ramdisk_id')
+ i['imageOwnerId'] = image.get('owner_id')
+ i['imageLocation'] = image.get('location')
+ i['imageState'] = image.get('status')
+ i['type'] = image.get('type')
+ i['isPublic'] = image.get('is_public')
+ i['architecture'] = image.get('architecture')
+ return i
+
def describe_images(self, context, image_id=None, **kwargs):
- # Note: image_id is a list!
+ # NOTE: image_id is a list!
images = self.image_service.index(context)
if image_id:
- images = filter(lambda x: x['imageId'] in image_id, images)
+ images = filter(lambda x: x['id'] in image_id, images)
+ images = [self._format_image(context, i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
@@ -840,8 +872,9 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- LOG.audit(_("Registered image %s with id %s"), image_location,
- image_id, context=context)
+ msg = _("Registered image %(image_location)s with"
+ " id %(image_id)s") % locals()
+ LOG.audit(msg, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
@@ -850,6 +883,9 @@ class CloudController(object):
% attribute)
try:
image = self.image_service.show(context, image_id)
+ image = self._format_image(context,
+ self.image_service.show(context,
+ image_id))
except IndexError:
raise exception.ApiError(_('invalid id: %s') % image_id)
result = {'image_id': image_id, 'launchPermission': []}
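
The Executor hunks earlier in this change build user-facing ids with id_to_ec2_id, passing 'vol-%08x' for volumes; the underlying formatting is plain zero-padded hex. A quick illustration (the instance template 'i-%08x' is assumed to be the helper's default and is not shown in these hunks):

    internal_id = 5
    print('vol-%08x' % internal_id)   # -> vol-00000005
    print('i-%08x' % internal_id)     # -> i-00000005 (assumed instance template)
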
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index f2caac483..d0b18eced 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -34,13 +34,11 @@ from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import servers
from nova.api.openstack import shared_ip_groups
+from nova.api.openstack import zones
LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
-flags.DEFINE_string('os_krm_mapping_file',
- 'krm_mapping.json',
- 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -54,8 +52,8 @@ class FaultWrapper(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- LOG.exception(_("Caught error: %s"), str(ex))
- exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
+ LOG.exception(_("Caught error: %s"), unicode(ex))
+ exc = webob.exc.HTTPInternalServerError(explanation=unicode(ex))
return faults.Fault(exc)
@@ -82,6 +80,10 @@ class APIRouter(wsgi.Router):
server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['reset_network'] = 'POST'
+
+ mapper.resource("zone", "zones", controller=zones.Controller(),
+ collection={'detail': 'GET'})
mapper.resource("server", "servers", controller=servers.Controller(),
collection={'detail': 'GET'},
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 1dfdd5318..473071738 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -19,6 +19,7 @@ import datetime
import hashlib
import json
import time
+import logging
import webob.exc
import webob.dec
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 037ed47a0..1dc3767e2 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -18,22 +18,29 @@
from nova import exception
-def limited(items, req):
- """Return a slice of items according to requested offset and limit.
-
- items - a sliceable
- req - wobob.Request possibly containing offset and limit GET variables.
- offset is where to start in the list, and limit is the maximum number
- of items to return.
+def limited(items, request, max_limit=1000):
+ """
+ Return a slice of items according to requested offset and limit.
- If limit is not specified, 0, or > 1000, defaults to 1000.
+ @param items: A sliceable entity
+ @param request: `webob.Request` possibly containing 'offset' and 'limit'
+ GET variables. 'offset' is where to start in the list,
+ and 'limit' is the maximum number of items to return. If
+ 'limit' is not specified, 0, or > max_limit, we default
+ to max_limit.
+ @kwarg max_limit: The maximum number of items to return from 'items'
"""
+ try:
+ offset = int(request.GET.get('offset', 0))
+ except ValueError:
+ offset = 0
+
+ try:
+ limit = int(request.GET.get('limit', max_limit))
+ except ValueError:
+ limit = max_limit
- offset = int(req.GET.get('offset', 0))
- limit = int(req.GET.get('limit', 0))
- if not limit:
- limit = 1000
- limit = min(1000, limit)
+ limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
@@ -54,7 +61,7 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
except NotImplementedError:
items = image_service.index(context)
for image in items:
- image_id = image['imageId']
+ image_id = image['id']
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
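
The reworked limited() helper is simple to exercise directly; a sketch assuming webob is available, with made-up query strings:

    import webob

    from nova.api.openstack import common

    items = range(2000)

    req = webob.Request.blank('/servers?offset=10&limit=5')
    print(common.limited(items, req))            # items[10:15]

    req = webob.Request.blank('/servers?limit=0')
    print(len(common.limited(items, req)))       # 1000 -- limit of 0 falls back to max_limit

    req = webob.Request.blank('/servers?offset=junk')
    print(common.limited(items, req)[:3])        # [0, 1, 2] -- a bad offset now defaults to 0
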
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 8cbcebed2..ce9601ecb 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
@@ -64,6 +62,22 @@ def _translate_detail_keys(inst):
inst_dict['status'] = power_mapping[inst_dict['status']]
inst_dict['addresses'] = dict(public=[], private=[])
+
+ # grab single private fixed ip
+ try:
+ private_ip = inst['fixed_ip']['address']
+ if private_ip:
+ inst_dict['addresses']['private'].append(private_ip)
+ except KeyError:
+ LOG.debug(_("Failed to read private ip"))
+
+ # grab all public floating ips
+ try:
+ for floating in inst['fixed_ip']['floating_ips']:
+ inst_dict['addresses']['public'].append(floating['address'])
+ except KeyError:
+ LOG.debug(_("Failed to read public ip(s)"))
+
inst_dict['metadata'] = {}
inst_dict['hostId'] = ''
@@ -124,17 +138,23 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, image_id):
- mapping_filename = FLAGS.os_krm_mapping_file
-
- with open(mapping_filename) as f:
- mapping = json.load(f)
- if image_id in mapping:
- return mapping[image_id]
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """
+ Machine images are associated with Kernels and Ramdisk images via
+ metadata stored in Glance as 'image_properties'
+ """
+ def lookup(param):
+ _image_id = image_id
+ try:
+ return image['properties'][param]
+ except KeyError:
+ raise exception.NotFound(
+ _("%(param)s property not found for image %(_image_id)s") %
+ locals())
- raise exception.NotFound(
- _("No entry for image '%s' in mapping file '%s'") %
- (image_id, mapping_filename))
+ image_id = str(image_id)
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+ return lookup('kernel_id'), lookup('ramdisk_id')
def create(self, req):
""" Creates a new server for a given user """
@@ -142,11 +162,16 @@ class Controller(wsgi.Controller):
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- key_pair = auth_manager.AuthManager.get_key_pairs(
- req.environ['nova.context'])[0]
+ key_pairs = auth_manager.AuthManager.get_key_pairs(
+ req.environ['nova.context'])
+ if not key_pairs:
+ raise exception.NotFound(_("No keypairs defined"))
+ key_pair = key_pairs[0]
+
image_id = common.get_image_id_from_image_hash(self._image_service,
req.environ['nova.context'], env['server']['imageId'])
- kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
+ req, image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
@@ -156,7 +181,8 @@ class Controller(wsgi.Controller):
display_name=env['server']['name'],
display_description=env['server']['name'],
key_name=key_pair['name'],
- key_data=key_pair['public_key'])
+ key_data=key_pair['public_key'],
+ onset_files=env.get('onset_files', []))
return _translate_keys(instances[0])
def update(self, req, id):
@@ -242,6 +268,20 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def reset_network(self, req, id):
+ """
+ Reset networking on an instance (admin only).
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.reset_network(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::reset_network %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
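
The kernel/ramdisk lookup now comes from the machine image's Glance metadata rather than a JSON mapping file. A standalone sketch of the same lookup against an invented image record (LookupError stands in for nova's exception.NotFound):

    image = {'id': '3', 'name': 'example-machine-image',
             'properties': {'kernel_id': '1', 'ramdisk_id': '2'}}

    def lookup(param):
        try:
            return image['properties'][param]
        except KeyError:
            raise LookupError('%s property not found for image %s'
                              % (param, image['id']))

    print(lookup('kernel_id'), lookup('ramdisk_id'))   # -> ('1', '2')
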
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
new file mode 100644
index 000000000..830464ffd
--- /dev/null
+++ b/nova/api/openstack/zones.py
@@ -0,0 +1,80 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import common
+import logging
+
+from nova import flags
+from nova import wsgi
+from nova import db
+
+
+FLAGS = flags.FLAGS
+
+
+def _filter_keys(item, keys):
+ """
+ Filters all model attributes except for keys
+ item is a dict
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
+def _scrub_zone(zone):
+ return _filter_keys(zone, ('id', 'api_url'))
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "zone": ["id", "api_url"]}}}
+
+ def index(self, req):
+ """Return all zones in brief"""
+ items = db.zone_get_all(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_scrub_zone(item) for item in items]
+ return dict(zones=items)
+
+ def detail(self, req):
+ """Return all zones in detail"""
+ return self.index(req)
+
+ def show(self, req, id):
+ """Return data about the given zone id"""
+ zone_id = int(id)
+ zone = db.zone_get(req.environ['nova.context'], zone_id)
+ return dict(zone=_scrub_zone(zone))
+
+ def delete(self, req, id):
+ zone_id = int(id)
+ db.zone_delete(req.environ['nova.context'], zone_id)
+ return {}
+
+ def create(self, req):
+ context = req.environ['nova.context']
+ env = self._deserialize(req.body, req)
+ zone = db.zone_create(context, env["zone"])
+ return dict(zone=_scrub_zone(zone))
+
+ def update(self, req, id):
+ context = req.environ['nova.context']
+ env = self._deserialize(req.body, req)
+ zone_id = int(id)
+ zone = db.zone_update(context, zone_id, env["zone"])
+ return dict(zone=_scrub_zone(zone))
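
Only a zone's id and api_url ever leave the controller; the scrubbing helpers are easy to see standalone (the credential fields in the sample dict are invented for the example):

    def _filter_keys(item, keys):
        return dict((k, v) for k, v in item.iteritems() if k in keys)

    def _scrub_zone(zone):
        return _filter_keys(zone, ('id', 'api_url'))

    zone = {'id': 1, 'api_url': 'http://zone1.example.com:8774/v1.0/',
            'username': 'admin', 'password': 'secret'}
    print(_scrub_zone(zone))
    # -> {'api_url': 'http://zone1.example.com:8774/v1.0/', 'id': 1} (key order may vary)
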
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index 0eb6fe588..d8dad8edd 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -119,8 +119,8 @@ class DbDriver(object):
for member_uid in member_uids:
member = db.user_get(context.get_admin_context(), member_uid)
if not member:
- raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist"
+ raise exception.NotFound(_("Project can't be created "
+ "because user %s doesn't exist")
% member_uid)
members.add(member)
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index bc53e0ec6..5da7751a0 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -74,6 +74,25 @@ LOG = logging.getLogger("nova.ldapdriver")
# in which we may want to change the interface a bit more.
+def _clean(attr):
+ """Clean attr for insertion into ldap"""
+ if attr is None:
+ return None
+ if type(attr) is unicode:
+ return str(attr)
+ return attr
+
+
+def sanitize(fn):
+ """Decorator to sanitize all args"""
+ def _wrapped(self, *args, **kwargs):
+ args = [_clean(x) for x in args]
+ kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
+ return fn(self, *args, **kwargs)
+ _wrapped.func_name = fn.func_name
+ return _wrapped
+
+
class LdapDriver(object):
"""Ldap Auth driver
@@ -106,23 +125,27 @@ class LdapDriver(object):
self.conn.unbind_s()
return False
+ @sanitize
def get_user(self, uid):
"""Retrieve user by id"""
attr = self.__get_ldap_user(uid)
return self.__to_user(attr)
+ @sanitize
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
query = '(accessKey=%s)' % access
dn = FLAGS.ldap_user_subtree
return self.__to_user(self.__find_object(dn, query))
+ @sanitize
def get_project(self, pid):
"""Retrieve project by id"""
dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
+ @sanitize
def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
@@ -134,6 +157,7 @@ class LdapDriver(object):
users.append(user)
return users
+ @sanitize
def get_projects(self, uid=None):
"""Retrieve list of projects"""
pattern = LdapDriver.project_pattern
@@ -143,10 +167,11 @@ class LdapDriver(object):
pattern)
return [self.__to_project(attr) for attr in attrs]
+ @sanitize
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
if self.__user_exists(name):
- raise exception.Duplicate("LDAP user %s already exists" % name)
+ raise exception.Duplicate(_("LDAP user %s already exists") % name)
if FLAGS.ldap_user_modify_only:
if self.__ldap_user_exists(name):
# Retrieve user by name
@@ -196,6 +221,7 @@ class LdapDriver(object):
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
+ @sanitize
def create_project(self, name, manager_uid,
description=None, member_uids=None):
"""Create a project"""
@@ -231,6 +257,7 @@ class LdapDriver(object):
self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
+ @sanitize
def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project"""
if not manager_uid and not description:
@@ -249,21 +276,25 @@ class LdapDriver(object):
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
+ @sanitize
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
+ @sanitize
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
+ @sanitize
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
+ @sanitize
def has_role(self, uid, role, project_id=None):
"""Check if user has role
@@ -273,6 +304,7 @@ class LdapDriver(object):
role_dn = self.__role_to_dn(role, project_id)
return self.__is_in_group(uid, role_dn)
+ @sanitize
def add_role(self, uid, role, project_id=None):
"""Add role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
@@ -283,11 +315,13 @@ class LdapDriver(object):
else:
return self.__add_to_group(uid, role_dn)
+ @sanitize
def remove_role(self, uid, role, project_id=None):
"""Remove role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
+ @sanitize
def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)"""
if project_id is None:
@@ -307,10 +341,11 @@ class LdapDriver(object):
roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles]
+ @sanitize
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s doesn't exist" % uid)
+ raise exception.NotFound(_("User %s doesn't exist") % uid)
self.__remove_from_all(uid)
if FLAGS.ldap_user_modify_only:
# Delete attributes
@@ -332,12 +367,14 @@ class LdapDriver(object):
# Delete entry
self.conn.delete_s(self.__uid_to_dn(uid))
+ @sanitize
def delete_project(self, project_id):
"""Delete a project"""
project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
+ @sanitize
def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing user"""
if not access_key and not secret_key and admin is None:
@@ -432,15 +469,15 @@ class LdapDriver(object):
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
- raise exception.Duplicate("Group can't be created because "
- "group %s already exists" % name)
+ raise exception.Duplicate(_("Group can't be created because "
+ "group %s already exists") % name)
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound("Group can't be created "
- "because user %s doesn't exist" %
- member_uid)
+ raise exception.NotFound(_("Group can't be created "
+ "because user %s doesn't exist")
+ % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -455,8 +492,8 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group "
- "because the user doesn't exist" % uid)
+ raise exception.NotFound(_("User %s can't be searched in group "
+ "because the user doesn't exist") % uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@@ -467,29 +504,29 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group "
- "because the user doesn't exist" % uid)
+ raise exception.NotFound(_("User %s can't be added to the group "
+ "because the user doesn't exist") % uid)
if not self.__group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
+ raise exception.NotFound(_("The group at dn %s doesn't exist") %
group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate(_("User %s is already a member of "
- "the group %s") % (uid, group_dn))
+ raise exception.Duplicate(_("User %(uid)s is already a member of "
+ "the group %(group_dn)s") % locals())
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- group_dn)
+ raise exception.NotFound(_("The group at dn %s doesn't exist")
+ % group_dn)
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be removed from the "
- "group because the user doesn't exist" %
- uid)
+ raise exception.NotFound(_("User %s can't be removed from the "
+ "group because the user doesn't exist")
+ % uid)
if not self.__is_in_group(uid, group_dn):
- raise exception.NotFound("User %s is not a member of the group" %
- uid)
+ raise exception.NotFound(_("User %s is not a member of the group")
+ % uid)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
@@ -509,8 +546,9 @@ class LdapDriver(object):
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all "
- "because the user doesn't exist" % uid)
+ raise exception.NotFound(_("User %s can't be removed from all "
+ "because the user doesn't exist")
+ % uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -584,10 +622,11 @@ class LdapDriver(object):
else:
return None
- @staticmethod
- def __dn_to_uid(dn):
+ def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
- return dn.split(',')[0].split('=')[1]
+ query = '(objectclass=novaUser)'
+ user = self.__find_object(dn, query)
+ return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
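
The sanitize decorator's work is the _clean coercion: unicode arguments are turned into plain str before they reach the LDAP calls, which at this point expect byte strings. The helper copied below with a tiny driver shows the effect:

    def _clean(attr):
        """Clean attr for insertion into ldap"""
        if attr is None:
            return None
        if type(attr) is unicode:
            return str(attr)
        return attr

    print(type(_clean(u'jsmith')))   # -> <type 'str'>
    print(_clean(None))              # -> None
    print(_clean(42))                # -> 42
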
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 1652e24e1..450ab803a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -272,16 +272,22 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
- LOG.audit(_("failed authorization: no project named %s (user=%s)"),
- project_id, user.name)
+ pjid = project_id
+ uname = user.name
+ LOG.audit(_("failed authorization: no project named %(pjid)s"
+ " (user=%(uname)s)") % locals())
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
- LOG.audit(_("Failed authorization: user %s not admin and not "
- "member of project %s"), user.name, project.name)
- raise exception.NotFound(_('User %s is not a member of project %s')
- % (user.id, project.id))
+ uname = user.name
+ uid = user.id
+ pjname = project.name
+ pjid = project.id
+ LOG.audit(_("Failed authorization: user %(uname)s not admin"
+ " and not member of project %(pjname)s") % locals())
+ raise exception.NotFound(_('User %(uid)s is not a member of'
+ ' project %(pjid)s') % locals())
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@@ -408,14 +414,16 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Adding role %s to user %s in project %s"), role,
- User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Adding role %(role)s to user %(uid)s"
+ " in project %(pid)s") % locals())
else:
- LOG.audit(_("Adding sitewide role %s to user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
+ % locals())
with self.driver() as drv:
- drv.add_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
@@ -434,14 +442,16 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
if project:
- LOG.audit(_("Removing role %s from user %s on project %s"),
- role, User.safe_id(user), Project.safe_id(project))
+ LOG.audit(_("Removing role %(role)s from user %(uid)s"
+ " on project %(pid)s") % locals())
else:
- LOG.audit(_("Removing sitewide role %s from user %s"), role,
- User.safe_id(user))
+ LOG.audit(_("Removing sitewide role %(role)s"
+ " from user %(uid)s") % locals())
with self.driver() as drv:
- drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
@@ -502,8 +512,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
- LOG.audit(_("Created project %s with manager %s"), name,
- manager_user)
+ LOG.audit(_("Created project %(name)s with"
+ " manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
@@ -530,11 +540,12 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
- LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
- Project.safe_id(project))
+ Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
@@ -550,11 +561,11 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
- LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
- Project.safe_id(project))
+ uid = User.safe_id(user)
+ pid = Project.safe_id(project)
+ LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
- return drv.remove_from_project(User.safe_id(user),
- Project.safe_id(project))
+ return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
@@ -634,7 +645,10 @@ class AuthManager(object):
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
- LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ rvname = rv.name
+ rvadmin = rv.admin
+ LOG.audit(_("Created user %(rvname)s"
+ " (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
@@ -656,7 +670,8 @@ class AuthManager(object):
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
- LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
+ LOG.audit(_("Admin status set to %(admin)r"
+ " for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index c53a4acdc..cda2ecc28 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
-export CLOUD_SERVERS_API_KEY="%(access)s"
-export CLOUD_SERVERS_USERNAME="%(user)s"
-export CLOUD_SERVERS_URL="%(os)s"
-
+export NOVA_API_KEY="%(access)s"
+export NOVA_USERNAME="%(user)s"
+export NOVA_URL="%(os)s"
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a6b99c1cb..81ea6dc53 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -67,10 +67,10 @@ class API(base.Base):
"""Get the network topic for an instance."""
try:
instance = self.get(context, instance_id)
- except exception.NotFound as e:
+ except exception.NotFound:
LOG.warning(_("Instance %d was not found in get_network_topic"),
instance_id)
- raise e
+ raise
host = instance['host']
if not host:
@@ -85,38 +85,38 @@ class API(base.Base):
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
- availability_zone=None, user_data=None):
+ availability_zone=None, user_data=None,
+ onset_files=None):
"""Create the number of instances requested if quota and
- other arguments check out ok."""
-
+ other arguments check out ok.
+ """
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
- LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
- context.project_id, min_count)
+ pid = context.project_id
+ LOG.warn(_("Quota exceeeded for %(pid)s,"
+ " tried to run %(min_count)s instances") % locals())
raise quota.QuotaError(_("Instance quota exceeded. You can only "
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
- is_vpn = image_id == FLAGS.vpn_image_id
- if not is_vpn:
- image = self.image_service.show(context, image_id)
- if kernel_id is None:
- kernel_id = image.get('kernelId', None)
- if ramdisk_id is None:
- ramdisk_id = image.get('ramdiskId', None)
- # No kernel and ramdisk for raw images
- if kernel_id == str(FLAGS.null_kernel):
- kernel_id = None
- ramdisk_id = None
- LOG.debug(_("Creating a raw instance"))
- # Make sure we have access to kernel and ramdisk (if not raw)
- logging.debug("Using Kernel=%s, Ramdisk=%s" %
- (kernel_id, ramdisk_id))
- if kernel_id:
- self.image_service.show(context, kernel_id)
- if ramdisk_id:
- self.image_service.show(context, ramdisk_id)
+ image = self.image_service.show(context, image_id)
+ if kernel_id is None:
+ kernel_id = image.get('kernel_id', None)
+ if ramdisk_id is None:
+ ramdisk_id = image.get('ramdisk_id', None)
+ # No kernel and ramdisk for raw images
+ if kernel_id == str(FLAGS.null_kernel):
+ kernel_id = None
+ ramdisk_id = None
+ LOG.debug(_("Creating a raw instance"))
+ # Make sure we have access to kernel and ramdisk (if not raw)
+ logging.debug("Using Kernel=%s, Ramdisk=%s" %
+ (kernel_id, ramdisk_id))
+ if kernel_id:
+ self.image_service.show(context, kernel_id)
+ if ramdisk_id:
+ self.image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
@@ -155,7 +155,6 @@ class API(base.Base):
'key_data': key_data,
'locked': False,
'availability_zone': availability_zone}
-
elevated = context.elevated()
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
@@ -183,14 +182,17 @@ class API(base.Base):
instance = self.update(context, instance_id, **updates)
instances.append(instance)
- LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
- context.project_id, context.user_id, instance_id)
+ pid = context.project_id
+ uid = context.user_id
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " instance %(instance_id)s") % locals())
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
- "availability_zone": availability_zone}})
+ "availability_zone": availability_zone,
+ "onset_files": onset_files}})
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
@@ -246,13 +248,16 @@ class API(base.Base):
# ..then we distill the security groups to which they belong..
security_groups = set()
for rule in security_group_rules:
- security_groups.add(rule['parent_group_id'])
+ security_group = self.db.security_group_get(
+ context,
+ rule['parent_group_id'])
+ security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = set()
for security_group in security_groups:
for instance in security_group['instances']:
- instances.add(instance['id'])
+ instances.add(instance)
# ...then we find the hosts where they live...
hosts = set()
@@ -287,10 +292,10 @@ class API(base.Base):
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
- except exception.NotFound as e:
+ except exception.NotFound:
LOG.warning(_("Instance %d was not found during terminate"),
instance_id)
- raise e
+ raise
if (instance['state_description'] == 'terminating'):
LOG.warning(_("Instance %d is already being terminated"),
@@ -312,7 +317,7 @@ class API(base.Base):
def get(self, context, instance_id):
"""Get a single instance with the given ID."""
- rv = self.db.instance_get_by_id(context, instance_id)
+ rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
def get_all(self, context, project_id=None, reservation_id=None,
@@ -428,6 +433,10 @@ class API(base.Base):
"""Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id)
+ def inject_file(self, context, instance_id):
+ """Write a file to the given instance."""
+ self._cast_compute_message('inject_file', context, instance_id)
+
def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console"""
instance = self.get(context, instance_id)
@@ -460,6 +469,13 @@ class API(base.Base):
instance = self.get(context, instance_id)
return instance['locked']
+ def reset_network(self, context, instance_id):
+ """
+ Reset networking on the instance.
+
+ """
+ self._cast_compute_message('reset_network', context, instance_id)
+
def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
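
The NotFound handlers above switch from "raise e" to a bare "raise". The difference, sketched with a stand-in lookup (names invented for illustration):

    def lookup(instance_id, table):
        try:
            return table[instance_id]
        except KeyError:
            # A bare "raise" re-raises the active exception with its original
            # traceback; under Python 2, "raise e" would restart the traceback
            # at this frame and hide where the lookup actually failed.
            print("instance %s was not found" % instance_id)
            raise

    try:
        lookup(42, {})
    except KeyError:
        pass
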
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 196d6a8df..309313fd0 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -38,8 +38,8 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError(_("Unknown instance type: %s"),
- instance_type)
+ raise exception.ApiError(_("Unknown instance type: %s") % \
+ instance_type, "Invalid")
return instance_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6f09ce674..b8d4b7ee9 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -34,10 +34,10 @@ terminating it.
:func:`nova.utils.import_object`
"""
+import base64
import datetime
import random
import string
-import logging
import socket
import functools
@@ -77,8 +77,8 @@ def checks_instance_lock(function):
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
- LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
- self, context, instance_id, context=context)
+ LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
+ " |%(instance_id)s|") % locals(), context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
@@ -118,7 +118,7 @@ class ComputeManager(manager.Manager):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
- self.driver.init_host()
+ self.driver.init_host(host=self.host)
def _update_state(self, context, instance_id):
"""Update the state of an instance from the driver info."""
@@ -128,10 +128,10 @@ class ComputeManager(manager.Manager):
info = self.driver.get_info(instance_ref['name'])
state = info['state']
except exception.NotFound:
- state = power_state.NOSTATE
+ state = power_state.FAILED
self.db.instance_set_state(context, instance_id, state)
- def get_console_topic(self, context, **_kwargs):
+ def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host
Currently this is just set in the flags for each compute
host."""
@@ -140,7 +140,7 @@ class ComputeManager(manager.Manager):
FLAGS.console_topic,
FLAGS.console_host)
- def get_network_topic(self, context, **_kwargs):
+ def get_network_topic(self, context, **kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
# the call to get_network_host cheaper, so that
@@ -159,21 +159,22 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
def refresh_security_group_rules(self, context,
- security_group_id, **_kwargs):
+ security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception
def refresh_security_group_members(self, context,
- security_group_id, **_kwargs):
+ security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
- def run_instance(self, context, instance_id, **_kwargs):
+ def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
+ instance_ref.onset_files = kwargs.get('onset_files', [])
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
@@ -231,22 +232,25 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Terminating instance %s"), instance_id, context=context)
- if not FLAGS.stub_network:
- address = self.db.instance_get_floating_address(context,
- instance_ref['id'])
- if address:
- LOG.debug(_("Disassociating address %s"), address,
+ fixed_ip = instance_ref.get('fixed_ip')
+ if not FLAGS.stub_network and fixed_ip:
+ floating_ips = fixed_ip.get('floating_ips') or []
+ for floating_ip in floating_ips:
+ address = floating_ip['address']
+ LOG.debug("Disassociating address %s", address,
context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
+ network_topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ floating_ip['host'])
rpc.cast(context,
- self.get_network_topic(context),
+ network_topic,
{"method": "disassociate_floating_ip",
"args": {"floating_address": address}})
- address = self.db.instance_get_fixed_address(context,
- instance_ref['id'])
+ address = fixed_ip['address']
if address:
LOG.debug(_("Deallocating address %s"), address,
context=context)
@@ -256,7 +260,7 @@ class ComputeManager(manager.Manager):
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- volumes = instance_ref.get('volumes', []) or []
+ volumes = instance_ref.get('volumes') or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
@@ -278,11 +282,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id,
- instance_ref['state'],
- power_state.RUNNING,
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals(),
context=context)
self.db.instance_set_state(context,
@@ -307,9 +311,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['state'] != power_state.RUNNING:
+ state = instance_ref['state']
+ running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id, instance_ref['state'], power_state.RUNNING)
+ 'instance: %(instance_id)s (state: %(state)s '
+ 'expected: %(running)s)') % locals())
self.driver.snapshot(instance_ref, image_id)
@@ -319,28 +325,43 @@ class ComputeManager(manager.Manager):
"""Set the root/admin password for an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref['state'] != power_state.RUNNING:
- logging.warn('trying to reset the password on a non-running '
- 'instance: %s (state: %s expected: %s)',
- instance_ref['id'],
- instance_ref['state'],
- power_state.RUNNING)
-
- logging.debug('instance %s: setting admin password',
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to reset the password on a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ LOG.audit(_('instance %s: setting admin password'),
instance_ref['name'])
if new_pass is None:
# Generate a random password
- new_pass = self._generate_password(FLAGS.password_length)
-
+ new_pass = utils.generate_password(FLAGS.password_length)
self.driver.set_admin_password(instance_ref, new_pass)
self._update_state(context, instance_id)
- def _generate_password(self, length=20):
- """Generate a random sequence of letters and digits
- to be used as a password.
- """
- chrs = string.letters + string.digits
- return "".join([random.choice(chrs) for i in xrange(length)])
+ @exception.wrap_exception
+ @checks_instance_lock
+ def inject_file(self, context, instance_id, path, file_contents):
+ """Write a file to the specified path on an instance on this server"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to inject a file into a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ # Files/paths *should* be base64-encoded at this point, but
+ # double-check to make sure.
+ b64_path = utils.ensure_b64_encoding(path)
+ b64_contents = utils.ensure_b64_encoding(file_contents)
+ plain_path = base64.b64decode(b64_path)
+ nm = instance_ref['name']
+ msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals()
+ LOG.audit(msg)
+ self.driver.inject_file(instance_ref, b64_path, b64_contents)
@exception.wrap_exception
@checks_instance_lock
@@ -494,6 +515,18 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
+ @checks_instance_lock
+ def reset_network(self, context, instance_id):
+ """
+ Reset networking on the instance.
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.debug(_('instance %s: reset network'), instance_id,
+ context=context)
+ self.driver.reset_network(instance_ref)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
@@ -507,7 +540,7 @@ class ComputeManager(manager.Manager):
def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console"""
context = context.elevated()
- logging.debug(_("instance %s: getting ajax console"), instance_id)
+ LOG.debug(_("instance %s: getting ajax console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref)
@@ -517,8 +550,8 @@ class ComputeManager(manager.Manager):
"""Attach a volume to an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint, context=context)
+ LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s"
+ " to %(mountpoint)s") % locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@@ -533,8 +566,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
            #             exception below.
- LOG.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint, context=context)
+ LOG.exception(_("instance %(instance_id)s: attach failed"
+ " %(mountpoint)s, removing") % locals(), context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -548,9 +581,9 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
- LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
- volume_id, volume_ref['mountpoint'], instance_id,
- context=context)
+ mp = volume_ref['mountpoint']
+ LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
+ " on instance %(instance_id)s") % locals(), context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)
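
The new inject_file path assumes the file path and contents arrive base64-encoded (so they survive the trip through the message queue) and only decodes the path again for the audit message. A small illustration of that calling convention, with invented sample values:

    import base64

    path = b"/tmp/test.txt"
    contents = b"hello from the API"

    # Callers are expected to ship both values base64-encoded; the compute
    # manager decodes the path purely for logging before handing off to the
    # virtualization driver.
    b64_path = base64.b64encode(path)
    b64_contents = base64.b64encode(contents)
    assert base64.b64decode(b64_path) == path
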
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 14d0e8ca1..04e08a235 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -352,8 +352,9 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get blockstats for "%(disk)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rd, wr)
@@ -374,8 +375,9 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ iid = self.instance_id
+ LOG.error(_('Cannot get ifstats for "%(interface)s"'
+ ' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rx, tx)
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index 37039d2ec..adfc2dff0 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -27,6 +27,7 @@ SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
SUSPENDED = 0x07
+FAILED = 0x08
def name(code):
@@ -38,5 +39,6 @@ def name(code):
SHUTDOWN: 'shutdown',
SHUTOFF: 'shutdown',
CRASHED: 'crashed',
- SUSPENDED: 'suspended'}
+ SUSPENDED: 'suspended',
+ FAILED: 'failed to spawn'}
return d[code]
diff --git a/nova/console/manager.py b/nova/console/manager.py
index c55ca8e8f..5697e7cb1 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -67,7 +67,7 @@ class ConsoleProxyManager(manager.Manager):
pool['id'],
instance_id)
except exception.NotFound:
- logging.debug("Adding console")
+ logging.debug(_("Adding console"))
if not password:
password = self.driver.generate_password()
if not port:
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 2a76223da..ee66dac46 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -96,7 +96,7 @@ class XVPConsoleProxy(object):
return os.urandom(length * 2).encode('base64')[:length]
def _rebuild_xvp_conf(self, context):
- logging.debug("Rebuilding xvp conf")
+ logging.debug(_("Rebuilding xvp conf"))
pools = [pool for pool in
db.console_pool_get_all_by_host_type(context, self.host,
self.console_type)
@@ -113,12 +113,12 @@ class XVPConsoleProxy(object):
self._xvp_restart()
def _write_conf(self, config):
- logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+ logging.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
with open(FLAGS.console_xvp_conf, 'w') as cfile:
cfile.write(config)
def _xvp_stop(self):
- logging.debug("Stopping xvp")
+ logging.debug(_("Stopping xvp"))
pid = self._xvp_pid()
if not pid:
return
@@ -131,19 +131,19 @@ class XVPConsoleProxy(object):
def _xvp_start(self):
if self._xvp_check_running():
return
- logging.debug("Starting xvp")
+ logging.debug(_("Starting xvp"))
try:
utils.execute('xvp -p %s -c %s -l %s' %
(FLAGS.console_xvp_pid,
FLAGS.console_xvp_conf,
FLAGS.console_xvp_log))
except exception.ProcessExecutionError, err:
- logging.error("Error starting xvp: %s" % err)
+ logging.error(_("Error starting xvp: %s") % err)
def _xvp_restart(self):
- logging.debug("Restarting xvp")
+ logging.debug(_("Restarting xvp"))
if not self._xvp_check_running():
- logging.debug("xvp not running...")
+ logging.debug(_("xvp not running..."))
self._xvp_start()
else:
pid = self._xvp_pid()
diff --git a/nova/context.py b/nova/context.py
index f2669c9f1..0256bf448 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -28,7 +28,6 @@ from nova import utils
class RequestContext(object):
-
def __init__(self, user, project, is_admin=None, read_deleted=False,
remote_address=None, timestamp=None, request_id=None):
if hasattr(user, 'id'):
@@ -53,7 +52,7 @@ class RequestContext(object):
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
- timestamp = datetime.datetime.utcnow()
+ timestamp = utils.utcnow()
if isinstance(timestamp, str) or isinstance(timestamp, unicode):
timestamp = utils.parse_isotime(timestamp)
self.timestamp = timestamp
@@ -101,7 +100,7 @@ class RequestContext(object):
return cls(**values)
def elevated(self, read_deleted=False):
- """Return a version of this context with admin flag set"""
+ """Return a version of this context with admin flag set."""
return RequestContext(self.user_id,
self.project_id,
True,
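
Swapping datetime.datetime.utcnow() for utils.utcnow() is mainly about testability: an indirection point lets tests pin "now" to a fixed value. A rough sketch of why that helps; the override hook below is an assumption for illustration, not necessarily how nova.utils implements it:

    import datetime

    _override = None  # tests can set this to freeze time

    def utcnow():
        if _override is not None:
            return _override
        return datetime.datetime.utcnow()

    _override = datetime.datetime(2011, 2, 1, 12, 0, 0)
    assert utcnow().year == 2011
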
diff --git a/nova/db/api.py b/nova/db/api.py
index f9d561587..d7f3746d2 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,7 +71,6 @@ class NoMoreTargets(exception.Error):
"""No more available blades"""
pass
-
###################
@@ -289,11 +288,21 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
+def fixed_ip_get_all(context):
+ """Get all defined fixed ips."""
+ return IMPL.fixed_ip_get_all(context)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
+def fixed_ip_get_all_by_instance(context, instance_id):
+ """Get fixed ips by instance or raise if none exist."""
+ return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
+
+
def fixed_ip_get_instance(context, address):
"""Get an instance for a fixed ip by address."""
return IMPL.fixed_ip_get_instance(context, address)
@@ -351,6 +360,11 @@ def instance_get_all_by_project(context, project_id):
return IMPL.instance_get_all_by_project(context, project_id)
+def instance_get_all_by_host(context, host):
+ """Get all instance belonging to a host."""
+ return IMPL.instance_get_all_by_host(context, host)
+
+
def instance_get_all_by_reservation(context, reservation_id):
"""Get all instance belonging to a reservation."""
return IMPL.instance_get_all_by_reservation(context, reservation_id)
@@ -375,11 +389,6 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_get_by_id(context, instance_id):
- """Get an instance by id."""
- return IMPL.instance_get_by_id(context, instance_id)
-
-
def instance_is_vpn(context, instance_id):
"""True if instance is a vpn."""
return IMPL.instance_is_vpn(context, instance_id)
@@ -501,6 +510,11 @@ def network_get(context, network_id):
return IMPL.network_get(context, network_id)
+def network_get_all(context):
+ """Return all defined networks."""
+ return IMPL.network_get_all(context)
+
+
# pylint: disable-msg=C0103
def network_get_associated_fixed_ips(context, network_id):
"""Get all network's ips that have been associated."""
@@ -517,6 +531,11 @@ def network_get_by_instance(context, instance_id):
return IMPL.network_get_by_instance(context, instance_id)
+def network_get_all_by_instance(context, instance_id):
+ """Get all networks by instance id or raise if none exist."""
+ return IMPL.network_get_all_by_instance(context, instance_id)
+
+
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
@@ -557,7 +576,7 @@ def project_get_network(context, project_id, associate=True):
"""
- return IMPL.project_get_network(context, project_id)
+ return IMPL.project_get_network(context, project_id, associate)
def project_get_network_v6(context, project_id):
@@ -981,3 +1000,31 @@ def console_get_all_by_instance(context, instance_id):
def console_get(context, console_id, instance_id=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_id)
+
+
+####################
+
+
+def zone_create(context, values):
+ """Create a new child Zone entry."""
+ return IMPL.zone_create(context, values)
+
+
+def zone_update(context, zone_id, values):
+ """Update a child Zone entry."""
+    return IMPL.zone_update(context, zone_id, values)
+
+
+def zone_delete(context, zone_id):
+ """Delete a child Zone."""
+ return IMPL.zone_delete(context, zone_id)
+
+
+def zone_get(context, zone_id):
+ """Get a specific child Zone."""
+ return IMPL.zone_get(context, zone_id)
+
+
+def zone_get_all(context):
+ """Get all child Zones."""
+ return IMPL.zone_get_all(context)
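
Rough usage sketch for the new zone helpers, assuming a configured and migrated nova database and the usual admin context helper (the URL and credentials are invented):

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    # Register a child zone, then list everything this zone knows about.
    db.zone_create(ctxt, {'api_url': 'http://child.example.com:8774/',
                          'username': 'admin',
                          'password': 'secret'})
    for zone in db.zone_get_all(ctxt):
        print(zone.api_url)
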
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b63b84bed..2697fac73 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,6 +19,7 @@
Implementation of SQLAlchemy backend.
"""
+import datetime
import warnings
from nova import db
@@ -247,7 +248,8 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No service for %s, %s') % (host, binary))
+ raise exception.NotFound(_('No service for %(host)s, %(binary)s')
+ % locals())
return result
@@ -577,10 +579,21 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
'AND instance_id IS NOT NULL '
'AND allocated = 0',
{'host': host,
- 'time': time.isoformat()})
+ 'time': time})
return result.rowcount
+@require_admin_context
+def fixed_ip_get_all(context, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.FixedIp).all()
+ if not result:
+ raise exception.NotFound(_('No fixed ips defined'))
+
+ return result
+
+
@require_context
def fixed_ip_get_by_address(context, address, session=None):
if not session:
@@ -607,6 +620,17 @@ def fixed_ip_get_instance(context, address):
@require_context
+def fixed_ip_get_all_by_instance(context, instance_id):
+ session = get_session()
+ rv = session.query(models.FixedIp).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False)
+ if not rv:
+ raise exception.NotFound(_('No address for instance %s') % instance_id)
+ return rv
+
+
+@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
mac = utils.to_mac(address)
@@ -669,8 +693,14 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- instance_ref = instance_get(context, instance_id, session=session)
- instance_ref.delete(session=session)
+ session.execute('update instances set deleted=1,'
+ 'deleted_at=:at where id=:id',
+ {'id': instance_id,
+ 'at': datetime.datetime.utcnow()})
+ session.execute('update security_group_instance_association '
+ 'set deleted=1,deleted_at=:at where instance_id=:id',
+ {'id': instance_id,
+ 'at': datetime.datetime.utcnow()})
@require_context
@@ -684,6 +714,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -697,7 +728,9 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No instance for id %s') % instance_id)
+ raise exception.InstanceNotFound(_('Instance %s not found')
+ % instance_id,
+ instance_id)
return result
@@ -708,6 +741,7 @@ def instance_get_all(context):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -718,11 +752,24 @@ def instance_get_all_by_user(context, user_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
all()
+@require_admin_context
+def instance_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
+ filter_by(host=host).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
@require_context
def instance_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -731,6 +778,7 @@ def instance_get_all_by_project(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -744,6 +792,7 @@ def instance_get_all_by_reservation(context, reservation_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -751,6 +800,7 @@ def instance_get_all_by_reservation(context, reservation_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=False).\
@@ -770,32 +820,6 @@ def instance_get_project_vpn(context, project_id):
@require_context
-def instance_get_by_id(context, instance_id):
- session = get_session()
-
- if is_admin_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- filter_by(id=instance_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
- elif is_user_context(context):
- result = session.query(models.Instance).\
- options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- filter_by(project_id=context.project_id).\
- filter_by(id=instance_id).\
- filter_by(deleted=False).\
- first()
- if not result:
- raise exception.NotFound(_('Instance %s not found') % (instance_id))
-
- return result
-
-
-@require_context
def instance_get_fixed_address(context, instance_id):
session = get_session()
with session.begin():
@@ -934,8 +958,8 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('no keypair for user %s, name %s') %
- (user_id, name))
+ raise exception.NotFound(_('no keypair for user %(user_id)s,'
+ ' name %(name)s') % locals())
return result
@@ -1054,6 +1078,15 @@ def network_get(context, network_id, session=None):
return result
+@require_admin_context
+def network_get_all(context):
+ session = get_session()
+ result = session.query(models.Network)
+ if not result:
+ raise exception.NotFound(_('No networks defined'))
+ return result
+
+
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable-msg=C0103
@@ -1098,6 +1131,19 @@ def network_get_by_instance(_context, instance_id):
@require_admin_context
+def network_get_all_by_instance(_context, instance_id):
+ session = get_session()
+ rv = session.query(models.Network).\
+ filter_by(deleted=False).\
+ join(models.Network.fixed_ips).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False)
+ if not rv:
+ raise exception.NotFound(_('No network for instance %s') % instance_id)
+ return rv
+
+
+@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
@@ -1394,17 +1440,20 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No volume for id %s') % volume_id)
+ raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
+ volume_id)
return result
@@ -1449,7 +1498,8 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound(_('Volume %s not found') % ec2_id)
+ raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
+ volume_id)
return result.instance
@@ -1536,8 +1586,8 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
- _('No security group named %s for project: %s')
- % (group_name, project_id))
+ _('No security group named %(group_name)s'
+ ' for project: %(project_id)s') % locals())
return result
@@ -1590,6 +1640,11 @@ def security_group_destroy(context, security_group_id):
# TODO(vish): do we have to use sql here?
session.execute('update security_groups set deleted=1 where id=:id',
{'id': security_group_id})
+ session.execute('update security_group_instance_association '
+ 'set deleted=1,deleted_at=:at '
+ 'where security_group_id=:id',
+ {'id': security_group_id,
+ 'at': datetime.datetime.utcnow()})
session.execute('update security_group_rules set deleted=1 '
'where group_id=:id',
{'id': security_group_id})
@@ -1921,8 +1976,8 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
- raise exception.NotFound(_("No console pool with id %(pool_id)s") %
- {'pool_id': pool_id})
+ raise exception.NotFound(_("No console pool with id %(pool_id)s")
+ % locals())
return result
@@ -1938,12 +1993,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
- raise exception.NotFound(_('No console pool of type %(type)s '
+ raise exception.NotFound(_('No console pool of type %(console_type)s '
'for compute host %(compute_host)s '
- 'on proxy host %(host)s') %
- {'type': console_type,
- 'compute_host': compute_host,
- 'host': host})
+ 'on proxy host %(host)s') % locals())
return result
@@ -1981,9 +2033,7 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
first()
if not result:
raise exception.NotFound(_('No console for instance %(instance_id)s '
- 'in pool %(pool_id)s') %
- {'instance_id': instance_id,
- 'pool_id': pool_id})
+ 'in pool %(pool_id)s') % locals())
return result
@@ -2004,9 +2054,51 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
- idesc = (_("on instance %s") % instance_id) if instance_id else ""
+ idesc = (_("on instance %s") % instance_id) if instance_id else ""
raise exception.NotFound(_("No console with id %(console_id)s"
- " %(instance)s") %
- {'instance': idesc,
- 'console_id': console_id})
+ " %(idesc)s") % locals())
return result
+
+
+####################
+
+
+@require_admin_context
+def zone_create(context, values):
+ zone = models.Zone()
+ zone.update(values)
+ zone.save()
+ return zone
+
+
+@require_admin_context
+def zone_update(context, zone_id, values):
+    session = get_session()
+    zone = session.query(models.Zone).filter_by(id=zone_id).first()
+ if not zone:
+ raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ zone.update(values)
+ zone.save()
+ return zone
+
+
+@require_admin_context
+def zone_delete(context, zone_id):
+ session = get_session()
+ with session.begin():
+ session.execute('delete from zones '
+ 'where id=:id', {'id': zone_id})
+
+
+@require_admin_context
+def zone_get(context, zone_id):
+ session = get_session()
+ result = session.query(models.Zone).filter_by(id=zone_id).first()
+ if not result:
+ raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ return result
+
+
+@require_admin_context
+def zone_get_all(context):
+ session = get_session()
+ return session.query(models.Zone).all()
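
instance_destroy (and security_group_destroy) now soft-delete by flagging rows rather than removing them, so deleted-aware queries can still see the association records. The pattern, boiled down to a self-contained sqlite example:

    import datetime
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("create table instances "
                 "(id integer, deleted integer default 0, deleted_at text)")
    conn.execute("insert into instances (id) values (1)")
    # Soft delete: mark the row instead of removing it.
    conn.execute("update instances set deleted=1, deleted_at=:at where id=:id",
                 {"id": 1, "at": datetime.datetime.utcnow().isoformat()})
    row = conn.execute("select deleted from instances where id=1").fetchone()
    assert row[0] == 1
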
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
index a312a7190..9e7ab3554 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -134,6 +134,9 @@ instances = Table('instances', meta,
Column('ramdisk_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
+ Column('server_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
Column('launch_index', Integer()),
Column('key_name',
String(length=255, convert_unicode=False, assert_unicode=None,
@@ -178,23 +181,6 @@ instances = Table('instances', meta,
)
-iscsi_targets = Table('iscsi_targets', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('target_num', Integer()),
- Column('host',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=True),
- )
-
-
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
@@ -522,24 +508,26 @@ def upgrade(migrate_engine):
# bind migrate_engine to your metadata
meta.bind = migrate_engine
- for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
- instances, iscsi_targets, key_pairs, networks,
- projects, quotas, security_groups, security_group_inst_assoc,
- security_group_rules, services, users,
- user_project_association, user_project_role_association,
- user_role_association, volumes):
+ tables = [auth_tokens,
+ instances, key_pairs, networks, fixed_ips, floating_ips,
+ quotas, security_groups, security_group_inst_assoc,
+ security_group_rules, services, users, projects,
+ user_project_association, user_project_role_association,
+ user_role_association, volumes, export_devices]
+ for table in tables:
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
+ meta.drop_all(tables=tables)
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
- instances, iscsi_targets, key_pairs, networks,
+ instances, key_pairs, networks,
projects, quotas, security_groups, security_group_inst_assoc,
security_group_rules, services, users,
user_project_association, user_project_role_association,
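
Both migrations now build their tables from an ordered list and call meta.drop_all(tables=tables) before re-raising, so a failed upgrade does not leave a half-created schema behind. The same pattern in a standalone SQLAlchemy sketch (table definitions trimmed to the bare minimum):

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    meta = MetaData()
    first = Table('first', meta, Column('id', Integer, primary_key=True))
    second = Table('second', meta, Column('id', Integer, primary_key=True))

    engine = create_engine('sqlite://')
    tables = [first, second]
    for table in tables:
        try:
            table.create(bind=engine)
        except Exception:
            # Roll back whatever was created so far, then let the caller
            # see the original failure.
            meta.drop_all(bind=engine, tables=tables)
            raise
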
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index bd3a3e6f8..413536a59 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -41,6 +41,10 @@ networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
#
# New Tables
@@ -131,6 +135,23 @@ instance_actions = Table('instance_actions', meta,
)
+iscsi_targets = Table('iscsi_targets', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('target_num', Integer()),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=True),
+ )
+
+
#
# Tables to alter
#
@@ -188,12 +209,16 @@ def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
- for table in (certificates, consoles, console_pools, instance_actions):
+
+ tables = [certificates, console_pools, consoles, instance_actions,
+ iscsi_targets]
+ for table in tables:
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
+ meta.drop_all(tables=tables)
raise
auth_tokens.c.user_id.alter(type=String(length=255,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
new file mode 100644
index 000000000..5ba7910f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+networks = Table('networks', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+
+
+#
+# Tables to alter
+#
+
+networks_label = Column(
+ 'label',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ networks.create_column(networks_label)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
new file mode 100644
index 000000000..ade981687
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
@@ -0,0 +1,61 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+zones = Table('zones', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('api_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('username',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('password',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# Tables to alter
+#
+
+# (none currently)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (zones, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 33d14827b..9bdaa6d6b 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -17,12 +17,22 @@
# under the License.
import os
+import sys
from nova import flags
import sqlalchemy
from migrate.versioning import api as versioning_api
-from migrate.versioning import exceptions as versioning_exceptions
+
+try:
+ from migrate.versioning import exceptions as versioning_exceptions
+except ImportError:
+ try:
+        # python-migrate changed location of exceptions after 1.6.3
+ # See LP Bug #717467
+ from migrate import exceptions as versioning_exceptions
+ except ImportError:
+ sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
@@ -45,13 +55,16 @@ def db_version():
engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
meta.reflect(bind=engine)
try:
- for table in ('auth_tokens', 'export_devices', 'fixed_ips',
- 'floating_ips', 'instances', 'iscsi_targets',
+ for table in ('auth_tokens', 'zones', 'export_devices',
+ 'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
- 'security_group_rules',
- 'security_group_instance_association', 'services',
+ 'security_group_instance_association',
+ 'security_group_rules', 'security_groups',
+ 'services',
'users', 'user_project_association',
- 'user_project_role_association', 'volumes'):
+ 'user_project_role_association',
+ 'user_role_association',
+ 'volumes'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index c54ebe3ba..40a96fc17 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -311,10 +311,14 @@ class SecurityGroup(BASE, NovaBase):
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
- 'SecurityGroupInstanceAssociation.security_group_id,'
+ 'SecurityGroupInstanceAssociation.security_group_id,'
+ 'SecurityGroupInstanceAssociation.deleted == False,'
'SecurityGroup.deleted == False)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
+ # (anthony) the condition below shouldn't be necessary now that the
+ # association is being marked as deleted. However, removing this
+ # may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == False)',
backref='security_groups')
@@ -369,6 +373,7 @@ class Network(BASE, NovaBase):
"vpn_public_port"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
+ label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True)
@@ -531,6 +536,15 @@ class Console(BASE, NovaBase):
pool = relationship(ConsolePool, backref=backref('consoles'))
+class Zone(BASE, NovaBase):
+ """Represents a child zone of this zone."""
+ __tablename__ = 'zones'
+ id = Column(Integer, primary_key=True)
+ api_url = Column(String(255))
+ username = Column(String(255))
+ password = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -543,7 +557,7 @@ def register_models():
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate, ConsolePool, Console) # , Image, Host
+ Project, Certificate, ConsolePool, Console, Zone)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index c3876c02a..4a9a28f43 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -20,8 +20,10 @@ Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
+from sqlalchemy import pool
from sqlalchemy.orm import sessionmaker
+from nova import exception
from nova import flags
FLAGS = flags.FLAGS
@@ -36,11 +38,18 @@ def get_session(autocommit=True, expire_on_commit=False):
global _MAKER
if not _MAKER:
if not _ENGINE:
+ kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
+ 'echo': False}
+
+ if FLAGS.sql_connection.startswith('sqlite'):
+ kwargs['poolclass'] = pool.NullPool
+
_ENGINE = create_engine(FLAGS.sql_connection,
- pool_recycle=FLAGS.sql_idle_timeout,
- echo=False)
+ **kwargs)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
+ session.query = exception.wrap_db_error(session.query)
+ session.flush = exception.wrap_db_error(session.flush)
return session
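
The session changes do two things: SQLite connections come from a NullPool (a fresh connection per checkout, which avoids reusing one file handle across threads), and every query/flush is wrapped so backend errors surface as DBError. The engine half of that, sketched with an illustrative connection string:

    from sqlalchemy import create_engine, pool

    sql_connection = 'sqlite:///nova.sqlite'   # illustrative value
    kwargs = {'pool_recycle': 3600, 'echo': False}
    if sql_connection.startswith('sqlite'):
        # SQLite gets no connection pooling at all.
        kwargs['poolclass'] = pool.NullPool
    engine = create_engine(sql_connection, **kwargs)
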
diff --git a/nova/exception.py b/nova/exception.py
index ecd814e5d..7d65bd6a5 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -33,8 +33,9 @@ class ProcessExecutionError(IOError):
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
- message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
- % (description, cmd, exit_code, stdout, stderr)
+ message = _("%(description)s\nCommand: %(cmd)s\n"
+ "Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
+ "Stderr: %(stderr)r") % locals()
IOError.__init__(self, message)
@@ -45,7 +46,6 @@ class Error(Exception):
class ApiError(Error):
-
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
@@ -56,6 +56,18 @@ class NotFound(Error):
pass
+class InstanceNotFound(NotFound):
+ def __init__(self, message, instance_id):
+ self.instance_id = instance_id
+ super(InstanceNotFound, self).__init__(message)
+
+
+class VolumeNotFound(NotFound):
+ def __init__(self, message, volume_id):
+ self.volume_id = volume_id
+ super(VolumeNotFound, self).__init__(message)
+
+
class Duplicate(Error):
pass
@@ -80,6 +92,24 @@ class TimeoutException(Error):
pass
+class DBError(Error):
+ """Wraps an implementation specific exception"""
+ def __init__(self, inner_exception):
+ self.inner_exception = inner_exception
+ super(DBError, self).__init__(str(inner_exception))
+
+
+def wrap_db_error(f):
+ def _wrap(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception, e:
+ LOG.exception(_('DB exception wrapped'))
+ raise DBError(e)
+    _wrap.func_name = f.func_name
+    return _wrap
+
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
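
How the new DBError wrapper behaves, re-sketched as a standalone snippet (this is a stand-in re-implementation for illustration, not the exact nova code):

    import functools

    class DBError(Exception):
        """Wraps an implementation-specific exception from the DB layer."""
        def __init__(self, inner_exception):
            self.inner_exception = inner_exception
            super(DBError, self).__init__(str(inner_exception))

    def wrap_db_error(f):
        @functools.wraps(f)
        def _wrap(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                raise DBError(e)
        return _wrap

    @wrap_db_error
    def flaky_query():
        raise ValueError("backend blew up")

    try:
        flaky_query()
    except DBError as e:
        assert isinstance(e.inner_exception, ValueError)
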
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 7c2d7177b..dd82a9366 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -45,8 +45,9 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- LOG.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ nm = self.name
+ LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
+ ' %(message)s') % locals())
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
@@ -92,8 +93,8 @@ class Backend(base.BaseBackend):
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- LOG.debug(_('Binding %s to %s with key %s'),
- queue, exchange, routing_key)
+ LOG.debug(_('Binding %(queue)s to %(exchange)s with'
+ ' key %(routing_key)s') % locals())
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
def declare_consumer(self, queue, callback, *args, **kwargs):
@@ -117,7 +118,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- LOG.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %(queue)s: %(message)s') % locals())
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 81e2e36f9..f64a62da9 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -208,7 +208,7 @@ def _get_my_ip():
(addr, port) = csock.getsockname()
csock.close()
return addr
- except socket.gaierror as ex:
+ except socket.error as ex:
return "127.0.0.1"
@@ -218,7 +218,7 @@ def _get_my_ip():
DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
- 'list of region=url pairs separated by commas')
+ 'list of region=fqdn pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
@@ -282,12 +282,14 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state")
+DEFINE_string('logdir', None, 'output to a per-service log file in named '
+ 'directory')
DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite',
'connection string for sql database')
-DEFINE_string('sql_idle_timeout',
- '3600',
+DEFINE_integer('sql_idle_timeout',
+ 3600,
'timeout for idle sql database connections')
DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
diff --git a/nova/image/local.py b/nova/image/local.py
index b44593221..f78b9aa89 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -18,6 +18,7 @@
import cPickle as pickle
import os.path
import random
+import tempfile
from nova import exception
from nova.image import service
@@ -26,15 +27,12 @@ from nova.image import service
class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
+ It assumes that image_ids are integers.
- It assumes that image_ids are integers."""
+ """
def __init__(self):
- self._path = "/tmp/nova/images"
- try:
- os.makedirs(self._path)
- except OSError: # Exists
- pass
+ self._path = tempfile.mkdtemp()
def _path_to(self, image_id):
return os.path.join(self._path, str(image_id))
@@ -56,9 +54,7 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def create(self, context, data):
- """
- Store the image data and return the new image id.
- """
+ """Store the image data and return the new image id."""
id = random.randint(0, 2 ** 31 - 1)
data['id'] = id
self.update(context, id, data)
@@ -72,8 +68,9 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def delete(self, context, image_id):
- """
- Delete the given image. Raises OSError if the image does not exist.
+ """Delete the given image.
+ Raises OSError if the image does not exist.
+
"""
try:
os.unlink(self._path_to(image_id))
@@ -81,8 +78,13 @@ class LocalImageService(service.BaseImageService):
raise exception.NotFound
def delete_all(self):
- """
- Clears out all images in local directory
- """
+ """Clears out all images in local directory."""
for id in self._ids():
os.unlink(self._path_to(id))
+
+ def delete_imagedir(self):
+ """Deletes the local directory.
+ Raises OSError if directory is not empty.
+
+ """
+ os.rmdir(self._path)
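
On the LocalImageService change: tempfile.mkdtemp() gives each service instance its own scratch directory instead of a shared, hard-coded /tmp/nova/images path, and delete_imagedir() is just the matching rmdir. The core of it:

    import os
    import tempfile

    path = tempfile.mkdtemp()   # unique, freshly created directory
    assert os.path.isdir(path)
    os.rmdir(path)              # only succeeds once the directory is empty
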
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 7b04aa072..14135a1ee 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -36,6 +36,22 @@ from nova.image import service
FLAGS = flags.FLAGS
+def map_s3_to_base(image):
+ """Convert from S3 format to format defined by BaseImageService."""
+ i = {}
+ i['id'] = image.get('imageId')
+ i['name'] = image.get('imageId')
+ i['kernel_id'] = image.get('kernelId')
+ i['ramdisk_id'] = image.get('ramdiskId')
+ i['location'] = image.get('imageLocation')
+ i['owner_id'] = image.get('imageOwnerId')
+ i['status'] = image.get('imageState')
+ i['type'] = image.get('type')
+ i['is_public'] = image.get('isPublic')
+ i['architecture'] = image.get('architecture')
+ return i
+
+
class S3ImageService(service.BaseImageService):
def modify(self, context, image_id, operation):
@@ -70,14 +86,15 @@ class S3ImageService(service.BaseImageService):
response = self._conn(context).make_request(
method='GET',
bucket='_images')
- return json.loads(response.read())
+ images = json.loads(response.read())
+ return [map_s3_to_base(i) for i in images]
def show(self, context, image_id):
"""return a image object if the context has permissions"""
if FLAGS.connection_type == 'fake':
return {'imageId': 'bar'}
result = self.index(context)
- result = [i for i in result if i['imageId'] == image_id]
+ result = [i for i in result if i['id'] == image_id]
if not result:
raise exception.NotFound(_('Image %s could not be found')
% image_id)
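
map_s3_to_base() translates EC2-style camelCase keys into the snake_case keys that BaseImageService consumers expect, which is why show() can now filter on 'id'. A usage sketch, assuming the nova tree with this change is importable (the sample data is invented):

    from nova.image.s3 import map_s3_to_base

    s3_image = {'imageId': 'ami-12345678',
                'kernelId': 'aki-00000001',
                'imageState': 'available',
                'isPublic': True}
    image = map_s3_to_base(s3_image)
    assert image['id'] == 'ami-12345678'
    assert image['kernel_id'] == 'aki-00000001'
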
diff --git a/nova/log.py b/nova/log.py
index 4997d3f28..6b201ffcc 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -28,9 +28,12 @@ It also allows setting of formatting information through flags.
import cStringIO
+import inspect
import json
import logging
import logging.handlers
+import os
+import sys
import traceback
from nova import flags
@@ -40,15 +43,15 @@ from nova import version
FLAGS = flags.FLAGS
flags.DEFINE_string('logging_context_format_string',
- '(%(name)s %(nova_version)s): %(levelname)s '
+ '%(asctime)s %(levelname)s %(name)s '
'[%(request_id)s %(user)s '
'%(project)s] %(message)s',
- 'format string to use for log messages')
+ 'format string to use for log messages with context')
flags.DEFINE_string('logging_default_format_string',
- '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+ '%(asctime)s %(levelname)s %(name)s [-] '
'%(message)s',
- 'format string to use for log messages')
+ 'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix',
'from %(processName)s (pid=%(process)d) %(funcName)s'
@@ -91,7 +94,7 @@ critical = logging.critical
log = logging.log
# handlers
StreamHandler = logging.StreamHandler
-FileHandler = logging.FileHandler
+WatchedFileHandler = logging.handlers.WatchedFileHandler
# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
SysLogHandler = logging.handlers.SysLogHandler
@@ -110,6 +113,18 @@ def _dictify_context(context):
return context
+def _get_binary_name():
+ return os.path.basename(inspect.stack()[-1][1])
+
+
+def get_log_file_path(binary=None):
+ if FLAGS.logfile:
+ return FLAGS.logfile
+ if FLAGS.logdir:
+ binary = binary or _get_binary_name()
+ return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
+
+
def basicConfig():
logging.basicConfig()
for handler in logging.root.handlers:
@@ -122,8 +137,9 @@ def basicConfig():
syslog = SysLogHandler(address='/dev/log')
syslog.setFormatter(_formatter)
logging.root.addHandler(syslog)
- if FLAGS.logfile:
- logfile = FileHandler(FLAGS.logfile)
+ logpath = get_log_file_path()
+ if logpath:
+ logfile = WatchedFileHandler(logpath)
logfile.setFormatter(_formatter)
logging.root.addHandler(logfile)
@@ -191,6 +207,12 @@ class NovaLogger(logging.Logger):
kwargs.pop('exc_info')
self.error(message, **kwargs)
+
+def handle_exception(type, value, tb):
+ logging.root.critical(str(value), exc_info=(type, value, tb))
+
+
+sys.excepthook = handle_exception
logging.setLoggerClass(NovaLogger)
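
The precedence implemented by get_log_file_path() is: an explicit --logfile always wins, otherwise a per-binary file is built under --logdir, otherwise None is returned and no file handler is installed. A small sketch with assumed flag values:

    FLAGS.logdir = '/var/log/nova'
    FLAGS.logfile = None
    get_log_file_path(binary='nova-compute')   # '/var/log/nova/nova-compute.log'
    FLAGS.logfile = '/tmp/override.log'
    get_log_file_path(binary='nova-compute')   # '/tmp/override.log'
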
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index d29e17603..535ce87bc 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -20,6 +20,7 @@ Implements vlans, bridges, and iptables rules using linux utilities.
import os
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -37,10 +38,13 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
+flags.DEFINE_string('dhcp_domain',
+ 'novalocal',
+ 'domain to use for building the hostnames')
flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
-flags.DEFINE_string('public_interface', 'vlan1',
+flags.DEFINE_string('public_interface', 'eth0',
'Interface for public IP addresses')
flags.DEFINE_string('vlan_interface', 'eth0',
'network device for vlans')
@@ -50,6 +54,8 @@ flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
'use the nova_ routing chains instead of default')
+flags.DEFINE_string('input_chain', 'INPUT',
+ 'chain to add nova_input to')
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
@@ -152,6 +158,8 @@ def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
_confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
+ _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
_confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
@@ -160,6 +168,8 @@ def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
_remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
+ _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
_remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
@@ -177,32 +187,77 @@ def ensure_vlan(vlan_num):
LOG.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
- _execute("sudo ifconfig %s up" % interface)
+ _execute("sudo ip link set %s up" % interface)
return interface
def ensure_bridge(bridge, interface, net_attrs=None):
- """Create a bridge unless it already exists"""
+ """Create a bridge unless it already exists.
+
+ :param interface: the interface to create the bridge on.
+ :param net_attrs: dictionary with attributes used to create the bridge.
+
+    If net_attrs is set, it will add net_attrs['gateway'] to the bridge
+    using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
+    the IPv6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+    The code will attempt to move any IPs that already exist on the interface
+    onto the bridge and reset the default gateway if necessary.
+ """
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
- if interface:
- _execute("sudo brctl addif %s %s" % (bridge, interface))
+ _execute("sudo ip link set %s up" % bridge)
if net_attrs:
- _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
- (bridge,
- net_attrs['gateway'],
- net_attrs['broadcast'],
- net_attrs['netmask']))
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+        #             bridge for it to respond to requests properly
+ suffix = net_attrs['cidr'].rpartition('/')[2]
+ out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
+ (net_attrs['gateway'],
+ suffix,
+ net_attrs['broadcast'],
+ bridge),
+ check_exit_code=False)
+ if err and err != "RTNETLINK answers: File exists\n":
+ raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
- _execute("sudo ifconfig %s add %s up" % \
- (bridge,
- net_attrs['cidr_v6']))
- else:
- _execute("sudo ifconfig %s up" % bridge)
+ _execute("sudo ip -f inet6 addr change %s dev %s" %
+ (net_attrs['cidr_v6'], bridge))
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+ # to forward packets properly.
+ if(FLAGS.public_interface == bridge):
+ _execute("sudo ip link set dev %s promisc on" % bridge)
+ if interface:
+ # NOTE(vish): This will break if there is already an ip on the
+ # interface, so we move any ips to the bridge
+ gateway = None
+ out, err = _execute("sudo route -n")
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+ gateway = fields[1]
+ out, err = _execute("sudo ip addr show dev %s scope global" %
+ interface)
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "inet":
+ params = ' '.join(fields[1:-1])
+ _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
+ _execute("sudo ip addr add %s dev %s" % (params, bridge))
+ if gateway:
+ _execute("sudo route add 0.0.0.0 gw %s" % gateway)
+ out, err = _execute("sudo brctl addif %s %s" %
+ (bridge, interface),
+ check_exit_code=False)
+
+ if (err and err != "device %s is already a member of a bridge; can't "
+ "enslave it to bridge %s.\n" % (interface, bridge)):
+ raise exception.Error("Failed to add interface: %s" % err)
+
if FLAGS.use_nova_chains:
(out, err) = _execute("sudo iptables -N nova_forward",
check_exit_code=False)
@@ -298,10 +353,9 @@ interface %s
% pid, check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill -HUP %d' % pid)
- return
+ _execute('sudo kill %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- LOG.debug(_("Hupping radvd threw %s"), exc)
+                LOG.debug(_("Killing radvd threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
command = _ra_cmd(network_ref)
@@ -314,8 +368,9 @@ interface %s
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address"""
instance_ref = fixed_ip_ref['instance']
- return "%s,%s.novalocal,%s" % (instance_ref['mac_address'],
+ return "%s,%s.%s,%s" % (instance_ref['mac_address'],
instance_ref['hostname'],
+ FLAGS.dhcp_domain,
fixed_ip_ref['address'])
@@ -330,7 +385,8 @@ def _execute(cmd, *args, **kwargs):
def _device_exists(device):
"""Check if ethernet device exists"""
- (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False)
+ (_out, err) = _execute("ip link show dev %s" % device,
+ check_exit_code=False)
return not err
@@ -360,6 +416,7 @@ def _dnsmasq_cmd(net):
' --strict-order',
' --bind-interfaces',
' --conf-file=',
+ ' --domain=%s' % FLAGS.dhcp_domain,
' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
' --listen-address=%s' % net['gateway'],
' --except-interface=lo',
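
As a rough sketch of what the reworked ensure_bridge() does for a new bridge, here is a hypothetical call (the bridge, interface, and net_attrs values are invented for illustration):

    ensure_bridge('br100', 'eth0',
                  net_attrs={'gateway': '10.0.0.1',
                             'cidr': '10.0.0.0/24',
                             'broadcast': '10.0.0.255'})
    # roughly: brctl addbr/setfd/stp, 'ip link set br100 up',
    # 'ip addr add 10.0.0.1/24 brd 10.0.0.255 dev br100', then any global
    # addresses (and the default route) on eth0 are moved onto br100 before
    # 'brctl addif br100 eth0' enslaves the interface.
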
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 61de8055a..c6eba225e 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -83,7 +83,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
-flags.DEFINE_integer('cnt_vpn_clients', 5,
+flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
'Driver to use for network creation')
@@ -110,6 +110,7 @@ class NetworkManager(manager.Manager):
This class must be subclassed to support specific topologies.
"""
+ timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
@@ -118,6 +119,10 @@ class NetworkManager(manager.Manager):
super(NetworkManager, self).__init__(*args, **kwargs)
def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ self.driver.init_host()
# Set up networking for the projects for which we're already
# the designated network host.
ctxt = context.get_admin_context()
@@ -134,6 +139,19 @@ class NetworkManager(manager.Manager):
self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address)
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ super(NetworkManager, self).periodic_tasks(context)
+ if self.timeout_fixed_ips:
+ now = utils.utcnow()
+ timeout = FLAGS.fixed_ip_disassociate_timeout
+ time = now - datetime.timedelta(seconds=timeout)
+ num = self.db.fixed_ip_disassociate_all_by_timeout(context,
+ self.host,
+ time)
+ if num:
+                LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
+
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
LOG.debug(_("setting network host"), context=context)
@@ -198,8 +216,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s leased to bad"
+ " mac %(inst_addr)s vs %(mac)s") % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@@ -211,15 +230,16 @@ class NetworkManager(manager.Manager):
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- LOG.debug("Releasing IP %s", address, context=context)
+ LOG.debug(_("Releasing IP %s"), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error(_("IP %s released from bad mac %s vs %s") %
- (address, instance_ref['mac_address'], mac))
+ inst_addr = instance_ref['mac_address']
+ raise exception.Error(_("IP %(address)s released from"
+ " bad mac %(inst_addr)s vs %(mac)s") % locals())
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address,
context=context)
@@ -300,6 +320,7 @@ class FlatManager(NetworkManager):
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
+ timeout_fixed_ips = False
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
@@ -325,11 +346,12 @@ class FlatManager(NetworkManager):
pass
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
+ cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
significant_bits_v6 = 64
+ count = 1
for index in range(num_networks):
start = index * network_size
significant_bits = 32 - int(math.log(network_size, 2))
@@ -342,6 +364,11 @@ class FlatManager(NetworkManager):
net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, count)
+ else:
+ net['label'] = label
+ count += 1
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
@@ -425,6 +452,10 @@ class FlatDHCPManager(FlatManager):
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface,
network_ref)
+ if not FLAGS.fake_network:
+ self.driver.update_dhcp(context, network_id)
+ if(FLAGS.use_ipv6):
+ self.driver.update_ra(context, network_id)
class VlanManager(NetworkManager):
@@ -441,25 +472,12 @@ class VlanManager(NetworkManager):
instances in its subnet.
"""
- def periodic_tasks(self, context=None):
- """Tasks to be run at a periodic interval."""
- super(VlanManager, self).periodic_tasks(context)
- now = datetime.datetime.utcnow()
- timeout = FLAGS.fixed_ip_disassociate_timeout
- time = now - datetime.timedelta(seconds=timeout)
- num = self.db.fixed_ip_disassociate_all_by_timeout(context,
- self.host,
- time)
- if num:
- LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
-
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
super(VlanManager, self).init_host()
self.driver.metadata_forward()
- self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
@@ -494,8 +512,14 @@ class VlanManager(NetworkManager):
network_ref['bridge'])
def create_networks(self, context, cidr, num_networks, network_size,
- vlan_start, vpn_start, cidr_v6):
+ cidr_v6, vlan_start, vpn_start, **kwargs):
"""Create networks based on parameters."""
+ # Check that num_networks + vlan_start is not > 4094, fixes lp708025
+ if num_networks + vlan_start > 4094:
+            raise ValueError(_('The sum of the number of networks and'
+ ' the vlan start cannot be greater'
+ ' than 4094'))
+
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
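
The new range check simply guards the 12-bit VLAN id space (1-4094). A worked example of the arithmetic, with assumed values:

    # With vlan_start=100:
    #   num_networks=3994 -> 100 + 3994 = 4094, accepted
    #   num_networks=3995 -> 100 + 3995 = 4095 > 4094, raises ValueError
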
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index 82767e52f..b213e18e8 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -107,7 +107,7 @@ class Bucket(object):
def is_authorized(self, context):
try:
- return context.user.is_admin() or \
+ return context.is_admin or \
self.owner_id == context.project_id
except Exception, e:
return False
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index bc26fd3c5..05ddace4b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -180,7 +180,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all() \
+ buckets = [b for b in bucket.Bucket.all()
if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@@ -268,12 +268,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
+ bname = self.bucket.name
+ nm = self.name
+ LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %s from bucket "
- "%s"), self.name, self.bucket.name,
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to get object %(nm)s"
+ " from bucket %(bname)s") % locals(),
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -289,12 +291,13 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
- "%s"),
- self.name, self.bucket.name, context=request.context)
+ LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
+ " bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -310,14 +313,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
-
- LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ nm = self.name
+ bname = self.bucket.name
+ LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
context=request.context)
if not self.bucket.is_authorized(request.context):
- LOG.audit("Unauthorized attempt to delete object %s from "
- "bucket %s", self.name, self.bucket.name,
- context=request.context)
+ LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
+ "bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -388,10 +391,10 @@ class ImagesResource(resource.Resource):
image_location = get_argument(request, 'image_location', u'')
image_path = os.path.join(FLAGS.images_path, image_id)
- if not image_path.startswith(FLAGS.images_path) or \
- os.path.exists(image_path):
+ if ((not image_path.startswith(FLAGS.images_path)) or
+ os.path.exists(image_path)):
LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
+ "%s"),
image_path, context=request.context)
raise exception.NotAuthorized()
@@ -425,8 +428,8 @@ class ImagesResource(resource.Resource):
if operation:
# operation implies publicity toggle
newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
- newstatus, context=request.context)
+ LOG.audit(_("Toggling publicity flag of image %(image_id)s"
+ " %(newstatus)r") % locals(), context=request.context)
image_object.set_public(newstatus)
else:
# other attributes imply update
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index abc28182e..27227e2ca 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -69,7 +69,7 @@ class Image(object):
# but only modified by admin or owner.
try:
return (self.metadata['isPublic'] and readonly) or \
- context.user.is_admin() or \
+ context.is_admin or \
self.metadata['imageOwnerId'] == context.project_id
except:
return False
@@ -259,22 +259,25 @@ class Image(object):
process_input=encrypted_key,
check_exit_code=False)
if err:
- raise exception.Error("Failed to decrypt private key: %s" % err)
+ raise exception.Error(_("Failed to decrypt private key: %s")
+ % err)
iv, err = utils.execute(
'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
process_input=encrypted_iv,
check_exit_code=False)
if err:
- raise exception.Error("Failed to decrypt initialization "
- "vector: %s" % err)
+ raise exception.Error(_("Failed to decrypt initialization "
+ "vector: %s") % err)
_out, err = utils.execute(
'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
% (encrypted_filename, key, iv, decrypted_filename),
check_exit_code=False)
if err:
- raise exception.Error("Failed to decrypt image file %s : %s" %
- (encrypted_filename, err))
+ raise exception.Error(_("Failed to decrypt image file "
+ "%(image_file)s: %(err)s") %
+ {'image_file': encrypted_filename,
+ 'err': err})
@staticmethod
def untarzip_image(path, filename):
diff --git a/nova/rpc.py b/nova/rpc.py
index 49b11602b..205bb524a 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -29,6 +29,7 @@ import uuid
from carrot import connection as carrot_connection
from carrot import messaging
+from eventlet import greenpool
from eventlet import greenthread
from nova import context
@@ -42,11 +43,13 @@ from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.rpc')
+flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
+
class Connection(carrot_connection.BrokerConnection):
"""Connection instance object"""
@classmethod
- def instance(cls, new=False):
+ def instance(cls, new=True):
"""Returns the instance"""
if new or not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
@@ -89,15 +92,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- LOG.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ fl_host = FLAGS.rabbit_host
+ fl_port = FLAGS.rabbit_port
+ fl_intv = FLAGS.rabbit_retry_interval
+ LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable. Trying again in %(fl_intv)d seconds.")
+ % locals())
self.failed_connection = True
if self.failed_connection:
LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
+ "after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
@@ -152,13 +156,17 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
- LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
+ LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy
+ self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
+ def receive(self, *args, **kwargs):
+ self.pool.spawn_n(self._receive, *args, **kwargs)
+
@exception.wrap_exception
- def receive(self, message_data, message):
+ def _receive(self, message_data, message):
"""Magically looks for a method on the proxy object and calls it
Message data should be a dictionary with two keys:
@@ -167,7 +175,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
- LOG.debug(_('received %s') % (message_data))
+ LOG.debug(_('received %s') % message_data)
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@@ -180,7 +188,7 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
- LOG.warn(_('no method for message: %s') % (message_data))
+ LOG.warn(_('no method for message: %s') % message_data)
msg_reply(msg_id, _('No method for message: %s') % message_data)
return
@@ -245,7 +253,7 @@ def msg_reply(msg_id, reply=None, failure=None):
LOG.error(_("Returning exception %s to caller"), message)
LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
- conn = Connection.instance(True)
+ conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
try:
publisher.send({'result': reply, 'failure': failure})
@@ -318,7 +326,7 @@ def call(context, topic, msg):
self.result = data['result']
wait_msg = WaitMessage()
- conn = Connection.instance(True)
+ conn = Connection.instance()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
consumer.register_callback(wait_msg)
@@ -343,7 +351,7 @@ def call(context, topic, msg):
def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response"""
- LOG.debug("Making asynchronous cast...")
+ LOG.debug(_("Making asynchronous cast..."))
_pack_context(msg, context)
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
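
With this change receive() only schedules work: each message is handled by _receive() on a green thread taken from a pool capped by --rpc_thread_pool_size, so one slow RPC handler no longer stalls the consumer. The general eventlet pattern, as a standalone sketch (not nova code):

    from eventlet import greenpool

    pool = greenpool.GreenPool(1024)    # same cap as the rpc_thread_pool_size default
    results = []

    def handle(message):
        results.append(message)         # stands in for the (possibly slow) proxy call

    for msg in ['a', 'b', 'c']:
        pool.spawn_n(handle, msg)       # returns immediately; work runs on a green thread
    pool.waitall()                      # results now holds all three messages
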
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a4d6dd574..e9b47512e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 47baf0d73..0191ceb3d 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -43,12 +43,14 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
- if instance_ref['availability_zone'] and context.is_admin:
+ if (instance_ref['availability_zone']
+ and ':' in instance_ref['availability_zone']
+ and context.is_admin):
zone, _x, host = instance_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
if not self.service_is_up(service):
- raise driver.WillNotSchedule("Host %s is not alive" % host)
+ raise driver.WillNotSchedule(_("Host %s is not alive") % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
@@ -75,12 +77,14 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
- if (':' in volume_ref['availability_zone']) and context.is_admin:
+ if (volume_ref['availability_zone']
+ and ':' in volume_ref['availability_zone']
+ and context.is_admin):
zone, _x, host = volume_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
if not self.service_is_up(service):
- raise driver.WillNotSchedule("Host %s not available" % host)
+ raise driver.WillNotSchedule(_("Host %s not available") % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
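
Both schedulers now require an explicit ':' before treating the availability zone as an admin-only host pin; a bare zone name falls through to normal scheduling. How the partition works, with invented values:

    zone, _x, host = 'nova:compute-01'.partition(':')
    # zone == 'nova', host == 'compute-01'
    'nova'.partition(':')   # ('nova', '', '') -- no host part, so no pinning
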
diff --git a/nova/service.py b/nova/service.py
index efc08fd63..59648adf2 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -38,6 +38,7 @@ from nova import log as logging
from nova import flags
from nova import rpc
from nova import utils
+from nova import version
FLAGS = flags.FLAGS
@@ -156,7 +157,9 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.audit(_("Starting %s node"), topic)
+ vcs_string = version.version_string_with_vcs()
+ logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
+ % locals())
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -219,10 +222,10 @@ def serve(*services):
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
-
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ flag_get = FLAGS.get(flag, None)
+ logging.debug("%(flag)s : %(flag_get)s" % locals())
for x in services:
x.start()
diff --git a/nova/test.py b/nova/test.py
index 881baccd5..a12cf9d32 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -69,9 +69,10 @@ class TestCase(unittest.TestCase):
network_manager.VlanManager().create_networks(ctxt,
FLAGS.fixed_range,
5, 16,
+ FLAGS.fixed_range_v6,
FLAGS.vlan_start,
FLAGS.vpn_start,
- FLAGS.fixed_range_v6)
+ )
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index 14eaaa62c..77b1dd37f 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
self.assertEqual(middleware.limiter.__class__.__name__, "Limiter")
middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar')
self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy")
-
-
-class LimiterTest(unittest.TestCase):
-
- def test_limiter(self):
- items = range(2000)
- req = Request.blank('/')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=0')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=3')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=2005')
- self.assertEqual(limited(items, req), [])
- req = Request.blank('/?limit=10')
- self.assertEqual(limited(items, req), items[:10])
- req = Request.blank('/?limit=0')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?limit=3000')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req), items[1:4])
- req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=3&limit=1500')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req), [])
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
new file mode 100644
index 000000000..9d9837cc9
--- /dev/null
+++ b/nova/tests/api/openstack/test_common.py
@@ -0,0 +1,161 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suites for 'common' code used throughout the OpenStack HTTP API.
+"""
+
+import unittest
+
+from webob import Request
+
+from nova.api.openstack.common import limited
+
+
+class LimiterTest(unittest.TestCase):
+ """
+ Unit tests for the `nova.api.openstack.common.limited` method which takes
+ in a list of items and, depending on the 'offset' and 'limit' GET params,
+ returns a subset or complete set of the given items.
+ """
+
+ def setUp(self):
+ """
+ Run before each test.
+ """
+ self.tiny = range(1)
+ self.small = range(10)
+ self.medium = range(1000)
+ self.large = range(10000)
+
+ def test_limiter_offset_zero(self):
+ """
+ Test offset key works with 0.
+ """
+ req = Request.blank('/?offset=0')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_offset_medium(self):
+ """
+ Test offset key works with a medium sized number.
+ """
+ req = Request.blank('/?offset=10')
+ self.assertEqual(limited(self.tiny, req), [])
+ self.assertEqual(limited(self.small, req), self.small[10:])
+ self.assertEqual(limited(self.medium, req), self.medium[10:])
+ self.assertEqual(limited(self.large, req), self.large[10:1010])
+
+ def test_limiter_offset_over_max(self):
+ """
+ Test offset key works with a number over 1000 (max_limit).
+ """
+ req = Request.blank('/?offset=1001')
+ self.assertEqual(limited(self.tiny, req), [])
+ self.assertEqual(limited(self.small, req), [])
+ self.assertEqual(limited(self.medium, req), [])
+ self.assertEqual(limited(self.large, req), self.large[1001:2001])
+
+ def test_limiter_offset_blank(self):
+ """
+ Test offset key works with a blank offset.
+ """
+ req = Request.blank('/?offset=')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_offset_bad(self):
+ """
+ Test offset key works with a BAD offset.
+ """
+ req = Request.blank(u'/?offset=\u0020aa')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_nothing(self):
+ """
+ Test request with no offset or limit
+ """
+ req = Request.blank('/')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_zero(self):
+ """
+ Test limit of zero.
+ """
+ req = Request.blank('/?limit=0')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_medium(self):
+ """
+ Test limit of 10.
+ """
+ req = Request.blank('/?limit=10')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium[:10])
+ self.assertEqual(limited(self.large, req), self.large[:10])
+
+ def test_limiter_limit_over_max(self):
+ """
+ Test limit of 3000.
+ """
+ req = Request.blank('/?limit=3000')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_and_offset(self):
+ """
+ Test request with both limit and offset.
+ """
+ items = range(2000)
+ req = Request.blank('/?offset=1&limit=3')
+ self.assertEqual(limited(items, req), items[1:4])
+ req = Request.blank('/?offset=3&limit=0')
+ self.assertEqual(limited(items, req), items[3:1003])
+ req = Request.blank('/?offset=3&limit=1500')
+ self.assertEqual(limited(items, req), items[3:1003])
+ req = Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(limited(items, req), [])
+
+ def test_limiter_custom_max_limit(self):
+ """
+ Test a max_limit other than 1000.
+ """
+ items = range(2000)
+ req = Request.blank('/?offset=1&limit=3')
+ self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
+ req = Request.blank('/?offset=3&limit=0')
+ self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ req = Request.blank('/?offset=3&limit=2500')
+ self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ req = Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(limited(items, req, max_limit=2000), [])
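
These tests pin down the limiter's behaviour: offset and limit come from the query string, unparsable or missing values fall back to defaults, limit=0 means "use max_limit", and limit is always capped at max_limit (1000 by default). A behavioural sketch consistent with the tests; the real implementation lives in nova.api.openstack.common and may differ in detail:

    def limited(items, request, max_limit=1000):
        try:
            offset = int(request.GET.get('offset', 0))
        except ValueError:
            offset = 0
        try:
            limit = int(request.GET.get('limit', max_limit))
        except ValueError:
            limit = max_limit
        limit = min(max_limit, limit or max_limit)
        return items[offset:offset + limit]
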
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 5d9ddefbe..8ab4d7569 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -143,6 +143,7 @@ class LocalImageServiceTest(unittest.TestCase,
def tearDown(self):
self.service.delete_all()
+ self.service.delete_imagedir()
self.stubs.UnsetAll()
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 29883e7c8..a7be0796e 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import json
import unittest
@@ -39,6 +40,13 @@ def return_server(context, id):
return stub_instance(id)
+def return_server_with_addresses(private, public):
+ def _return_server(context, id):
+ return stub_instance(id, private_address=private,
+ public_addresses=public)
+ return _return_server
+
+
def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
@@ -55,9 +63,45 @@ def instance_address(context, instance_id):
return None
-def stub_instance(id, user_id=1):
- return Instance(id=id, state=0, image_id=10, user_id=user_id,
- display_name='server%s' % id)
+def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+    if public_addresses is None:
+ public_addresses = list()
+
+ instance = {
+ "id": id,
+ "admin_pass": "",
+ "user_id": user_id,
+ "project_id": "",
+ "image_id": 10,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": "",
+ "key_data": "",
+ "state": 0,
+ "state_description": "",
+ "memory_mb": 0,
+ "vcpus": 0,
+ "local_gb": 0,
+ "hostname": "",
+ "host": "",
+ "instance_type": "",
+ "user_data": "",
+ "reservation_id": "",
+ "mac_address": "",
+ "scheduled_at": datetime.datetime.now(),
+ "launched_at": datetime.datetime.now(),
+ "terminated_at": datetime.datetime.now(),
+ "availability_zone": "",
+ "display_name": "server%s" % id,
+ "display_description": "",
+ "locked": False}
+
+ instance["fixed_ip"] = {
+ "address": private_address,
+        "floating_ips": [{"address": ip} for ip in public_addresses]}
+
+ return instance
def fake_compute_api(cls, req, id):
@@ -76,7 +120,7 @@ class ServersTest(unittest.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@@ -105,6 +149,22 @@ class ServersTest(unittest.TestCase):
self.assertEqual(res_dict['server']['id'], '1')
self.assertEqual(res_dict['server']['name'], 'server1')
+ def test_get_server_by_id_with_addresses(self):
+ private = "192.168.0.3"
+ public = ["1.2.3.4"]
+ new_return_server = return_server_with_addresses(private, public)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], '1')
+ self.assertEqual(res_dict['server']['name'], 'server1')
+ addresses = res_dict['server']['addresses']
+ self.assertEqual(len(addresses["public"]), len(public))
+ self.assertEqual(addresses["public"][0], public[0])
+ self.assertEqual(len(addresses["private"]), 1)
+ self.assertEqual(addresses["private"][0], private)
+
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
res = req.get_response(fakes.wsgi_app())
@@ -281,6 +341,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_server_reset_network(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/reset_network')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET"
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
new file mode 100644
index 000000000..df497ef1b
--- /dev/null
+++ b/nova/tests/api/openstack/test_zones.py
@@ -0,0 +1,140 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+import stubout
+import webob
+import json
+
+import nova.db
+from nova import context
+from nova import flags
+from nova.api.openstack import zones
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def zone_get(context, zone_id):
+ return dict(id=1, api_url='http://foo.com', username='bob',
+ password='xxx')
+
+
+def zone_create(context, values):
+ zone = dict(id=1)
+ zone.update(values)
+ return zone
+
+
+def zone_update(context, zone_id, values):
+ zone = dict(id=zone_id, api_url='http://foo.com', username='bob',
+ password='xxx')
+ zone.update(values)
+ return zone
+
+
+def zone_delete(context, zone_id):
+ pass
+
+
+def zone_get_all(context):
+ return [
+ dict(id=1, api_url='http://foo.com', username='bob',
+ password='xxx'),
+ dict(id=2, api_url='http://blah.com', username='alice',
+ password='qwerty')]
+
+
+class ZonesTest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+
+ self.stubs.Set(nova.db, 'zone_get', zone_get)
+ self.stubs.Set(nova.db, 'zone_get_all', zone_get_all)
+ self.stubs.Set(nova.db, 'zone_update', zone_update)
+ self.stubs.Set(nova.db, 'zone_create', zone_create)
+ self.stubs.Set(nova.db, 'zone_delete', zone_delete)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+
+ def test_get_zone_list(self):
+ req = webob.Request.blank('/v1.0/zones')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(len(res_dict['zones']), 2)
+
+ def test_get_zone_by_id(self):
+ req = webob.Request.blank('/v1.0/zones/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
+ self.assertFalse('password' in res_dict['zone'])
+ self.assertEqual(res.status_int, 200)
+
+ def test_zone_delete(self):
+ req = webob.Request.blank('/v1.0/zones/1')
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+
+ def test_zone_create(self):
+ body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
+ password='fubar'))
+ req = webob.Request.blank('/v1.0/zones')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
+ self.assertFalse('username' in res_dict['zone'])
+
+ def test_zone_update(self):
+ body = dict(zone=dict(username='zeb', password='sneaky'))
+ req = webob.Request.blank('/v1.0/zones/1')
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
+ self.assertFalse('username' in res_dict['zone'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/nova/tests/db/nova.austin.sqlite b/nova/tests/db/nova.austin.sqlite
new file mode 100644
index 000000000..ad1326bce
--- /dev/null
+++ b/nova/tests/db/nova.austin.sqlite
Binary files differ
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 66a16b0cb..fa27825cd 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -36,6 +36,7 @@ from nova.auth import manager
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
def __init__(self, response_string):
+ self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
@@ -66,13 +67,16 @@ class FakeHttplibConnection(object):
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
- sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(sock)
+ self.sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(self.sock)
self.http_response.begin()
def getresponse(self):
return self.http_response
+ def getresponsebody(self):
+ return self.sock.response_string
+
def close(self):
"""Required for compatibility with boto/tornado"""
pass
@@ -104,7 +108,7 @@ class ApiEc2TestCase(test.TestCase):
self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(),
'nova.api.ec2.cloud.CloudController'))
- def expect_http(self, host=None, is_secure=False):
+ def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
@@ -113,13 +117,31 @@ class ApiEc2TestCase(test.TestCase):
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
+ if api_version:
+ self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
- http = FakeHttplibConnection(
+ self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
- self.ec2.new_http_connection(host, is_secure).AndReturn(http)
- return http
+ self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
+ return self.http
+
+ def test_xmlns_version_matches_request_version(self):
+ self.expect_http(api_version='2010-10-30')
+ self.mox.ReplayAll()
+
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # Any request should be fine
+ self.ec2.get_all_instances()
+ self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
+ 'The version in the xmlns of the response does '
+ 'not match the API version given in the request.')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
@@ -226,16 +248,14 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
- # I don't bother checkng that we actually find it here,
- # because the create/delete unit test further up should
- # be good enough for that.
- for group in rv:
- if group.name == security_group_name:
- self.assertEquals(len(group.rules), 1)
- self.assertEquals(int(group.rules[0].from_port), 80)
- self.assertEquals(int(group.rules[0].to_port), 81)
- self.assertEquals(len(group.rules[0].grants), 1)
- self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(int(group.rules[0].from_port), 80)
+ self.assertEquals(int(group.rules[0].to_port), 81)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
@@ -292,16 +312,13 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
- # I don't bother checkng that we actually find it here,
- # because the create/delete unit test further up should
- # be good enough for that.
- for group in rv:
- if group.name == security_group_name:
- self.assertEquals(len(group.rules), 1)
- self.assertEquals(int(group.rules[0].from_port), 80)
- self.assertEquals(int(group.rules[0].to_port), 81)
- self.assertEquals(len(group.rules[0].grants), 1)
- self.assertEquals(str(group.rules[0].grants[0]), '::/0')
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(int(group.rules[0].from_port), 80)
+ self.assertEquals(int(group.rules[0].to_port), 81)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 771b1fcc0..445cc6e8b 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -87,6 +87,16 @@ class CloudTestCase(test.TestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_regions(self):
+ """Makes sure describe regions runs without raising an exception"""
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ regions = FLAGS.region_list
+ FLAGS.region_list = ["one=test_host1", "two=test_host2"]
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+ FLAGS.region_list = regions
+
def test_describe_addresses(self):
"""Makes sure describe addresses runs without raising an exception"""
address = "10.10.10.10"
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 09f6ee94a..b049ac943 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -49,7 +49,7 @@ class ComputeTestCase(test.TestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.get_admin_context()
+ self.context = context.RequestContext('fake', 'fake', False)
def tearDown(self):
self.manager.delete_user(self.user)
@@ -69,6 +69,13 @@ class ComputeTestCase(test.TestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+ def _create_group(self):
+ values = {'name': 'testgroup',
+ 'description': 'testgroup',
+ 'user_id': self.user.id,
+ 'project_id': self.project.id}
+ return db.security_group_create(self.context, values)
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -82,21 +89,53 @@ class ComputeTestCase(test.TestCase):
def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': self.user.id,
- 'project_id': self.project.id}
- group = db.security_group_create(self.context, values)
+ group = self._create_group()
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
image_id=None,
- security_group=['default'])
+ security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
- self.context, ref[0]['id'])), 1)
+ self.context, ref[0]['id'])), 1)
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 1)
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_destroy_instance_disassociates_security_groups(self):
+ """Make sure destroying disassociates security groups"""
+ group = self._create_group()
+
+ ref = self.compute_api.create(
+ self.context,
+ instance_type=FLAGS.default_instance_type,
+ image_id=None,
+ security_group=['testgroup'])
+ try:
+ db.instance_destroy(self.context, ref[0]['id'])
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 0)
finally:
db.security_group_destroy(self.context, group['id'])
+
+ def test_destroy_security_group_disassociates_instances(self):
+ """Make sure destroying security groups disassociates instances"""
+ group = self._create_group()
+
+ ref = self.compute_api.create(
+ self.context,
+ instance_type=FLAGS.default_instance_type,
+ image_id=None,
+ security_group=['testgroup'])
+
+ try:
+ db.security_group_destroy(self.context, group['id'])
+ group = db.security_group_get(context.get_admin_context(
+ read_deleted=True), group['id'])
+ self.assert_(len(group.instances) == 0)
+ finally:
db.instance_destroy(self.context, ref[0]['id'])
def test_run_terminate(self):
@@ -163,6 +202,14 @@ class ComputeTestCase(test.TestCase):
self.compute.set_admin_password(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_inject_file(self):
+ """Ensure we can write a file to an instance"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.inject_file(self.context, instance_id, "/tmp/test",
+ "File Contents")
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py
new file mode 100644
index 000000000..6992773f5
--- /dev/null
+++ b/nova/tests/test_localization.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import glob
+import logging
+import os
+import re
+import sys
+import unittest
+
+import nova
+
+
+class LocalizationTestCase(unittest.TestCase):
+ def test_multiple_positional_format_placeholders(self):
+ pat = re.compile("\W_\(")
+ single_pat = re.compile("\W%\W")
+ root_path = os.path.dirname(nova.__file__)
+ problems = {}
+ for root, dirs, files in os.walk(root_path):
+ for fname in files:
+ if not fname.endswith(".py"):
+ continue
+ pth = os.path.join(root, fname)
+ txt = fulltext = file(pth).read()
+ txt_lines = fulltext.splitlines()
+ if not pat.search(txt):
+ continue
+ problems[pth] = []
+ pos = txt.find("_(")
+ while pos > -1:
+ # Make sure that this isn't part of a dunder;
+ # e.g., __init__(...
+ # or something like 'self.assert_(...'
+ test_txt = txt[pos - 1: pos + 10]
+ if not (pat.search(test_txt)):
+ txt = txt[pos + 2:]
+ pos = txt.find("_(")
+ continue
+ pos += 2
+ txt = txt[pos:]
+ innerChars = []
+ # Count pairs of open/close parens until _() closing
+ # paren is found.
+ parenCount = 1
+ pos = 0
+ while parenCount > 0:
+ char = txt[pos]
+ if char == "(":
+ parenCount += 1
+ elif char == ")":
+ parenCount -= 1
+ innerChars.append(char)
+ pos += 1
+ inner_all = "".join(innerChars)
+ # Filter out '%%' and '%('
+ inner = inner_all.replace("%%", "").replace("%(", "")
+ # Filter out the single '%' operators
+ inner = single_pat.sub("", inner)
+ # Within the remaining content, count %
+ fmtCount = inner.count("%")
+ if fmtCount > 1:
+ inner_first = inner_all.splitlines()[0]
+ lns = ["%s" % (p + 1)
+ for p, t in enumerate(txt_lines)
+ if inner_first in t]
+ lnums = ", ".join(lns)
+ # Using ugly string concatenation to avoid having
+ # this test fail itself.
+ inner_all = "_" + "(" + "%s" % inner_all
+ problems[pth].append("Line: %s Text: %s" %
+ (lnums, inner_all))
+ # Look for more
+ pos = txt.find("_(")
+ if not problems[pth]:
+ del problems[pth]
+ if problems:
+ out = ["Problem(s) found in localized string formatting",
+ "(see http://www.gnu.org/software/hello/manual/"
+ "gettext/Python.html for more information)",
+ "",
+ " ------------ Files to fix ------------"]
+ for pth in problems:
+ out.append(" %s:" % pth)
+ for val in set(problems[pth]):
+ out.append(" %s" % val)
+ raise AssertionError("\n".join(out))
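
The check exists because gettext translators often need to reorder arguments, which is impossible with more than one positional %s in a single translated string; named mapping keys (the '% locals()' pattern adopted throughout this change) stay reorderable. A tiny illustration with made-up values:

    address, inst_addr, mac = '10.0.0.5', 'aa:bb:cc:dd:ee:ff', '02:16:3e:00:00:01'
    # flagged: two or more positional placeholders cannot be reordered in a .po file
    "IP %s leased to bad mac %s vs %s" % (address, inst_addr, mac)
    # preferred: named keys
    "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" % locals()
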
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
index 868a5ead3..c2c9d7772 100644
--- a/nova/tests/test_log.py
+++ b/nova/tests/test_log.py
@@ -46,6 +46,27 @@ class RootLoggerTestCase(test.TestCase):
self.assert_(True) # didn't raise exception
+class LogHandlerTestCase(test.TestCase):
+ def test_log_path_logdir(self):
+ self.flags(logdir='/some/path')
+ self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ '/some/path/foo-bar.log')
+
+ def test_log_path_logfile(self):
+ self.flags(logfile='/some/path/foo-bar.log')
+ self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ '/some/path/foo-bar.log')
+
+ def test_log_path_none(self):
+ self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
+
+ def test_log_path_logfile_overrides_logdir(self):
+ self.flags(logdir='/some/other/path',
+ logfile='/some/path/foo-bar.log')
+ self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ '/some/path/foo-bar.log')
+
+
class NovaFormatterTestCase(test.TestCase):
def setUp(self):
super(NovaFormatterTestCase, self).setUp()
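The new LogHandlerTestCase pins down the precedence between the logfile and logdir flags. A rough sketch of the behaviour the assertions imply, with the flags passed as parameters for readability; the real log.get_log_file_path reads them from FLAGS and its implementation is not shown in this patch:

    import os

    def get_log_file_path(binary=None, logfile=None, logdir=None):
        """Sketch: an explicit logfile wins; otherwise build <logdir>/<binary>.log."""
        if logfile:
            return logfile
        if logdir:
            return os.path.join(logdir, '%s.log' % binary)
        return None

    assert get_log_file_path(binary='foo-bar', logdir='/some/path') == '/some/path/foo-bar.log'
    assert get_log_file_path(binary='foo-bar', logfile='/some/path/foo-bar.log') == '/some/path/foo-bar.log'
    assert get_log_file_path(binary='foo-bar') is None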
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 85593ab46..4820e04fb 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -86,7 +86,8 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
- LOG.debug(_("Nested received %s, %s"), queue, value)
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
ret = rpc.call(context,
queue,
{"method": "echo",
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 556fe561c..6e5a0114b 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -221,19 +221,18 @@ class IptablesFirewallTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
- self.fw = libvirt_conn.IptablesFirewallDriver()
+
+ class FakeLibvirtConnection(object):
+ pass
+ self.fake_libvirt_connection = FakeLibvirtConnection()
+ self.fw = libvirt_conn.IptablesFirewallDriver(
+ get_connection=lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
- def _p(self, *args, **kwargs):
- if 'iptables-restore' in args:
- print ' '.join(args), kwargs['stdin']
- if 'iptables-save' in args:
- return
-
in_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
@@ -255,11 +254,21 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Mon Dec 6 11:54:13 2010',
]
+ in6_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
def test_static_filters(self):
- self.fw.execute = self._p
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
- 'project_id': 'fake'})
+ 'project_id': 'fake',
+ 'mac_address': '56:12:12:12:12:12'})
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
@@ -304,18 +313,31 @@ class IptablesFirewallTestCase(test.TestCase):
secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
- self.fw.add_instance(instance_ref)
-
- out_rules = self.fw.modify_rules(self.in_rules)
+# self.fw.add_instance(instance_ref)
+ def fake_iptables_execute(cmd, process_input=None):
+ if cmd == 'sudo ip6tables-save -t filter':
+ return '\n'.join(self.in6_rules), None
+ if cmd == 'sudo iptables-save -t filter':
+ return '\n'.join(self.in_rules), None
+ if cmd == 'sudo iptables-restore':
+ self.out_rules = process_input.split('\n')
+ return '', ''
+ if cmd == 'sudo ip6tables-restore':
+ self.out6_rules = process_input.split('\n')
+ return '', ''
+ self.fw.execute = fake_iptables_execute
+
+ self.fw.prepare_instance_filter(instance_ref)
+ self.fw.apply_instance_filter(instance_ref)
in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
for rule in in_rules:
if not 'nova' in rule:
- self.assertTrue(rule in out_rules,
+ self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
- for rule in out_rules:
+ for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-d 10.11.12.13 -j' in rule:
instance_chain = rule.split(' ')[-1]
@@ -323,7 +345,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
- for rule in out_rules:
+ for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
@@ -332,16 +354,16 @@ class IptablesFirewallTestCase(test.TestCase):
"The security group chain wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
- security_group_chain in out_rules,
+ security_group_chain in self.out_rules,
"ICMP acceptance rule wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
- ' 8 -j ACCEPT' % security_group_chain in out_rules,
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
+ '8 -j ACCEPT' % security_group_chain in self.out_rules,
"ICMP Echo Request acceptance rule wasn't added")
self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
'--dports 80:81 -j ACCEPT' % security_group_chain \
- in out_rules,
+ in self.out_rules,
"TCP port 80/81 acceptance rule wasn't added")
@@ -457,6 +479,19 @@ class NWFilterTestCase(test.TestCase):
'project_id': 'fake'})
inst_id = instance_ref['id']
+ ip = '10.11.12.13'
+
+ network_ref = db.project_get_network(self.context,
+ 'fake')
+
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id']}
+
+ admin_ctxt = context.get_admin_context()
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
def _ensure_all_called():
instance_filter = 'nova-instance-%s' % instance_ref['name']
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
@@ -476,5 +511,6 @@ class NWFilterTestCase(test.TestCase):
self.fw.setup_basic_filtering(instance)
self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
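The firewall test no longer shells out: execute() is swapped for a fake that serves canned iptables-save output and records whatever the driver would feed to iptables-restore. Reduced to its core, the stubbing pattern looks like this (command strings match the ones used in the test; the rule list is a trimmed stand-in):

    saved_rules = ['*filter', ':INPUT ACCEPT [0:0]', 'COMMIT']
    captured = {}

    def fake_iptables_execute(cmd, process_input=None):
        # Serve canned iptables-save output; capture what would be restored.
        if cmd == 'sudo iptables-save -t filter':
            return '\n'.join(saved_rules), None
        if cmd == 'sudo iptables-restore':
            captured['rules'] = process_input.split('\n')
            return '', ''
        return '', ''

    # In IptablesFirewallTestCase this is assigned to self.fw.execute before
    # prepare_instance_filter()/apply_instance_filter() run.
    fake_iptables_execute('sudo iptables-restore', process_input='\n'.join(saved_rules))
    print(captured['rules'])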
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 9f5b266f3..6b8efc9d8 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -32,6 +32,7 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi.vmops import SimpleDH
+from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
@@ -141,6 +142,10 @@ class XenAPIVolumeTestCase(test.TestCase):
self.stubs.UnsetAll()
+def reset_network(*args):
+ pass
+
+
class XenAPIVMTestCase(test.TestCase):
"""
Unit tests for VM operations
@@ -162,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
+ self.stubs.Set(VMOps, 'reset_network', reset_network)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
self.conn = xenapi_conn.get_connection(False)
@@ -243,7 +249,8 @@ class XenAPIVMTestCase(test.TestCase):
# Check that the VM is running according to XenAPI.
self.assertEquals(vm['power_state'], 'Running')
- def _test_spawn(self, image_id, kernel_id, ramdisk_id):
+ def _test_spawn(self, image_id, kernel_id, ramdisk_id,
+ instance_type="m1.large"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1,
'id': 1,
@@ -252,7 +259,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': image_id,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
- 'instance_type': 'm1.large',
+ 'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
conn = xenapi_conn.get_connection(False)
@@ -260,6 +267,12 @@ class XenAPIVMTestCase(test.TestCase):
conn.spawn(instance)
self.check_vm_record(conn)
+ def test_spawn_not_enough_memory(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self.assertRaises(Exception,
+ self._test_spawn,
+ 1, 2, 3, "m1.xlarge")
+
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None)
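test_spawn_not_enough_memory leans on the fake XenAPI host always reporting 12 GB free (see host_compute_free_memory in xenapi/fake.py below). A sketch of the check it exercises; the flavor sizes in the prints are assumptions, the real numbers come from instance_types:

    GIB = 1024 * 1024 * 1024

    def ensure_free_mem(memory_mb, host_free_bytes=12 * GIB):
        """Sketch of VMHelper.ensure_free_mem: compare the requested RAM,
        in bytes, against what the host reports as free."""
        return host_free_bytes >= memory_mb * 1024 * 1024

    # A flavor asking for more than 12 GB (the assumption behind picking
    # m1.xlarge in the test) fails the check, and spawn() marks the instance
    # SHUTDOWN instead of starting it.
    print(ensure_free_mem(8192))     # True  - fits in 12 GB
    print(ensure_free_mem(16384))    # False - does not fit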
diff --git a/nova/twistd.py b/nova/twistd.py
index 556271999..60ff7879a 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -43,8 +43,6 @@ else:
FLAGS = flags.FLAGS
-flags.DEFINE_string('logdir', None, 'directory to keep log files in '
- '(will be prepended to $logfile)')
class TwistdServerOptions(ServerOptions):
@@ -156,7 +154,7 @@ def WrapTwistedOptions(wrapped):
try:
self.parseArgs(*argv)
except TypeError:
- raise usage.UsageError("Wrong number of arguments.")
+ raise usage.UsageError(_("Wrong number of arguments."))
self.postOptions()
return args
@@ -220,7 +218,7 @@ def stop(pidfile):
time.sleep(0.1)
except OSError, err:
err = str(err)
- if err.find("No such process") > 0:
+ if err.find(_("No such process")) > 0:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
diff --git a/nova/utils.py b/nova/utils.py
index 6d3ddd092..42efa0008 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -20,13 +20,14 @@
System-level utilities and helper functions.
"""
+import base64
import datetime
import inspect
import json
import os
import random
-import subprocess
import socket
+import string
import struct
import sys
import time
@@ -36,6 +37,7 @@ import netaddr
from eventlet import event
from eventlet import greenthread
+from eventlet.green import subprocess
from nova import exception
from nova.exception import ProcessExecutionError
@@ -138,7 +140,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- LOG.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -152,6 +154,42 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
return result
+def ssh_execute(ssh, cmd, process_input=None,
+ addl_env=None, check_exit_code=True):
+ LOG.debug(_("Running cmd (SSH): %s"), cmd)
+ if addl_env:
+ raise exception.Error("Environment not supported over SSH")
+
+ if process_input:
+ # This is (probably) fixable if we need it...
+ raise exception.Error("process_input not supported over SSH")
+
+ stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
+ channel = stdout_stream.channel
+
+ #stdin.write('process_input would go here')
+ #stdin.flush()
+
+ # NOTE(justinsb): This seems suspicious...
+ # ...other SSH clients have buffering issues with this approach
+ stdout = stdout_stream.read()
+ stderr = stderr_stream.read()
+ stdin_stream.close()
+
+ exit_status = channel.recv_exit_status()
+
+ # exit_status == -1 if no exit code was returned
+ if exit_status != -1:
+ LOG.debug(_("Result was %s") % exit_status)
+ if check_exit_code and exit_status != 0:
+ raise exception.ProcessExecutionError(exit_code=exit_status,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=cmd)
+
+ return (stdout, stderr)
+
+
def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
@@ -199,6 +237,15 @@ def generate_mac():
return ':'.join(map(lambda x: "%02x" % x, mac))
+def generate_password(length=20):
+ """Generate a random sequence of letters and digits
+ to be used as a password. Note that this is not intended
+ to represent the ultimate in security.
+ """
+ chrs = string.letters + string.digits
+ return "".join([random.choice(chrs) for i in xrange(length)])
+
+
def last_octet(address):
return int(address.split(".")[-1])
@@ -206,19 +253,17 @@ def last_octet(address):
def get_my_linklocal(interface):
try:
if_str = execute("ip -f inet6 -o addr show %s" % interface)
- condition = "\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link"
+ condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
- return 'fe00::'
- except IndexError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
- except ProcessExecutionError as ex:
- LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
- except:
- return 'fe00::'
+ raise exception.Error(_("Link Local address is not found.:%s")
+ % if_str)
+ except Exception as ex:
+ raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
+ " :%(ex)s") % locals())
def to_global_ipv6(prefix, mac):
@@ -442,3 +487,15 @@ def dumps(value):
def loads(s):
return json.loads(s)
+
+
+def ensure_b64_encoding(val):
+ """Safety method to ensure that values expected to be base64-encoded
+ actually are. If they are, the value is returned unchanged. Otherwise,
+ the encoded value is returned.
+ """
+ try:
+        dummy = base64.b64decode(val)
+ return val
+ except TypeError:
+ return base64.b64encode(val)
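ssh_execute mirrors execute() for commands run over an existing SSH session. A usage sketch, assuming a paramiko SSHClient; paramiko is an assumption here (the helper only needs an object whose exec_command() returns stdin/stdout/stderr streams), and the host name and credentials are made up:

    import paramiko

    from nova import utils

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('san.example.org', username='admin', password='secret')  # hypothetical

    # Raises ProcessExecutionError when the remote command exits non-zero
    # (unless check_exit_code=False is passed).
    stdout, stderr = utils.ssh_execute(ssh, 'ls -l /tmp')
    print(stdout)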
diff --git a/nova/version.py b/nova/version.py
index 7b27acb6a..c3ecc2245 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -21,7 +21,7 @@ except ImportError:
'revision_id': 'LOCALREVISION',
'revno': 0}
-NOVA_VERSION = ['2011', '1']
+NOVA_VERSION = ['2011', '2']
YEAR, COUNT = NOVA_VERSION
FINAL = False # This becomes true at Release Candidate time
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f8b3c7807..92749f38a 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -76,9 +76,10 @@ class FakeConnection(object):
cls._instance = cls()
return cls._instance
- def init_host(self):
+ def init_host(self, host):
"""
- Initialize anything that is necessary for the driver to function
+ Initialize anything that is necessary for the driver to function,
+        including catching up with currently running VMs on the given host.
"""
return
@@ -151,6 +152,21 @@ class FakeConnection(object):
"""
pass
+ def inject_file(self, instance, b64_path, b64_contents):
+ """
+ Writes a file on the specified instance.
+
+ The first parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name. The second
+ parameter is the base64-encoded path to which the file is to be
+ written on the instance; the third is the contents of the file, also
+ base64-encoded.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+ """
+ pass
+
def rescue(self, instance):
"""
Rescue the specified instance.
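inject_file expects both arguments already base64-encoded (the XenAPI implementation later in this patch re-checks with utils.ensure_b64_encoding). A small illustration of how a caller prepares them; conn and instance are placeholders, not objects defined in this patch:

    import base64

    # Paths and contents travel base64-encoded so non-ASCII bytes survive
    # the trip through the agent.
    b64_path = base64.b64encode('/tmp/test_file')
    b64_contents = base64.b64encode('hello from nova')

    # conn stands in for a driver connection (e.g. the FakeConnection above):
    # conn.inject_file(instance, b64_path, b64_contents)
    print(b64_path)
    print(b64_contents)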
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 30dc1c79b..29d18dac5 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -113,7 +113,7 @@ class HyperVConnection(object):
self._conn = wmi.WMI(moniker='//./root/virtualization')
self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
- def init_host(self):
+ def init_host(self, host):
#FIXME(chiradeep): implement this
LOG.debug(_('In init host'))
pass
@@ -129,7 +129,7 @@ class HyperVConnection(object):
vm = self._lookup(instance.name)
if vm is not None:
raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -159,7 +159,7 @@ class HyperVConnection(object):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance['name']
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
- [], None, vs_gs_data.GetText_(1))[1:]
+ [], None, vs_gs_data.GetText_(1))[1:]
if ret_val == WMI_JOB_STATUS_STARTED:
success = self._check_job_status(job)
else:
@@ -184,40 +184,40 @@ class HyperVConnection(object):
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [memsetting.GetText_(1)])
+ vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
- procsetting.Limit = vcpus
+ procsetting.Limit = 100000 # static assignment to 100%
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [procsetting.GetText_(1)])
+ vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(vhdfile)s') % locals())
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
- and r.Address == "0"]
+ and r.Address == "0"]
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
diskdrive = self._clone_wmi_obj(
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
#Set the IDE ctrller as parent.
diskdrive.Parent = ctrller[0].path_()
diskdrive.Address = 0
@@ -263,17 +263,18 @@ class HyperVConnection(object):
default_nic_data = [n for n in emulatednics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._clone_wmi_obj(
- 'Msvm_EmulatedEthernetPortSettingData',
- default_nic_data[0])
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
- vm_name)
- LOG.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -283,7 +284,7 @@ class HyperVConnection(object):
new_resources = self._add_virt_resource(new_nic_data, vm)
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
- vm_name)
+ vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
@@ -319,8 +320,10 @@ class HyperVConnection(object):
if job.JobState != WMI_JOB_STATE_COMPLETED:
LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ desc = job.Description
+ elap = job.ElapsedTime
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ % locals())
return True
def _find_external_network(self):
@@ -386,7 +389,9 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ instance_name = instance.name
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
+ % locals())
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -402,12 +407,14 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
- cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ state = str(HYPERV_POWER_STATE[info.EnabledState])
+ memusage = str(info.MemoryUsage)
+ numprocs = str(info.NumberOfProcessors)
+ uptime = str(info.UpTime)
+
+ LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
+ " mem=%(memusage)s, num_cpu=%(numprocs)s,"
+ " cpu_time=%(uptime)s") % locals())
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -441,22 +448,22 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
- req_state)
+ LOG.info(_("Successfully changed vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
else:
- LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
- req_state)
- raise Exception(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ msg = _("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s") % locals()
+ LOG.error(msg)
+ raise Exception(msg)
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm' %
- instance_name)
+ raise exception.NotFound('Cannot attach volume to missing %s vm'
+ % instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s ' %
- instance_name)
+ raise exception.NotFound('Cannot detach volume from missing %s '
+ % instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index ecf0e5efb..7a6fef330 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
+    LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())
def _fetch_s3_image(image, path, user, project):
@@ -111,5 +111,8 @@ def _image_path(path):
def image_url(image):
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ return "http://%s:%s/images/%s" % (FLAGS.glance_host,
+ FLAGS.glance_port, image)
return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
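image_url() now branches on FLAGS.image_service: Glance deployments get a bare /images/<id> URL while everything else keeps the objectstore-style path. Roughly, as a sketch with made-up host and port values (the real ones come from the glance_host/glance_port and s3_host/s3_port flags):

    def image_url(image, use_glance=True,
                  glance_host='127.0.0.1', glance_port=9292,
                  s3_host='127.0.0.1', s3_port=3333):
        """Sketch of the two URL shapes; hosts and ports are illustrative."""
        if use_glance:
            return "http://%s:%s/images/%s" % (glance_host, glance_port, image)
        return "http://%s:%s/_images/%s/image" % (s3_host, s3_port, image)

    print(image_url(42))                    # http://127.0.0.1:9292/images/42
    print(image_url(42, use_glance=False))  # http://127.0.0.1:3333/_images/42/image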
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 8139c3620..88bfbc668 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -75,11 +75,13 @@
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<filterref filter="nova-instance-${name}">
<parameter name="IP" value="${ip_address}" />
- <parameter name="DHCPSERVER" value="${dhcp_server}" />
- <parameter name="RASERVER" value="${ra_server}" />
+ <parameter name="DHCPSERVER" value="${dhcp_server}" />
#if $getVar('extra_params', False)
${extra_params}
#end if
+#if $getVar('ra_server', False)
+ <parameter name="RASERVER" value="${ra_server}" />
+#end if
</filterref>
</interface>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index f5b0bd365..4e0fd106f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -149,16 +149,34 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
- self.nwfilter = NWFilterFirewall(self._get_connection)
+ fw_class = utils.import_class(FLAGS.firewall_driver)
+ self.firewall_driver = fw_class(get_connection=self._get_connection)
- if not FLAGS.firewall_driver:
- self.firewall_driver = self.nwfilter
- self.nwfilter.handle_security_groups = True
- else:
- self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+ def init_host(self, host):
+        # Adopt existing VMs running here
+ ctxt = context.get_admin_context()
+ for instance in db.instance_get_all_by_host(ctxt, host):
+ try:
+ LOG.debug(_('Checking state of %s'), instance['name'])
+ state = self.get_info(instance['name'])['state']
+ except exception.NotFound:
+ state = power_state.SHUTOFF
- def init_host(self):
- pass
+ LOG.debug(_('Current state of %(name)s was %(state)s.'),
+ {'name': instance['name'], 'state': state})
+ db.instance_set_state(ctxt, instance['id'], state)
+
+ if state == power_state.SHUTOFF:
+ # TODO(soren): This is what the compute manager does when you
+                # terminate an instance. At some point I figure we'll have a
+ # "terminated" state and some sort of cleanup job that runs
+ # occasionally, cleaning them out.
+ db.instance_destroy(ctxt, instance['id'])
+
+ if state != power_state.RUNNING:
+ continue
+ self.firewall_driver.prepare_instance_filter(instance)
+ self.firewall_driver.apply_instance_filter(instance)
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
@@ -236,8 +254,9 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- LOG.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ instance_name = instance['name']
+ LOG.info(_('instance %(instance_name)s: deleting instance files'
+ ' %(target)s') % locals())
if os.path.exists(target):
shutil.rmtree(target)
@@ -385,7 +404,7 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- self.nwfilter.setup_basic_filtering(instance)
+ self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
@@ -418,7 +437,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- LOG.info(_('cool, it\'s a device'))
+ LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -426,7 +445,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %r, fpath: %r'), data, fpath)
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -434,7 +453,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- LOG.info(_('Contents of file %s: %r'), fpath, contents)
+ LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
@exception.wrap_exception
@@ -510,7 +529,6 @@ class LibvirtConnection(object):
base_dir = os.path.join(FLAGS.instances_path, '_base')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
- os.chmod(base_dir, 0777)
base = os.path.join(base_dir, fname)
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
@@ -541,7 +559,6 @@ class LibvirtConnection(object):
# ensure directories exist and are writable
utils.execute('mkdir -p %s' % basepath(suffix=''))
- utils.execute('chmod 0777 %s' % basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
@@ -623,21 +640,22 @@ class LibvirtConnection(object):
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
+ inst_name = inst['name']
+ img_id = inst.image_id
if key:
- LOG.info(_('instance %s: injecting key into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting key into'
+ ' image %(img_id)s') % locals())
if net:
- LOG.info(_('instance %s: injecting net into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting net into'
+ ' image %(img_id)s') % locals())
try:
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
+ ' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
@@ -645,9 +663,6 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -658,8 +673,7 @@ class LibvirtConnection(object):
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
ra_server = network['ra_server']
- if not ra_server:
- ra_server = 'fd00::'
+
if FLAGS.allow_project_net_traffic:
if FLAGS.use_ipv6:
net, mask = _get_net_and_mask(network['cidr'])
@@ -698,11 +712,13 @@ class LibvirtConnection(object):
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server,
- 'ra_server': ra_server,
'extra_params': extra_params,
'rescue': rescue,
'local': instance_type['local_gb'],
'driver_type': driver_type}
+
+ if ra_server:
+ xml_info['ra_server'] = ra_server + "/128"
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
@@ -732,7 +748,8 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError("diagnostics are not supported for libvirt")
+ raise exception.APIError(_("diagnostics are not supported "
+ "for libvirt"))
def get_disks(self, instance_name):
"""
@@ -884,6 +901,20 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
+ def setup_basic_filtering(self, instance):
+ """Create rules to block spoofing and allow dhcp.
+
+ This gets called when spawning an instance, before
+ :method:`prepare_instance_filter`.
+
+ """
+ raise NotImplementedError()
+
+ def _ra_server_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['ra_server']
+
class NWFilterFirewall(FirewallDriver):
"""
@@ -931,11 +962,15 @@ class NWFilterFirewall(FirewallDriver):
"""
- def __init__(self, get_connection):
+ def __init__(self, get_connection, **kwargs):
self._libvirt_get_connection = get_connection
self.static_filters_configured = False
self.handle_security_groups = False
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
def _get_connection(self):
return self._libvirt_get_connection()
_conn = property(_get_connection)
@@ -1094,7 +1129,9 @@ class NWFilterFirewall(FirewallDriver):
'nova-base-ipv6',
'nova-allow-dhcp-server']
if FLAGS.use_ipv6:
- instance_secgroup_filter_children += ['nova-allow-ra-server']
+ ra_server = self._ra_server_for_instance(instance)
+ if ra_server:
+ instance_secgroup_filter_children += ['nova-allow-ra-server']
ctxt = context.get_admin_context()
@@ -1168,9 +1205,14 @@ class NWFilterFirewall(FirewallDriver):
class IptablesFirewallDriver(FirewallDriver):
- def __init__(self, execute=None):
+ def __init__(self, execute=None, **kwargs):
self.execute = execute or utils.execute
self.instances = {}
+ self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+
+ def setup_basic_filtering(self, instance):
+ """Use NWFilter from libvirt for this."""
+ return self.nwfilter.setup_basic_filtering(instance)
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
@@ -1226,6 +1268,7 @@ class IptablesFirewallDriver(FirewallDriver):
our_chains += [':nova-local - [0:0]']
our_rules += ['-A FORWARD -j nova-local']
+ our_rules += ['-A OUTPUT -j nova-local']
security_groups = {}
# Add our chains
@@ -1266,13 +1309,23 @@ class IptablesFirewallDriver(FirewallDriver):
if(ip_version == 4):
# Allow DHCP responses
dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
- (chain_name, dhcp_server)]
+ our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT ' % (chain_name, dhcp_server)]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidr = self._project_cidr_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
elif(ip_version == 6):
# Allow RA responses
ra_server = self._ra_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p icmpv6' %
- (chain_name, ra_server)]
+ if ra_server:
+ our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
+ (chain_name, ra_server + "/128")]
+ #Allow project network traffic
+ if (FLAGS.allow_project_net_traffic):
+ cidrv6 = self._project_cidrv6_for_instance(instance)
+ our_rules += ['-A %s -s %s -j ACCEPT' %
+ (chain_name, cidrv6)]
# If nothing matches, jump to the fallback chain
our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
@@ -1359,11 +1412,21 @@ class IptablesFirewallDriver(FirewallDriver):
instance['id'])
def _dhcp_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['gateway']
def _ra_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['ra_server']
+
+ def _project_cidr_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr']
+
+ def _project_cidrv6_for_instance(self, instance):
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ return network['cidr_v6']
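LibvirtConnection no longer special-cases an unset flag: it always imports the class named by FLAGS.firewall_driver and hands it the connection getter, and IptablesFirewallDriver now builds its own NWFilterFirewall for setup_basic_filtering(). A sketch of the selection mechanism; the helper below is a simplified stand-in for utils.import_class, and the flag value shown is one of the two driver classes defined in this file:

    import importlib

    def load_firewall_driver(path, get_connection):
        """Simplified stand-in for utils.import_class plus instantiation."""
        mod_name, cls_name = path.rsplit('.', 1)
        module = importlib.import_module(mod_name)
        return getattr(module, cls_name)(get_connection=get_connection)

    # e.g. when nova-compute is started with
    #   --firewall_driver=nova.virt.libvirt_conn.IptablesFirewallDriver
    # LibvirtConnection effectively does:
    #   self.firewall_driver = load_firewall_driver(FLAGS.firewall_driver,
    #                                               self._get_connection)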
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4bfaf4b57..018d0dcd3 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake")
def log_db_contents(msg=None):
- LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
@@ -284,6 +286,10 @@ class SessionBase(object):
rec['currently_attached'] = False
rec['device'] = ''
+ def host_compute_free_memory(self, _1, ref):
+ #Always return 12GB available
+ return 12 * 1024 * 1024 * 1024
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -331,7 +337,8 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- LOG.debug(_('Calling %s %s'), name, impl)
+ localname = name
+ LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
self._check_session(params)
return impl(*params)
return callit
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index b80ff4dba..80cc3035d 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,6 +22,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import os
import pickle
import re
+import time
import urllib
from xml.dom import minidom
@@ -133,10 +134,21 @@ class VMHelper(HelperBase):
'pae': 'true', 'viridian': 'true'}
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
+ def ensure_free_mem(cls, session, instance):
+ instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ mem = long(instance_type['memory_mb']) * 1024 * 1024
+ #get free memory from host
+ host = session.get_xenapi_host()
+ host_free_mem = long(session.get_xenapi().host.
+ compute_free_memory(host))
+ return host_free_mem >= mem
+
+ @classmethod
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
@@ -153,10 +165,11 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
- vdi_ref)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
@@ -208,11 +221,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
@@ -230,8 +243,9 @@ class VMHelper(HelperBase):
'other_config': {},
'sm_config': {},
'tags': []})
- LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
- name_label, virtual_size, read_only, sr_ref)
+ LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+ ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
+ % locals())
return vdi_ref
@classmethod
@@ -241,7 +255,8 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
+ LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
+ % locals())
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -254,8 +269,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %(template_vm_ref)s from'
+ ' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -268,8 +283,8 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as ID %s"),
- vdi_uuids, image_id)
+ logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+ " ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
@@ -309,7 +324,7 @@ class VMHelper(HelperBase):
meta, image_file = c.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+ LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
@@ -343,7 +358,7 @@ class VMHelper(HelperBase):
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, type):
url = images.image_url(image)
- LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
+ LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -379,7 +394,7 @@ class VMHelper(HelperBase):
pv = True
elif pv_str.lower() == 'false':
pv = False
- LOG.debug(_("PV Kernel in VDI:%d"), pv)
+ LOG.debug(_("PV Kernel in VDI:%s"), pv)
return pv
@classmethod
@@ -435,6 +450,14 @@ class VMHelper(HelperBase):
return None
@classmethod
+ def lookup_kernel_ramdisk(cls, session, vm):
+ vm_rec = session.get_xenapi().VM.get_record(vm)
+ if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
+ return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
+ else:
+ return (None, None)
+
+ @classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
@@ -498,7 +521,8 @@ def get_vhd_parent(session, vdi_rec):
parent_uuid = vdi_rec['sm_config']['vhd-parent']
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ vdi_uuid = vdi_rec['uuid']
+ LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
return parent_ref, parent_rec
else:
return None
@@ -539,16 +563,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
- msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
- % (attempts['counter'], max_attempts))
+ counter = attempts['counter']
+ msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
+ " %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- LOG.debug(_("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."), parent_uuid,
- original_parent_uuid)
+ LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
+ " %(original_parent_uuid)s, waiting for coalesce...")
+ % locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
@@ -566,8 +591,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%s) found for "
- "VM %s") % (num_vdis, vm_ref))
+ raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
@@ -589,6 +614,27 @@ def find_sr(session):
return None
+def remap_vbd_dev(dev):
+ """Return the appropriate location for a plugged-in VBD device
+
+ Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
+ fixed in future versions:
+ https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
+
+ For now, we work around it by just doing a string replace.
+ """
+ # NOTE(sirp): This hack can go away when we pull support for Maverick
+ should_remap = FLAGS.xenapi_remap_vbd_dev
+ if not should_remap:
+ return dev
+
+ old_prefix = 'xvd'
+ new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
+ remapped_dev = dev.replace(old_prefix, new_prefix)
+
+ return remapped_dev
+
+
def with_vdi_attached_here(session, vdi, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
@@ -611,7 +657,13 @@ def with_vdi_attached_here(session, vdi, read_only, f):
LOG.debug(_('Plugging VBD %s ... '), vbd)
session.get_xenapi().VBD.plug(vbd)
LOG.debug(_('Plugging VBD %s done.'), vbd)
- return f(session.get_xenapi().VBD.get_device(vbd))
+ orig_dev = session.get_xenapi().VBD.get_device(vbd)
+ LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
+ dev = remap_vbd_dev(orig_dev)
+ if dev != orig_dev:
+ LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ 'remapping to %(dev)s') % locals())
+ return f(dev)
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
vbd_unplug_with_retry(session, vbd)
@@ -624,6 +676,7 @@ def vbd_unplug_with_retry(session, vbd):
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
should be dead."""
+ # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
session.get_xenapi().VBD.unplug(vbd)
@@ -679,8 +732,8 @@ def _write_partition(virtual_size, dev):
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
- LOG.debug(_('Writing partition table %d %d to %s...'),
- primary_first, primary_last, dest)
+ LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
+ ' to %(dest)s...') % locals())
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
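remap_vbd_dev() only rewrites the device name when the xenapi_remap_vbd_dev flag (added in xenapi_conn.py further down in this patch) is set, substituting the configured prefix for 'xvd'. In effect:

    def remap_vbd_dev(dev, remap=True, new_prefix='sd'):
        """Sketch of the Maverick workaround controlled by the
        xenapi_remap_vbd_dev / xenapi_remap_vbd_dev_prefix flags."""
        if not remap:
            return dev
        return dev.replace('xvd', new_prefix)

    print(remap_vbd_dev('xvdb'))               # sdb
    print(remap_vbd_dev('xvdb', remap=False))  # xvdb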
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 6c2fd6a68..0168681f6 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -67,13 +67,19 @@ class VMOps(object):
raise exception.Duplicate(_('Attempted to create'
' non-unique name %s') % instance.name)
- bridge = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])['bridge']
- network_ref = \
- NetworkHelper.find_network_with_bridge(self._session, bridge)
+ #ensure enough free memory is available
+ if not VMHelper.ensure_free_mem(self._session, instance):
+ name = instance['name']
+ LOG.exception(_('instance %(name)s: not enough free memory')
+ % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
+
#if kernel is not present we must download a raw disk
if instance.kernel_id:
disk_image_type = ImageType.DISK
@@ -99,14 +105,70 @@ class VMOps(object):
instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
- if network_ref:
- VMHelper.create_vif(self._session, vm_ref,
- network_ref, instance.mac_address)
+ # write network info
+ admin_context = context.get_admin_context()
+
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # I've decided to go ahead and consider multiple IPs and networks
+ # at this stage even though they aren't implemented because these will
+ # be needed for multi-nic and there was no sense writing it for single
+ # network/single IP and then having to turn around and re-write it
+ IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+ for network in db.network_get_all_by_instance(admin_context,
+ instance['id']):
+ network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+ def ip_dict(ip):
+ return {'netmask': network['netmask'],
+ 'enabled': '1',
+ 'ip': ip.address}
+
+ mac_id = instance.mac_address.replace(':', '')
+ location = 'vm-data/networking/%s' % mac_id
+ mapping = {'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ self.write_to_param_xenstore(vm_ref, {location: mapping})
+
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # this bit here about creating the vifs will be updated
+ # in multi-nic to handle multiple IPs on the same network
+ # and multiple networks
+ # for now it works as there is only one of each
+ bridge = network['bridge']
+ network_ref = \
+ NetworkHelper.find_network_with_bridge(self._session, bridge)
+
+ if network_ref:
+ VMHelper.create_vif(self._session, vm_ref,
+ network_ref, instance.mac_address)
+
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
-
+ instance_name = instance.name
+ LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
+ % locals())
+
+ def _inject_onset_files():
+ onset_files = instance.onset_files
+ if onset_files:
+ # Check if this is a JSON-encoded string and convert if needed.
+ if isinstance(onset_files, basestring):
+ try:
+ onset_files = json.loads(onset_files)
+ except ValueError:
+ LOG.exception(_("Invalid value for onset_files: '%s'")
+ % onset_files)
+ onset_files = []
+ # Inject any files, if specified
+            for path, contents in onset_files:
+ LOG.debug(_("Injecting file path: '%s'") % path)
+ self.inject_file(instance, path, contents)
# NOTE(armando): Do we really need to do this in virt?
+ # NOTE(tr3buchet): not sure but wherever we do it, we need to call
+ # reset_network afterwards
timer = utils.LoopingCall(f=None)
def _wait_for_boot():
@@ -117,6 +179,8 @@ class VMOps(object):
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop()
+ _inject_onset_files()
+ return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
@@ -125,8 +189,13 @@ class VMOps(object):
instance['id'],
power_state.SHUTDOWN)
timer.stop()
+ return False
timer.f = _wait_for_boot
+
+ # call reset networking
+ self.reset_network(instance)
+
return timer.start(interval=0.5, now=True)
def _get_vm_opaque_ref(self, instance_or_vm):
@@ -147,7 +216,7 @@ class VMOps(object):
if isinstance(instance_or_vm, (int, long)):
ctx = context.get_admin_context()
try:
- instance_obj = db.instance_get_by_id(ctx, instance_or_vm)
+ instance_obj = db.instance_get(ctx, instance_or_vm)
instance_name = instance_obj.name
except exception.NotFound:
# The unit tests screw this up, as they use an integer for
@@ -159,7 +228,8 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
- raise Exception(_('Instance not present %s') % instance_name)
+ raise exception.NotFound(
+ _('Instance not present %s') % instance_name)
return vm
def snapshot(self, instance, image_id):
@@ -196,7 +266,8 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
- logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
+ % locals())
return
try:
@@ -252,41 +323,112 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def destroy(self, instance):
- """Destroy VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Write a file to the VM instance. The path to which it is to be
+ written and the contents of the file need to be supplied; both should
+ be base64-encoded to prevent errors with non-ASCII characters being
+ transmitted. If the agent does not support file injection, or the user
+ has disabled it, a NotImplementedError will be raised.
+ """
+ # Files/paths *should* be base64-encoded at this point, but
+ # double-check to make sure.
+ b64_path = utils.ensure_b64_encoding(b64_path)
+ b64_contents = utils.ensure_b64_encoding(b64_contents)
- def _destroy(self, instance, vm, shutdown=True):
- """ Destroy VM instance """
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ # Need to uniquely identify this request.
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id, 'b64_path': b64_path,
+ 'b64_contents': b64_contents}
+ # If the agent doesn't support file injection, a NotImplementedError
+ # will be raised with the appropriate message.
+ resp = self._make_agent_call('inject_file', instance, '', args)
+ resp_dict = json.loads(resp)
+ if resp_dict['returncode'] != '0':
+ # There was some other sort of error; the message will contain
+ # a description of the error.
+ raise RuntimeError(resp_dict['message'])
+ return resp_dict['message']
+
+ def _shutdown(self, instance, vm):
+        """Shut down an instance"""
+ state = self.get_info(instance['name'])['state']
+ if state == power_state.SHUTDOWN:
+ LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
+ locals())
return
- # Get the VDIs related to the VM
+
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+
+ def _destroy_vdis(self, instance, vm):
+ """Destroys all VDIs associated with a VM """
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- if shutdown:
+
+ if not vdis:
+ return
+
+ for vdi in vdis:
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- # Disk clean-up
- if vdis:
- for vdi in vdis:
- try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- # VM Destroy
+ def _destroy_vm(self, instance, vm):
+ """Destroys a VM record """
try:
- task = self._session.call_xenapi('Async.VM.destroy', vm)
- self._session.wait_for_task(instance.id, task)
+ kernel = None
+ ramdisk = None
+ if instance.kernel_id or instance.ramdisk_id:
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+ task1 = self._session.call_xenapi('Async.VM.destroy', vm)
+ LOG.debug(_("Removing kernel/ramdisk files"))
+ fn = "remove_kernel_ramdisk"
+ args = {}
+ if kernel:
+ args['kernel-file'] = kernel
+ if ramdisk:
+ args['ramdisk-file'] = ramdisk
+ task2 = self._session.async_call_plugin('glance', fn, args)
+ self._session.wait_for_task(instance.id, task1)
+ self._session.wait_for_task(instance.id, task2)
+ LOG.debug(_("kernel/ramdisk files removed"))
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def destroy(self, instance):
+ """
+ Destroy VM instance
+
+ This is the method exposed by xenapi_conn.destroy(). The rest of the
+ destroy_* methods are internal.
+ """
+ vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """
+ Destroys VM instance by performing:
+
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+        3. Destroying the actual VM record
+ """
+ if vm is None:
+ # Don't complain, just return. This lets us clean up instances
+ # that have already disappeared from the underlying platform.
+ return
+
+ if shutdown:
+ self._shutdown(instance, vm)
+
+ self._destroy_vdis(instance, vm)
+ self._destroy_vm(instance, vm)
+
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
@@ -341,6 +483,14 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def reset_network(self, instance):
+ """
+ Creates uuid arg to pass to make_agent_call and calls it.
+
+ """
+ args = {'id': str(uuid.uuid4())}
+ resp = self._make_agent_call('resetnetwork', instance, '', args)
+
def list_from_xenstore(self, vm, path):
"""Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys,
@@ -410,6 +560,11 @@ class VMOps(object):
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
+ elif 'NOT IMPLEMENTED:' in err_msg:
+ LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
+ ' supported by the agent. VM id=%(instance_id)s;'
+ ' args=%(strargs)s') % locals())
+ raise NotImplementedError(err_msg)
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
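spawn() now records one entry per network in the param xenstore under vm-data/networking/<mac without colons>, and reset_network() then asks the guest agent (via the 'resetnetwork' call) to pick up the change. A sketch of the key and the mapping shape built from ip_dict() above; keys are the ones used in the patch, values are invented:

    mac_address = 'aa:bb:cc:dd:ee:ff'
    location = 'vm-data/networking/%s' % mac_address.replace(':', '')

    mapping = {'label': 'public',
               'gateway': '10.0.0.1',
               'mac': mac_address,
               'dns': ['8.8.8.8'],
               'ips': [{'netmask': '255.255.255.0',
                        'enabled': '1',
                        'ip': '10.0.0.3'}]}

    print(location)   # vm-data/networking/aabbccddeeff
    print(mapping)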
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0cd15b950..d5ebd29d5 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -71,7 +71,7 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -98,20 +98,20 @@ class VolumeHelper(HelperBase):
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+ ' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
- exc, pbd)
+ LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+ ' PBD %(pbd)s') % locals())
try:
session.get_xenapi().SR.forget(sr_ref)
LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
- sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when forgetting'
+ ' SR %(sr_ref)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -172,8 +172,8 @@ class VolumeHelper(HelperBase):
(volume_id is None) or \
(target_host is None) or \
(target_iqn is None):
- raise StorageError(_('Unable to obtain target information %s, %s')
- % (device_path, mountpoint))
+ raise StorageError(_('Unable to obtain target information'
+ ' %(device_path)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['deviceNumber'] = device_number
volume_info['volumeId'] = volume_id
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 189f968c6..d89a6f995 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -48,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- LOG.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
+ " %(mountpoint)s") % locals())
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -66,9 +66,8 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to create VDI on SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
vbd_ref = VMHelper.create_vbd(self._session,
@@ -78,9 +77,8 @@ class VolumeOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to use SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to use SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
task = self._session.call_xenapi('Async.VBD.plug',
@@ -92,8 +90,8 @@ class VolumeOps(object):
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- LOG.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s attached to'
+ ' instance %(instance_name)s') % locals())
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -103,7 +101,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
@@ -125,5 +124,5 @@ class VolumeOps(object):
LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- LOG.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s detached from'
+ ' instance %(instance_name)s') % locals())
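Note on the attach_volume changes above: every failure path now tears down the iSCSI SR before re-raising, so a failed attach does not leak storage repositories. A distilled sketch of that shape, where the callables stand in for the real XenAPI helpers:

    def attach_with_cleanup(create_sr, create_vdi, destroy_sr):
        # Create the SR first; if any later step fails, undo it and re-raise.
        sr_ref = create_sr()
        try:
            return create_vdi(sr_ref)
        except Exception:
            destroy_sr(sr_ref)
            raise

    # Example: a failing VDI step still cleans up the SR.
    created = []
    try:
        attach_with_cleanup(lambda: created.append('sr') or 'sr',
                            lambda sr: 1 / 0,
                            lambda sr: created.remove(sr))
    except ZeroDivisionError:
        pass
    assert created == []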
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c57c883c9..c2f65699f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -109,6 +109,14 @@ flags.DEFINE_string('target_port',
flags.DEFINE_string('iqn_prefix',
'iqn.2010-10.org.openstack',
'IQN Prefix')
+# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick; once we
+# drop support for Maverick, this should be removed.
+flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
+ 'Used to enable the remapping of VBD dev '
+ '(Works around an issue in Ubuntu Maverick)')
+flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
+ 'Specify prefix to remap VBD dev to '
+ '(ex. /dev/xvdb -> /dev/sdb)')
def get_connection(_):
@@ -133,7 +141,7 @@ class XenAPIConnection(object):
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
- def init_host(self):
+ def init_host(self, host):
#FIXME(armando): implement this
#NOTE(armando): would we need a method
#to call when shutting down the host?
@@ -160,6 +168,12 @@ class XenAPIConnection(object):
"""Set the root/admin password on the VM instance"""
self._vmops.set_admin_password(instance, new_pass)
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Create a file on the VM instance. The file path and contents
+ should be base64-encoded.
+ """
+ self._vmops.inject_file(instance, b64_path, b64_contents)
+
def destroy(self, instance):
"""Destroy VM instance"""
self._vmops.destroy(instance)
@@ -180,6 +194,10 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def reset_network(self, instance):
+ """reset networking for specified instance"""
+ self._vmops.reset_network(instance)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
@@ -290,19 +308,14 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%s] %s status: success %s") % (
- name,
- task,
- result))
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- LOG.warn(_("Task [%s] %s status: %s %s") % (
- name,
- task,
- status,
- error_info))
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
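Note on the two new flags above: they describe remapping a VBD device name out of the xvd* namespace (the help text's example is /dev/xvdb -> /dev/sdb). The actual remapping lives in the driver code elsewhere in the commit; the sketch below is only a hypothetical illustration of the substitution the flags describe.

    import re

    def remap_vbd_dev(dev, remap_prefix='sd'):
        # Hypothetical helper: swap the 'xvd' prefix for remap_prefix,
        # leaving non-xvd device names untouched.
        match = re.match(r'^/dev/xvd([a-z]+)$', dev)
        if not match:
            return dev
        return '/dev/%s%s' % (remap_prefix, match.group(1))

    assert remap_vbd_dev('/dev/xvdb') == '/dev/sdb'
    assert remap_vbd_dev('/dev/sda') == '/dev/sda'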
diff --git a/nova/volume/api.py b/nova/volume/api.py
index ce4831cc3..2f4494845 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,14 +41,15 @@ class API(base.Base):
def create(self, context, size, name, description):
if quota.allowed_volumes(context, 1, size) < 1:
- LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
- context.project_id, size)
+ pid = context.project_id
+            LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
+ " %(size)sG volume") % locals())
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
- "create a volume of size %s") % size)
+ "create a volume of size %sG") % size)
options = {
'size': size,
- 'user_id': context.user.id,
+ 'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': FLAGS.storage_availability_zone,
'status': "creating",
@@ -84,7 +85,7 @@ class API(base.Base):
return self.db.volume_get(context, volume_id)
def get_all(self, context):
- if context.user.is_admin():
+ if context.is_admin:
return self.db.volume_get_all(context)
return self.db.volume_get_all_by_project(context, context.project_id)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 5fefa10cf..82f4c2f54 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -100,6 +100,14 @@ class VolumeDriver(object):
def delete_volume(self, volume):
"""Deletes a logical volume."""
+ try:
+ self._try_execute("sudo lvdisplay %s/%s" %
+ (FLAGS.volume_group,
+ volume['name']))
+ except Exception as e:
+ # If the volume isn't present, then don't attempt to delete
+ return True
+
self._try_execute("sudo lvremove -f %s/%s" %
(FLAGS.volume_group,
volume['name']))
@@ -218,8 +226,14 @@ class ISCSIDriver(VolumeDriver):
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping ensure_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
self._sync_exec("sudo ietadm --op new "
@@ -258,15 +272,32 @@ class ISCSIDriver(VolumeDriver):
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+            # ietadm show will exit with an error if
+            # this export has already been removed
+ self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+ except Exception as e:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "is presently exported for volume: %d"), volume['id'])
+ return
+
self._execute("sudo ietadm --op delete --tid=%s "
"--lun=0" % iscsi_target)
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
- def _get_name_and_portal(self, volume_name, host):
+ def _get_name_and_portal(self, volume):
"""Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+ host = volume['host']
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
"sendtargets -p %s" % host)
for target in out.splitlines():
@@ -278,8 +309,7 @@ class ISCSIDriver(VolumeDriver):
def discover_volume(self, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
(iscsi_name, iscsi_portal))
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
@@ -290,8 +320,7 @@ class ISCSIDriver(VolumeDriver):
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
"-n node.startup -v manual" %
(iscsi_name, iscsi_portal))
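Note on _get_name_and_portal: both this driver and the SAN driver added below parse the output of 'iscsiadm -m discovery -t sendtargets', where each line has the form '<portal>,<tpgt> <iqn>'. A standalone sketch of that parsing with a made-up sample line; the exact output format is an assumption based on typical iscsiadm behaviour.

    def parse_discovery(output, wanted_name):
        # Return (iscsi_name, iscsi_portal) for the first matching target.
        for target in output.splitlines():
            if wanted_name in target:
                location, _sep, iscsi_name = target.partition(' ')
                return iscsi_name, location.split(',')[0]
        return None, None

    sample = '10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001\n'
    print(parse_discovery(sample, 'volume-00000001'))
    # ('iqn.2010-10.org.openstack:volume-00000001', '10.0.0.5:3260')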
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 6348539c5..d2f02e4e0 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -84,7 +84,10 @@ class VolumeManager(manager.Manager):
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
- self.driver.ensure_export(ctxt, volume)
+ if volume['status'] in ['available', 'in-use']:
+ self.driver.ensure_export(ctxt, volume)
+ else:
+ LOG.info(_("volume %s: skipping export"), volume['name'])
def create_volume(self, context, volume_id):
"""Creates and exports the volume."""
@@ -99,12 +102,19 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
- LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
- volume_ref['size'])
- self.driver.create_volume(volume_ref)
+ try:
+ vol_name = volume_ref['name']
+ vol_size = volume_ref['size']
+ LOG.debug(_("volume %(vol_name)s: creating lv of"
+ " size %(vol_size)sG") % locals())
+ self.driver.create_volume(volume_ref)
- LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- self.driver.create_export(context, volume_ref)
+ LOG.debug(_("volume %s: creating export"), volume_ref['name'])
+ self.driver.create_export(context, volume_ref)
+ except Exception:
+ self.db.volume_update(context,
+ volume_ref['id'], {'status': 'error'})
+ raise
now = datetime.datetime.utcnow()
self.db.volume_update(context,
@@ -121,10 +131,18 @@ class VolumeManager(manager.Manager):
raise exception.Error(_("Volume is still attached"))
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
- LOG.debug(_("volume %s: removing export"), volume_ref['name'])
- self.driver.remove_export(context, volume_ref)
- LOG.debug(_("volume %s: deleting"), volume_ref['name'])
- self.driver.delete_volume(volume_ref)
+
+ try:
+ LOG.debug(_("volume %s: removing export"), volume_ref['name'])
+ self.driver.remove_export(context, volume_ref)
+ LOG.debug(_("volume %s: deleting"), volume_ref['name'])
+ self.driver.delete_volume(volume_ref)
+ except Exception:
+ self.db.volume_update(context,
+ volume_ref['id'],
+ {'status': 'error_deleting'})
+ raise
+
self.db.volume_destroy(context, volume_id)
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
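Note on the manager changes above: the driver calls are now wrapped so that a failure flips the database record to 'error' (or 'error_deleting') instead of leaving it stuck in 'creating' or 'deleting'. A minimal runnable sketch of that pattern, using a fake driver and an in-memory stand-in for the volume table:

    class FakeDriver(object):
        def create_volume(self, volume):
            raise RuntimeError('lvcreate failed')

    statuses = {}

    def volume_update(volume_id, values):
        statuses.setdefault(volume_id, {}).update(values)

    def create_volume_guarded(driver, volume):
        # Mirror of the new error handling: mark the record 'error' on
        # failure so the API never reports a phantom 'creating' volume.
        try:
            driver.create_volume(volume)
        except Exception:
            volume_update(volume['id'], {'status': 'error'})
            raise
        volume_update(volume['id'], {'status': 'available'})

    try:
        create_volume_guarded(FakeDriver(), {'id': 42, 'size': 1})
    except RuntimeError:
        pass
    print(statuses[42])  # {'status': 'error'}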
diff --git a/nova/volume/san.py b/nova/volume/san.py
new file mode 100644
index 000000000..26d6125e7
--- /dev/null
+++ b/nova/volume/san.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Drivers for SAN-stored volumes.
+
+The unique thing about a SAN is that we don't expect to be able to run the
+volume controller on the SAN hardware; we expect to access it over SSH or
+some API.
+"""
+
+import os
+import paramiko
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.utils import ssh_execute
+from nova.volume.driver import ISCSIDriver
+
+LOG = logging.getLogger("nova.volume.driver")
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('san_thin_provision', 'true',
+ 'Use thin provisioning for SAN volumes?')
+flags.DEFINE_string('san_ip', '',
+ 'IP address of SAN controller')
+flags.DEFINE_string('san_login', 'admin',
+ 'Username for SAN controller')
+flags.DEFINE_string('san_password', '',
+ 'Password for SAN controller')
+flags.DEFINE_string('san_privatekey', '',
+ 'Filename of private key to use for SSH authentication')
+
+
+class SanISCSIDriver(ISCSIDriver):
+    """Base class for SAN-style storage volumes
+    (storage providers we access over SSH)."""
+    # Override because SAN ip != host ip
+ def _get_name_and_portal(self, volume):
+ """Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+
+ # TODO(justinsb): store in volume, remerge with generic iSCSI code
+ host = FLAGS.san_ip
+
+ (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" % host)
+
+ location = None
+ find_iscsi_name = self._build_iscsi_target_name(volume)
+ for target in out.splitlines():
+ if find_iscsi_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ if not location:
+            raise exception.Error(_("Could not find iSCSI export "
+                                    "for volume %s") %
+                                  volume_name)
+
+ iscsi_portal = location.split(",")[0]
+ LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
+ (iscsi_name, iscsi_portal))
+ return (iscsi_name, iscsi_portal)
+
+ def _build_iscsi_target_name(self, volume):
+ return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+
+ # discover_volume is still OK
+ # undiscover_volume is still OK
+
+ def _connect_to_ssh(self):
+ ssh = paramiko.SSHClient()
+ #TODO(justinsb): We need a better SSH key policy
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if FLAGS.san_password:
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ password=FLAGS.san_password)
+ elif FLAGS.san_privatekey:
+ privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
+ # It sucks that paramiko doesn't support DSA keys
+ privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ pkey=privatekey)
+ else:
+ raise exception.Error("Specify san_password or san_privatekey")
+ return ssh
+
+ def _run_ssh(self, command, check_exit_code=True):
+ #TODO(justinsb): SSH connection caching (?)
+ ssh = self._connect_to_ssh()
+
+ #TODO(justinsb): Reintroduce the retry hack
+ ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)
+
+ ssh.close()
+
+ return ret
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ pass
+
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ pass
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ pass
+
+ def check_for_setup_error(self):
+        """Raises an error if prerequisites aren't met."""
+ if not (FLAGS.san_password or FLAGS.san_privatekey):
+ raise exception.Error("Specify san_password or san_privatekey")
+
+ if not (FLAGS.san_ip):
+ raise exception.Error("san_ip must be set")
+
+
+def _collect_lines(data):
+    """Split lines from data into a list, trimming them."""
+ matches = []
+ for line in data.splitlines():
+ match = line.strip()
+ matches.append(match)
+
+ return matches
+
+
+def _get_prefixed_values(data, prefix):
+    """Collect lines that start with prefix, trimming them."""
+ matches = []
+ for line in data.splitlines():
+ line = line.strip()
+ if line.startswith(prefix):
+ match = line[len(prefix):]
+ match = match.strip()
+ matches.append(match)
+
+ return matches
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to Solaris-hosted ISCSI volumes.
+ Basic setup for a Solaris iSCSI server:
+ pkg install storage-server SUNWiscsit
+ svcadm enable stmf
+ svcadm enable -r svc:/network/iscsi/target:default
+ pfexec itadm create-tpg e1000g0 ${MYIP}
+ pfexec itadm create-target -t e1000g0
+
+    Then grant broad permissions to the user that will be logging in
+    (the exact minimal set has not been worked out yet):
+ zfs allow justinsb create,mount,destroy rpool
+ usermod -P'File System Management' justinsb
+ usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can log in using san_login & san_password/san_privatekey.
+ """
+
+ def _view_exists(self, luid):
+ cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
+ (out, _err) = self._run_ssh(cmd,
+ check_exit_code=False)
+ if "no views found" in out:
+ return False
+
+ if "View Entry:" in out:
+ return True
+
+ raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+ def _get_target_groups(self):
+ """Gets list of target groups from host."""
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
+ matches = _get_prefixed_values(out, 'Target group: ')
+ LOG.debug("target_groups=%s" % matches)
+ return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name in self._get_target_groups()
+
+ def _get_target_group_members(self, target_group_name):
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
+ (target_group_name))
+ matches = _get_prefixed_values(out, 'Member: ')
+ LOG.debug("members of %s=%s" % (target_group_name, matches))
+ return matches
+
+ def _is_target_group_member(self, target_group_name, iscsi_target_name):
+ return iscsi_target_name in (
+ self._get_target_group_members(target_group_name))
+
+ def _get_iscsi_targets(self):
+ cmd = ("pfexec /usr/sbin/itadm list-target | "
+ "awk '{print $1}' | grep -v ^TARGET")
+ (out, _err) = self._run_ssh(cmd)
+ matches = _collect_lines(out)
+ LOG.debug("_get_iscsi_targets=%s" % (matches))
+ return matches
+
+ def _iscsi_target_exists(self, iscsi_target_name):
+ return iscsi_target_name in self._get_iscsi_targets()
+
+ def _build_zfs_poolname(self, volume):
+ #TODO(justinsb): rpool should be configurable
+ zfs_poolname = 'rpool/%s' % (volume['name'])
+ return zfs_poolname
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ if int(volume['size']) == 0:
+ sizestr = '100M'
+ else:
+ sizestr = '%sG' % volume['size']
+
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
+ # Create a zfs volume
+ self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
+ (thin_provision_arg,
+ sizestr,
+ zfs_poolname))
+
+ def _get_luid(self, volume):
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
+ "grep -w %s | awk '{print $1}'" %
+ (zfs_poolname))
+
+ (stdout, _stderr) = self._run_ssh(cmd)
+
+ luid = stdout.strip()
+ return luid
+
+ def _is_lu_created(self, volume):
+ luid = self._get_luid(volume)
+ return luid
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ zfs_poolname = self._build_zfs_poolname(volume)
+ self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
+ (zfs_poolname))
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
+ escaped_group = FLAGS.volume_group.replace('-', '--')
+ escaped_name = volume['name'].replace('-', '--')
+ return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ #TODO(justinsb): On bootup, this is called for every volume.
+ # It then runs ~5 SSH commands for each volume,
+ # most of which fetch the same info each time
+ # This makes initial start stupid-slow
+ self._do_export(volume, force_create=False)
+
+ def create_export(self, context, volume):
+ self._do_export(volume, force_create=True)
+
+ def _do_export(self, volume, force_create):
+ # Create a Logical Unit (LU) backed by the zfs volume
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ if force_create or not self._is_lu_created(volume):
+ cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" %
+ (zfs_poolname))
+ self._run_ssh(cmd)
+
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ # Create a iSCSI target, mapped to just this volume
+ if force_create or not self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" %
+ (target_group_name))
+
+        # Yes, we add the target to the target group before we create it!
+ # Otherwise, it complains that the target is already active
+ if force_create or not self._is_target_group_member(target_group_name,
+ iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" %
+ (target_group_name, iscsi_name))
+ if force_create or not self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" %
+ (iscsi_name))
+ if force_create or not self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
+ (target_group_name, luid))
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+
+ # This is the reverse of _do_export
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ if self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" %
+ (luid))
+
+ if self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" %
+ (iscsi_name))
+ self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" %
+ (iscsi_name))
+
+ # We don't delete the tg-member; we delete the whole tg!
+
+ if self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" %
+ (target_group_name))
+
+ if self._is_lu_created(volume):
+ self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
+ (luid))
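Note on the Solaris driver above: _get_prefixed_values is the helper it uses to pick values out of stmfadm output. A short usage example with invented command output; the function body is reproduced from the new file for a self-contained run.

    def _get_prefixed_values(data, prefix):
        # Same behaviour as the helper added in nova/volume/san.py.
        matches = []
        for line in data.splitlines():
            line = line.strip()
            if line.startswith(prefix):
                matches.append(line[len(prefix):].strip())
        return matches

    sample = ('Target group: tg-volume-00000001\n'
              'Target group: tg-volume-00000002\n')
    print(_get_prefixed_values(sample, 'Target group: '))
    # ['tg-volume-00000001', 'tg-volume-00000002']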
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 4f5307d80..e01cc1e1e 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -64,7 +64,8 @@ class Server(object):
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
- logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
+ arg0 = sys.argv[0]
+ logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
@@ -143,7 +144,7 @@ class Application(object):
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
- raise NotImplementedError("You must implement __call__")
+ raise NotImplementedError(_("You must implement __call__"))
class Middleware(Application):