| author | Brian Waldon <brian.waldon@rackspace.com> | 2011-07-26 13:13:41 -0400 |
|---|---|---|
| committer | Brian Waldon <brian.waldon@rackspace.com> | 2011-07-26 13:13:41 -0400 |
| commit | d4803039c19a01087964c499c7e9ef9abfa82f74 (patch) | |
| tree | 6e8ac36399f6a7863eecca88ff6255af1cf46a59 /nova | |
| parent | 8501cc95aa60a0a5759cf911e8adaf624fa9e547 (diff) | |
| parent | 48a6bf42b3af5323d35f9a31bd4233712165b276 (diff) | |
| download | nova-d4803039c19a01087964c499c7e9ef9abfa82f74.tar.gz, nova-d4803039c19a01087964c499c7e9ef9abfa82f74.tar.xz, nova-d4803039c19a01087964c499c7e9ef9abfa82f74.zip | |
merging trunk; resolving conflicts
Diffstat (limited to 'nova')
82 files changed, 4281 insertions, 1190 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 16ca1ed2a..10720a804 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -530,7 +530,52 @@ class CloudController(object): g['ipPermissions'] += [r] return g - def _revoke_rule_args_to_dict(self, context, to_port=None, from_port=None, + def _rule_args_to_dict(self, context, kwargs): + rules = [] + if not 'groups' in kwargs and not 'ip_ranges' in kwargs: + rule = self._rule_dict_last_step(context, **kwargs) + if rule: + rules.append(rule) + return rules + if 'ip_ranges' in kwargs: + rules = self._cidr_args_split(kwargs) + finalset = [] + for rule in rules: + if 'groups' in rule: + groups_values = self._groups_args_split(rule) + for groups_value in groups_values: + finalset.append(groups_value) + else: + if rule: + finalset.append(rule) + return finalset + + def _cidr_args_split(self, kwargs): + cidr_args_split = [] + cidrs = kwargs['ip_ranges'] + for key, cidr in cidrs.iteritems(): + mykwargs = kwargs.copy() + del mykwargs['ip_ranges'] + mykwargs['cidr_ip'] = cidr['cidr_ip'] + cidr_args_split.append(mykwargs) + return cidr_args_split + + def _groups_args_split(self, kwargs): + groups_args_split = [] + groups = kwargs['groups'] + for key, group in groups.iteritems(): + mykwargs = kwargs.copy() + del mykwargs['groups'] + if 'group_name' in group: + mykwargs['source_security_group_name'] = group['group_name'] + if 'user_id' in group: + mykwargs['source_security_group_owner_id'] = group['user_id'] + if 'group_id' in group: + mykwargs['source_security_group_id'] = group['group_id'] + groups_args_split.append(mykwargs) + return groups_args_split + + def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): @@ -615,7 +660,7 @@ class CloudController(object): msg = "Revoke security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) - criteria = self._revoke_rule_args_to_dict(context, **kwargs) + criteria = self._rule_args_to_dict(context, kwargs)[0] if criteria is None: raise exception.ApiError(_("Not enough parameters to build a " "valid rule.")) @@ -656,21 +701,34 @@ class CloudController(object): msg = "Authorize security group ingress %s" LOG.audit(_(msg), security_group['name'], context=context) - values = self._revoke_rule_args_to_dict(context, **kwargs) - if values is None: - raise exception.ApiError(_("Not enough parameters to build a " - "valid rule.")) - values['parent_group_id'] = security_group.id - - if self._security_group_rule_exists(security_group, values): - raise exception.ApiError(_('This rule already exists in group %s') - % group_name) - - security_group_rule = db.security_group_rule_create(context, values) + prevalues = [] + try: + prevalues = kwargs['ip_permissions'] + except KeyError: + prevalues.append(kwargs) + postvalues = [] + for values in prevalues: + rulesvalues = self._rule_args_to_dict(context, values) + if not rulesvalues: + err = "%s Not enough parameters to build a valid rule" + raise exception.ApiError(_(err % rulesvalues)) + for values_for_rule in rulesvalues: + values_for_rule['parent_group_id'] = security_group.id + if self._security_group_rule_exists(security_group, + values_for_rule): + err = '%s - This rule already exists in group' + raise exception.ApiError(_(err) % values_for_rule) + postvalues.append(values_for_rule) + + for values_for_rule in postvalues: + security_group_rule = db.security_group_rule_create(context, + 
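The new helpers above fan a single EC2 ingress call out into one rule dict per CIDR and per source group before the rules are checked and created. A minimal standalone sketch of the CIDR fan-out (sample kwargs are invented; the real code splits on `groups` the same way):

```python
def cidr_args_split(kwargs):
    # One copy of the request kwargs per 'ip_ranges' entry, each carrying
    # its own cidr_ip, mirroring _cidr_args_split above.
    rules = []
    for cidr in kwargs['ip_ranges'].itervalues():
        rule = kwargs.copy()
        del rule['ip_ranges']
        rule['cidr_ip'] = cidr['cidr_ip']
        rules.append(rule)
    return rules

kwargs = {'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22,
          'ip_ranges': {'1': {'cidr_ip': '10.0.0.0/24'},
                        '2': {'cidr_ip': '192.168.0.0/16'}}}
split = cidr_args_split(kwargs)
assert sorted(r['cidr_ip'] for r in split) == ['10.0.0.0/24', '192.168.0.0/16']
```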
values_for_rule) self.compute_api.trigger_security_group_rules_refresh(context, - security_group_id=security_group['id']) + security_group_id=security_group['id']) + group = db.security_group_get_by_name(context, context.project_id, + security_group['name']) return True def _get_source_project_id(self, context, source_security_group_owner_id): @@ -1147,7 +1205,7 @@ class CloudController(object): def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - self._do_instance(self.compute_api.rescue, contect, instnace_id) + self._do_instance(self.compute_api.rescue, context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index e87d7c754..868b98a31 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -164,11 +164,17 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper, '1.1') - mapper.resource("image_meta", "meta", - controller=image_metadata.create_resource(), + image_metadata_controller = image_metadata.create_resource() + mapper.resource("image_meta", "metadata", + controller=image_metadata_controller, parent_resource=dict(member_name='image', collection_name='images')) + mapper.connect("metadata", "/images/{image_id}/metadata", + controller=image_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) + mapper.resource("server_meta", "meta", controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index e3201b14f..a13a758ab 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -47,10 +47,10 @@ class Controller(object): raise exception.AdminRequired() def index(self, req): - raise faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def detail(self, req): - raise faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def show(self, req, id): """Return data about the given account id""" @@ -65,7 +65,7 @@ class Controller(object): def create(self, req, body): """We use update with create-or-update semantics because the id comes from an external source""" - raise faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def update(self, req, id, body): """This is really create or update.""" diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 3e95aedf3..7ff0d999e 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -19,7 +19,6 @@ import time from webob import exc -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -36,20 +35,20 @@ class Controller(object): def index(self, req, server_id, **kwargs): """ Returns the list of backup schedules for a given instance """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def show(self, req, server_id, id, **kwargs): """ Returns a single backup schedule for a given instance """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create(self, req, server_id, **kwargs): """ No actual update method required, since the existing API allows both create and update through a POST """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def delete(self, req, 
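The `mapper.connect` call above adds a PUT-only route so the whole image metadata collection can be replaced through a new `update_all` action. A small sketch of how such a method-conditioned route matches, assuming the Routes library accepts an `environ` argument to `match` as shown:

```python
import routes

mapper = routes.Mapper()
mapper.connect("/images/{image_id}/metadata",
               action="update_all",
               conditions={"method": ["PUT"]})

match = mapper.match("/images/1/metadata",
                     environ={"REQUEST_METHOD": "PUT"})
assert match["action"] == "update_all"
assert match["image_id"] == "1"
# Other verbs are not served by this route; in Nova they fall through to
# the regular image_meta resource routes.
assert mapper.match("/images/1/metadata",
                    environ={"REQUEST_METHOD": "GET"}) is None
```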
server_id, id, **kwargs): """ Deletes an existing backup schedule """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create_resource(): diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 8e12ce0c0..bd14a1389 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -53,10 +53,10 @@ def get_pagination_params(request): params[param] = int(request.GET[param]) except ValueError: msg = _('%s param must be an integer') % param - raise webob.exc.HTTPBadRequest(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) if params[param] < 0: msg = _('%s param must be positive') % param - raise webob.exc.HTTPBadRequest(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) return params @@ -77,18 +77,22 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): try: offset = int(request.GET.get('offset', 0)) except ValueError: - raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: - raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: - raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) if offset < 0: - raise webob.exc.HTTPBadRequest(_('offset param must be positive')) + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(max_limit, limit or max_limit) range_end = offset + limit @@ -111,7 +115,8 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): start_index = i + 1 break if start_index < 0: - raise webob.exc.HTTPBadRequest(_('marker [%s] not found' % marker)) + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) range_end = start_index + limit return items[start_index:range_end] @@ -162,3 +167,28 @@ def remove_version_from_href(href): msg = _('href does not contain version') raise ValueError(msg) return new_href + + +def get_version_from_href(href): + """Returns the api version in the href. + + Returns the api version in the href. + If no version is found, 1.0 is returned + + Given: 'http://www.nova.com/123' + Returns: '1.0' + + Given: 'http://www.nova.com/v1.1' + Returns: '1.1' + + """ + try: + #finds the first instance that matches /v#.#/ + version = re.findall(r'[/][v][0-9]+\.[0-9]+[/]', href) + #if no version was found, try finding /v#.# at the end of the string + if not version: + version = re.findall(r'[/][v][0-9]+\.[0-9]+$', href) + version = re.findall(r'[0-9]+\.[0-9]', version[0])[0] + except IndexError: + version = '1.0' + return version diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 7a43fba96..d2655acfa 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -16,10 +16,10 @@ # under the License. 
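`get_version_from_href`, added above, is what later hunks (for example `faults.py`) use to pick a v1.0 or v1.1 XML serializer from the request URL. A standalone copy plus the behaviour its docstring promises (this assumes `re` is imported in `common.py`):

```python
import re

def get_version_from_href(href):
    # Copy of the helper above, reproduced for a quick behavioural check.
    try:
        version = re.findall(r'[/][v][0-9]+\.[0-9]+[/]', href)
        if not version:
            version = re.findall(r'[/][v][0-9]+\.[0-9]+$', href)
        version = re.findall(r'[0-9]+\.[0-9]', version[0])[0]
    except IndexError:
        version = '1.0'
    return version

assert get_version_from_href('http://www.nova.com/v1.1') == '1.1'
assert get_version_from_href('http://www.nova.com/v1.0/images/1') == '1.0'
assert get_version_from_href('http://www.nova.com/123') == '1.0'
```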
from webob import exc +import webob from nova import console from nova import exception -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -71,12 +71,12 @@ class Controller(object): int(server_id), int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return _translate_detail_keys(console) def update(self, req, server_id, id): """You can't update a console""" - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def delete(self, req, server_id, id): """Deletes a console""" @@ -85,8 +85,8 @@ class Controller(object): int(server_id), int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) def create_resource(): diff --git a/nova/api/openstack/contrib/multinic.py b/nova/api/openstack/contrib/multinic.py index 841061721..da8dcee5d 100644 --- a/nova/api/openstack/contrib/multinic.py +++ b/nova/api/openstack/contrib/multinic.py @@ -16,6 +16,7 @@ """The multinic extension.""" from webob import exc +import webob from nova import compute from nova import log as logging @@ -103,7 +104,7 @@ class Multinic(extensions.ExtensionDescriptor): except Exception, e: LOG.exception(_("Error in addFixedIp %s"), e) return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def _remove_fixed_ip(self, input_dict, req, id): """Removes an IP from an instance.""" @@ -122,4 +123,4 @@ class Multinic(extensions.ExtensionDescriptor): except Exception, e: LOG.exception(_("Error in removeFixedIp %s"), e) return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + return webob.Response(status_int=202) diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index e5e2c5b50..827e36097 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -16,6 +16,7 @@ """The volumes extension.""" from webob import exc +import webob from nova import compute from nova import exception @@ -104,7 +105,7 @@ class VolumeController(object): self.volume_api.delete(context, volume_id=id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def index(self, req): """Returns a summary list of volumes.""" @@ -279,7 +280,7 @@ class VolumeAttachmentController(object): self.compute_api.detach_volume(context, volume_id=volume_id) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def _items(self, req, server_id, entity_maker): """Returns a list of attachments, transformed through entity_maker.""" diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 2654e3c40..f8317565e 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -28,7 +28,6 @@ from nova import quota from nova import utils from nova.compute import instance_types -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova.auth import manager as auth_manager @@ -70,11 +69,14 @@ class CreateInstanceHelper(object): return type from this method is left to the caller. 
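A pattern repeated throughout this merge (consoles, the multinic and volumes extensions, and many server actions further down) is returning a plain `webob.Response` with status 202 instead of an `exc.HTTPAccepted` instance. The replacement is just an empty-bodied response object:

```python
import webob

def accepted():
    # Empty 202 Accepted, as the rewritten handlers now return;
    # webob.exc.HTTPAccepted would carry a small HTML status page instead.
    return webob.Response(status_int=202)

resp = accepted()
assert resp.status_int == 202
assert resp.body == ''
```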
""" if not body: - raise faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() - context = req.environ['nova.context'] + if not 'server' in body: + raise exc.HTTPUnprocessableEntity() - password = self.controller._get_server_admin_password(body['server']) + server_dict = body['server'] + context = req.environ['nova.context'] + password = self.controller._get_server_admin_password(server_dict) key_name = None key_data = None @@ -94,28 +96,32 @@ class CreateInstanceHelper(object): except Exception, e: msg = _("Cannot find requested image %(image_href)s: %(e)s" % locals()) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) - personality = body['server'].get('personality') + personality = server_dict.get('personality') injected_files = [] if personality: injected_files = self._get_injected_files(personality) - flavor_id = self.controller._flavor_id_from_req_data(body) + try: + flavor_id = self.controller._flavor_id_from_req_data(body) + except ValueError as error: + msg = _("Invalid flavorRef provided.") + raise exc.HTTPBadRequest(explanation=msg) - if not 'name' in body['server']: + if not 'name' in server_dict: msg = _("Server name is not defined") raise exc.HTTPBadRequest(explanation=msg) - zone_blob = body['server'].get('blob') - name = body['server']['name'] + zone_blob = server_dict.get('blob') + name = server_dict['name'] self._validate_server_name(name) name = name.strip() - reservation_id = body['server'].get('reservation_id') - min_count = body['server'].get('min_count') - max_count = body['server'].get('max_count') + reservation_id = server_dict.get('reservation_id') + min_count = server_dict.get('min_count') + max_count = server_dict.get('max_count') # min_count and max_count are optional. If they exist, they come # in as strings. We want to default 'min_count' to 1, and default # 'max_count' to be 'min_count'. @@ -142,7 +148,7 @@ class CreateInstanceHelper(object): display_description=name, key_name=key_name, key_data=key_data, - metadata=body['server'].get('metadata', {}), + metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, zone_blob=zone_blob, @@ -153,8 +159,10 @@ class CreateInstanceHelper(object): self._handle_quota_error(error) except exception.ImageNotFound as error: msg = _("Can not find requested image") - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) - + raise exc.HTTPBadRequest(explanation=msg) + except exception.FlavorNotFound as error: + msg = _("Invalid flavorRef provided.") + raise exc.HTTPBadRequest(explanation=msg) # Let the caller deal with unhandled exceptions. def _handle_quota_error(self, error): @@ -277,7 +285,7 @@ class CreateInstanceHelper(object): return password -class ServerXMLDeserializer(wsgi.XMLDeserializer): +class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted server create requests. 
@@ -294,11 +302,12 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): def _extract_server(self, node): """Marshal the server attribute of a parsed request""" server = {} - server_node = self._find_first_child_named(node, 'server') + server_node = self.find_first_child_named(node, 'server') for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]: if server_node.getAttribute(attr): server[attr] = server_node.getAttribute(attr) - metadata = self._extract_metadata(server_node) + metadata_node = self.find_first_child_named(server_node, "metadata") + metadata = self.extract_metadata(metadata_node) if metadata is not None: server["metadata"] = metadata personality = self._extract_personality(server_node) @@ -306,49 +315,17 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): server["personality"] = personality return server - def _extract_metadata(self, server_node): - """Marshal the metadata attribute of a parsed request""" - metadata_node = self._find_first_child_named(server_node, "metadata") - if metadata_node is None: - return None - metadata = {} - for meta_node in self._find_children_named(metadata_node, "meta"): - key = meta_node.getAttribute("key") - metadata[key] = self._extract_text(meta_node) - return metadata - def _extract_personality(self, server_node): """Marshal the personality attribute of a parsed request""" personality_node = \ - self._find_first_child_named(server_node, "personality") + self.find_first_child_named(server_node, "personality") if personality_node is None: return None personality = [] - for file_node in self._find_children_named(personality_node, "file"): + for file_node in self.find_children_named(personality_node, "file"): item = {} if file_node.hasAttribute("path"): item["path"] = file_node.getAttribute("path") - item["contents"] = self._extract_text(file_node) + item["contents"] = self.extract_text(file_node) personality.append(item) return personality - - def _find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name""" - for node in parent.childNodes: - if node.nodeName == name: - return node - return None - - def _find_children_named(self, parent, name): - """Return all of a nodes children who have the given name""" - for node in parent.childNodes: - if node.nodeName == name: - yield node - - def _extract_text(self, node): - """Get the text field contained by the given node""" - if len(node.childNodes) == 1: - child = node.childNodes[0] - if child.nodeType == child.TEXT_NODE: - return child.nodeValue - return "" diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index da06ecd15..cc889703e 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -23,6 +23,7 @@ import sys import routes import webob.dec import webob.exc +from xml.etree import ElementTree from nova import exception from nova import flags @@ -194,7 +195,7 @@ class ExtensionsResource(wsgi.Resource): def show(self, req, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] - return self._translate(ext) + return dict(extension=self._translate(ext)) def delete(self, req, id): raise faults.Fault(webob.exc.HTTPNotFound()) @@ -258,15 +259,18 @@ class ExtensionMiddleware(base_wsgi.Middleware): mapper = routes.Mapper() + serializer = wsgi.ResponseSerializer( + {'application/xml': ExtensionsXMLSerializer()}) # extended resources for resource in ext_mgr.get_resources(): LOG.debug(_('Extended resource: %s'), 
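The private child-finding helpers deleted here move into a shared `wsgi.MetadataXMLDeserializer` base class (referenced but not shown in this diff). Their logic operates on plain minidom nodes; a sketch with a tiny parsed document:

```python
from xml.dom import minidom

def find_first_child_named(parent, name):
    """Return the first child of parent with the given node name."""
    for node in parent.childNodes:
        if node.nodeName == name:
            return node
    return None

def extract_text(node):
    """Return the text contained by a simple element node."""
    if len(node.childNodes) == 1:
        child = node.childNodes[0]
        if child.nodeType == child.TEXT_NODE:
            return child.nodeValue
    return ""

doc = minidom.parseString('<server name="web1"><metadata>'
                          '<meta key="role">frontend</meta>'
                          '</metadata></server>')
server = find_first_child_named(doc, 'server')
meta = find_first_child_named(
    find_first_child_named(server, 'metadata'), 'meta')
assert extract_text(meta) == 'frontend'
```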
resource.collection) mapper.resource(resource.collection, resource.collection, - controller=wsgi.Resource(resource.controller), - collection=resource.collection_actions, - member=resource.member_actions, - parent_resource=resource.parent) + controller=wsgi.Resource( + resource.controller, serializer=serializer), + collection=resource.collection_actions, + member=resource.member_actions, + parent_resource=resource.parent) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, @@ -462,3 +466,40 @@ class ResourceExtension(object): self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions + + +class ExtensionsXMLSerializer(wsgi.XMLDictSerializer): + + def show(self, ext_dict): + ext = self._create_ext_elem(ext_dict['extension']) + return self._to_xml(ext) + + def index(self, exts_dict): + exts = ElementTree.Element('extensions') + for ext_dict in exts_dict['extensions']: + exts.append(self._create_ext_elem(ext_dict)) + return self._to_xml(exts) + + def _create_ext_elem(self, ext_dict): + """Create an extension xml element from a dict.""" + ext_elem = ElementTree.Element('extension') + ext_elem.set('name', ext_dict['name']) + ext_elem.set('namespace', ext_dict['namespace']) + ext_elem.set('alias', ext_dict['alias']) + ext_elem.set('updated', ext_dict['updated']) + desc = ElementTree.Element('description') + desc.text = ext_dict['description'] + ext_elem.append(desc) + for link in ext_dict.get('links', []): + elem = ElementTree.Element('atom:link') + elem.set('rel', link['rel']) + elem.set('href', link['href']) + elem.set('type', link['type']) + ext_elem.append(elem) + return ext_elem + + def _to_xml(self, root): + """Convert the xml tree object to an xml string.""" + root.set('xmlns', wsgi.XMLNS_V11) + root.set('xmlns:atom', wsgi.XMLNS_ATOM) + return ElementTree.tostring(root, encoding='UTF-8') diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index b9a23c126..1ab45d4f1 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -19,6 +19,7 @@ import webob.dec import webob.exc +from nova.api.openstack import common from nova.api.openstack import wsgi @@ -40,6 +41,7 @@ class Fault(webob.exc.HTTPException): def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception + self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): @@ -60,9 +62,13 @@ class Fault(webob.exc.HTTPException): content_type = req.best_match_content_type() + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V11), + }[common.get_version_from_href(req.url)] + serializer = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': xml_serializer, 'application/json': wsgi.JSONDictSerializer(), }[content_type] @@ -99,9 +105,13 @@ class OverLimitFault(webob.exc.HTTPException): content_type = request.best_match_content_type() metadata = {"attributes": {"overLimitFault": "code"}} + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V11), + }[common.get_version_from_href(request.url)] + serializer = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': xml_serializer, 'application/json': wsgi.JSONDictSerializer(), }[content_type] diff --git 
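The new `ExtensionsXMLSerializer` builds its documents with `xml.etree.ElementTree` rather than minidom. The construction pattern it uses, with invented extension data:

```python
from xml.etree import ElementTree

ext = ElementTree.Element('extension')
ext.set('name', 'Multinic')
ext.set('namespace', 'http://example.org/ext/multinic/api/v1.1')  # made up
ext.set('alias', 'NMN')
ext.set('updated', '2011-06-09T00:00:00+00:00')
desc = ElementTree.Element('description')
desc.text = 'Multiple network support'
ext.append(desc)

xml = ElementTree.tostring(ext, encoding='UTF-8')
assert '<description>Multiple network support</description>' in xml
```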
a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 6fab13147..b4bda68d4 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -16,6 +16,7 @@ # under the License. import webob +import xml.dom.minidom as minidom from nova import db from nova import exception @@ -74,19 +75,65 @@ class ControllerV11(Controller): return views.flavors.ViewBuilderV11(base_url) +class FlavorXMLSerializer(wsgi.XMLDictSerializer): + + def __init__(self): + super(FlavorXMLSerializer, self).__init__(xmlns=wsgi.XMLNS_V11) + + def _flavor_to_xml(self, xml_doc, flavor, detailed): + flavor_node = xml_doc.createElement('flavor') + flavor_node.setAttribute('id', str(flavor['id'])) + flavor_node.setAttribute('name', flavor['name']) + + if detailed: + flavor_node.setAttribute('ram', str(flavor['ram'])) + flavor_node.setAttribute('disk', str(flavor['disk'])) + + link_nodes = self._create_link_nodes(xml_doc, flavor['links']) + for link_node in link_nodes: + flavor_node.appendChild(link_node) + return flavor_node + + def _flavors_list_to_xml(self, xml_doc, flavors, detailed): + container_node = xml_doc.createElement('flavors') + + for flavor in flavors: + item_node = self._flavor_to_xml(xml_doc, flavor, detailed) + container_node.appendChild(item_node) + return container_node + + def show(self, flavor_container): + xml_doc = minidom.Document() + flavor = flavor_container['flavor'] + node = self._flavor_to_xml(xml_doc, flavor, True) + return self.to_xml_string(node, True) + + def detail(self, flavors_container): + xml_doc = minidom.Document() + flavors = flavors_container['flavors'] + node = self._flavors_list_to_xml(xml_doc, flavors, True) + return self.to_xml_string(node, True) + + def index(self, flavors_container): + xml_doc = minidom.Document() + flavors = flavors_container['flavors'] + node = self._flavors_list_to_xml(xml_doc, flavors, False) + return self.to_xml_string(node, True) + + def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, }[version]() - xmlns = { - '1.0': wsgi.XMLNS_V10, - '1.1': wsgi.XMLNS_V11, + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10), + '1.1': FlavorXMLSerializer(), }[version] body_serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), + 'application/xml': xml_serializer, } serializer = wsgi.ResponseSerializer(body_serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 4f33844fa..ee181c924 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -22,7 +22,6 @@ from nova import flags from nova import image from nova import quota from nova import utils -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -62,7 +61,7 @@ class Controller(object): if id in metadata: return {'meta': {id: metadata[id]}} else: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() def create(self, req, image_id, body): context = req.environ['nova.context'] @@ -97,20 +96,54 @@ class Controller(object): self._check_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) + return dict(meta=meta) - return req.body + def update_all(self, req, image_id, body): + context = req.environ['nova.context'] + img = self.image_service.show(context, image_id) + metadata = body.get('metadata', {}) + self._check_quota_limit(context, metadata) + img['properties'] = metadata + self.image_service.update(context, image_id, 
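`FlavorXMLSerializer` moves the v1.1 flavor views onto hand-built minidom documents. Stripped of the atom link handling, `_flavor_to_xml` reduces to attribute-setting on a single element (note that Python 2's minidom writes attributes in sorted order):

```python
import xml.dom.minidom as minidom

xml_doc = minidom.Document()
flavor_node = xml_doc.createElement('flavor')
flavor_node.setAttribute('id', '1')
flavor_node.setAttribute('name', 'm1.tiny')
flavor_node.setAttribute('ram', '512')    # detailed views only
flavor_node.setAttribute('disk', '0')     # detailed views only

assert flavor_node.toxml() == \
    '<flavor disk="0" id="1" name="m1.tiny" ram="512"/>'
```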
img, None) + return dict(metadata=metadata) def delete(self, req, image_id, id): context = req.environ['nova.context'] img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id) if not id in metadata: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() metadata.pop(id) img['properties'] = metadata self.image_service.update(context, image_id, img, None) +class ImageMetadataXMLDeserializer(wsgi.MetadataXMLDeserializer): + + def _extract_metadata_container(self, datastring): + dom = minidom.parseString(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = minidom.parseString(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class HeadersSerializer(wsgi.ResponseHeadersSerializer): + + def delete(self, response, data): + response.status_int = 204 + + class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def __init__(self, xmlns=wsgi.XMLNS_V11): super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns) @@ -144,6 +177,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def create(self, metadata_dict): return self._meta_list_to_xml_string(metadata_dict) + def update_all(self, metadata_dict): + return self._meta_list_to_xml_string(metadata_dict) + def _meta_item_to_xml_string(self, meta_item_dict): xml_doc = minidom.Document() item_key, item_value = meta_item_dict.items()[0] @@ -158,11 +194,21 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def update(self, meta_item_dict): return self._meta_item_to_xml_string(meta_item_dict['meta']) + def default(self, *args, **kwargs): + return '' + def create_resource(): + headers_serializer = HeadersSerializer() + + body_deserializers = { + 'application/xml': ImageMetadataXMLDeserializer(), + } + body_serializers = { 'application/xml': ImageMetadataXMLSerializer(), } - serializer = wsgi.ResponseSerializer(body_serializers) + serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) + deserializer = wsgi.RequestDeserializer(body_deserializers) - return wsgi.Resource(Controller(), serializer=serializer) + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index d0317583e..30e4fd389 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -25,7 +25,6 @@ from nova import flags import nova.image from nova import log from nova.api.openstack import common -from nova.api.openstack import faults from nova.api.openstack import image_metadata from nova.api.openstack import servers from nova.api.openstack.views import images as images_view @@ -35,7 +34,13 @@ from nova.api.openstack import wsgi LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -SUPPORTED_FILTERS = ['name', 'status'] +SUPPORTED_FILTERS = { + 'name': 'name', + 'status': 'status', + 'changes-since': 'changes-since', + 'server': 'property-instance_ref', + 'type': 'property-image_type', +} class Controller(object): @@ -62,8 +67,9 @@ class Controller(object): filters = {} for param in req.str_params: if param in SUPPORTED_FILTERS or param.startswith('property-'): - filters[param] = 
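The new `update_all` handler gives `PUT` on the metadata collection replace-everything semantics, and the accompanying `HeadersSerializer` turns a successful DELETE into a body-less 204. A condensed restatement of what `update_all` does (the quota check is elided):

```python
def update_all(image_service, context, image_id, body):
    # The supplied 'metadata' dict replaces the image properties wholesale;
    # keys not present in the request are dropped from the image.
    img = image_service.show(context, image_id)
    metadata = body.get('metadata', {})
    img['properties'] = metadata
    image_service.update(context, image_id, img, None)
    return dict(metadata=metadata)
```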
req.str_params.get(param) - + # map filter name or carry through if property-* + filter_name = SUPPORTED_FILTERS.get(param, param) + filters[filter_name] = req.str_params.get(param) return filters def show(self, req, id): @@ -78,7 +84,7 @@ class Controller(object): image = self._image_service.show(context, id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) + raise webob.exc.HTTPNotFound(explanation=explanation) return dict(image=self.get_builder(req).build(image, detail=True)) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 1ebfdb831..a74fae487 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -16,11 +16,11 @@ # under the License. import time +from xml.dom import minidom from webob import exc import nova -from nova.api.openstack import faults import nova.api.openstack.views.addresses from nova.api.openstack import wsgi from nova import db @@ -37,14 +37,14 @@ class Controller(object): instance = self.compute_api.get( req.environ['nova.context'], server_id) except nova.exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return instance def create(self, req, server_id, body): - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def delete(self, req, server_id, id): - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() class ControllerV10(Controller): @@ -63,7 +63,7 @@ class ControllerV10(Controller): view = builder.build_public_parts(instance) else: msg = _("Only private and public networks available") - return faults.Fault(exc.HTTPNotFound(explanation=msg)) + raise exc.HTTPNotFound(explanation=msg) return {id: view} @@ -86,7 +86,7 @@ class ControllerV11(Controller): if network is None: msg = _("Instance is not a member of specified network") - return faults.Fault(exc.HTTPNotFound(explanation=msg)) + raise exc.HTTPNotFound(explanation=msg) return network @@ -101,17 +101,51 @@ class ControllerV11(Controller): return nova.api.openstack.views.addresses.ViewBuilderV11() +class IPXMLSerializer(wsgi.XMLDictSerializer): + def __init__(self, xmlns=wsgi.XMLNS_V11): + super(IPXMLSerializer, self).__init__(xmlns=xmlns) + + def _ip_to_xml(self, xml_doc, ip_dict): + ip_node = xml_doc.createElement('ip') + ip_node.setAttribute('addr', ip_dict['addr']) + ip_node.setAttribute('version', str(ip_dict['version'])) + return ip_node + + def _network_to_xml(self, xml_doc, network_id, ip_dicts): + network_node = xml_doc.createElement('network') + network_node.setAttribute('id', network_id) + + for ip_dict in ip_dicts: + ip_node = self._ip_to_xml(xml_doc, ip_dict) + network_node.appendChild(ip_node) + + return network_node + + def networks_to_xml(self, xml_doc, networks_container): + addresses_node = xml_doc.createElement('addresses') + for (network_id, ip_dicts) in networks_container.items(): + network_node = self._network_to_xml(xml_doc, network_id, ip_dicts) + addresses_node.appendChild(network_node) + return addresses_node + + def show(self, network_container): + (network_id, ip_dicts) = network_container.items()[0] + xml_doc = minidom.Document() + node = self._network_to_xml(xml_doc, network_id, ip_dicts) + return self.to_xml_string(node, False) + + def index(self, addresses_container): + xml_doc = minidom.Document() + node = self.networks_to_xml(xml_doc, addresses_container['addresses']) + return self.to_xml_string(node, False) + + def 
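A few hunks up, `SUPPORTED_FILTERS` in `images.py` changed from a flat list to a name map, so public query parameters such as `server` and `type` are translated to the property filters the image service actually stores, while explicit `property-*` keys pass through untouched. The mapping logic from `_get_filters`, extracted for a quick check:

```python
SUPPORTED_FILTERS = {
    'name': 'name',
    'status': 'status',
    'changes-since': 'changes-since',
    'server': 'property-instance_ref',
    'type': 'property-image_type',
}

def get_filters(params):
    # Map known filter names; carry property-* keys through unchanged.
    filters = {}
    for param, value in params.items():
        if param in SUPPORTED_FILTERS or param.startswith('property-'):
            filters[SUPPORTED_FILTERS.get(param, param)] = value
    return filters

assert get_filters({'server': 'http://x/servers/12', 'limit': '5'}) == \
    {'property-instance_ref': 'http://x/servers/12'}
assert get_filters({'property-kernel_id': '10'}) == \
    {'property-kernel_id': '10'}
```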
create_resource(version): controller = { '1.0': ControllerV10, '1.1': ControllerV11, }[version]() - xmlns = { - '1.0': wsgi.XMLNS_V10, - '1.1': wsgi.XMLNS_V11, - }[version] - metadata = { 'list_collections': { 'public': {'item_name': 'ip', 'item_key': 'addr'}, @@ -119,10 +153,11 @@ def create_resource(version): }, } - body_serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=xmlns), - } - serializer = wsgi.ResponseSerializer(body_serializers) + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V11), + '1.1': IPXMLSerializer(), + }[version] + + serializer = wsgi.ResponseSerializer({'application/xml': xml_serializer}) return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index bc76547d8..86afa3b62 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -25,6 +25,7 @@ import re import time import urllib import webob.exc +from xml.dom import minidom from collections import defaultdict @@ -76,6 +77,58 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() +class LimitsXMLSerializer(wsgi.XMLDictSerializer): + + xmlns = wsgi.XMLNS_V11 + + def __init__(self): + pass + + def _create_rates_node(self, xml_doc, rates): + rates_node = xml_doc.createElement('rates') + for rate in rates: + rate_node = xml_doc.createElement('rate') + rate_node.setAttribute('uri', rate['uri']) + rate_node.setAttribute('regex', rate['regex']) + + for limit in rate['limit']: + limit_node = xml_doc.createElement('limit') + limit_node.setAttribute('value', str(limit['value'])) + limit_node.setAttribute('verb', limit['verb']) + limit_node.setAttribute('remaining', str(limit['remaining'])) + limit_node.setAttribute('unit', limit['unit']) + limit_node.setAttribute('next-available', + str(limit['next-available'])) + rate_node.appendChild(limit_node) + + rates_node.appendChild(rate_node) + return rates_node + + def _create_absolute_node(self, xml_doc, absolutes): + absolute_node = xml_doc.createElement('absolute') + for key, value in absolutes.iteritems(): + limit_node = xml_doc.createElement('limit') + limit_node.setAttribute('name', key) + limit_node.setAttribute('value', str(value)) + absolute_node.appendChild(limit_node) + return absolute_node + + def _limits_to_xml(self, xml_doc, limits): + limits_node = xml_doc.createElement('limits') + rates_node = self._create_rates_node(xml_doc, limits['rate']) + limits_node.appendChild(rates_node) + + absolute_node = self._create_absolute_node(xml_doc, limits['absolute']) + limits_node.appendChild(absolute_node) + + return limits_node + + def index(self, limits_dict): + xml_doc = minidom.Document() + node = self._limits_to_xml(xml_doc, limits_dict['limits']) + return self.to_xml_string(node, False) + + def create_resource(version='1.0'): controller = { '1.0': LimitsControllerV10, @@ -97,9 +150,13 @@ def create_resource(version='1.0'): }, } + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(xmlns=xmlns, metadata=metadata), + '1.1': LimitsXMLSerializer(), + }[version] + body_serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, - metadata=metadata), + 'application/xml': xml_serializer, } serializer = wsgi.ResponseSerializer(body_serializers) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index 3b9169f81..d4f42bbf5 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -18,7 +18,6 @@ from 
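`LimitsXMLSerializer` renders the v1.1 limits document as nested `rates` and `absolute` elements. Roughly the shape it produces, written out by hand (values are illustrative and the namespace declarations added by `to_xml_string` are omitted):

```python
sample_limits_xml = """\
<limits>
  <rates>
    <rate uri="*" regex=".*">
      <limit value="10" verb="POST" remaining="2" unit="MINUTE"
             next-available="2011-07-21T18:17:06Z"/>
    </rate>
  </rates>
  <absolute>
    <limit name="maxTotalInstances" value="100"/>
  </absolute>
</limits>
"""
```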
webob import exc from nova import compute -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova import exception from nova import quota diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 131937422..d7cabb067 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,6 +17,7 @@ import base64 import traceback from webob import exc +import webob from nova import compute from nova import db @@ -26,7 +27,6 @@ from nova import log as logging from nova import utils from nova.api.openstack import common from nova.api.openstack import create_instance_helper as helper -from nova.api.openstack import faults import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images @@ -101,17 +101,14 @@ class Controller(object): req.environ['nova.context'], id) return self._build_view(req, instance, is_detail=True) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() def create(self, req, body): """ Creates a new server for a given user """ extra_values = None result = None - try: - extra_values, instances = self.helper.create_instance( - req, body, self.compute_api.create) - except faults.Fault, f: - return f + extra_values, instances = self.helper.create_instance( + req, body, self.compute_api.create) # We can only return 1 instance via the API, if we happen to # build more than one... instances is a list, so we'll just @@ -131,7 +128,7 @@ class Controller(object): raise exc.HTTPUnprocessableEntity() if not body: - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() ctxt = req.environ['nova.context'] update_dict = {} @@ -146,7 +143,7 @@ class Controller(object): try: self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return exc.HTTPNoContent() @@ -170,7 +167,7 @@ class Controller(object): for key in actions.keys(): if key in body: return actions[key](body, req, id) - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def _action_change_password(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -180,7 +177,7 @@ class Controller(object): self.compute_api.confirm_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in confirm-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) + raise exc.HTTPBadRequest() return exc.HTTPNoContent() def _action_revert_resize(self, input_dict, req, id): @@ -188,8 +185,8 @@ class Controller(object): self.compute_api.revert_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in revert-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) def _action_resize(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -199,23 +196,23 @@ class Controller(object): reboot_type = input_dict['reboot']['type'] else: LOG.exception(_("Missing argument 'type' for reboot")) - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() try: # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver self.compute_api.reboot(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in reboot %s"), e) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise 
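The `action` method above picks a handler from whichever recognised key appears in the POST body and now raises a 501 directly when nothing matches. The dispatch shape looks like the sketch below; the action table itself is defined elsewhere in the controller and is not part of this hunk, so its entries here are assumed from the `_action_*` handlers visible in the file:

```python
from webob import exc

def action(self, req, id, body):
    actions = {
        'changePassword': self._action_change_password,
        'reboot': self._action_reboot,
        'resize': self._action_resize,
        'confirmResize': self._action_confirm_resize,
        'revertResize': self._action_revert_resize,
        'rebuild': self._action_rebuild,
        'migrate': self._action_migrate,
    }
    for key in actions.keys():
        if key in body:
            return actions[key](body, req, id)
    raise exc.HTTPNotImplemented()
```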
exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) def _action_migrate(self, input_dict, req, id): try: self.compute_api.resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in migrate %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def lock(self, req, id): @@ -230,8 +227,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::lock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unlock(self, req, id): @@ -246,8 +243,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::unlock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_lock(self, req, id): @@ -261,8 +258,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::get_lock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def reset_network(self, req, id, body): @@ -276,8 +273,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::reset_network %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def inject_network_info(self, req, id, body): @@ -291,8 +288,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::inject_network_info %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def pause(self, req, id, body): @@ -303,8 +300,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::pause %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unpause(self, req, id, body): @@ -315,8 +312,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::unpause %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def suspend(self, req, id, body): @@ -327,8 +324,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::suspend %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def resume(self, req, id, body): @@ -339,8 +336,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::resume %s"), readable) - return 
faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def rescue(self, req, id): @@ -351,8 +348,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::rescue %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unrescue(self, req, id): @@ -363,8 +360,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::unrescue %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_ajax_console(self, req, id): @@ -373,8 +370,8 @@ class Controller(object): self.compute_api.get_ajax_console(req.environ['nova.context'], int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_vnc_console(self, req, id): @@ -383,8 +380,8 @@ class Controller(object): self.compute_api.get_vnc_console(req.environ['nova.context'], int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def diagnostics(self, req, id): @@ -415,8 +412,8 @@ class ControllerV10(Controller): try: self.compute_api.delete(req.environ['nova.context'], id) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) def _image_ref_from_req_data(self, data): return data['server']['imageId'] @@ -439,18 +436,14 @@ class ControllerV10(Controller): def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing 'flavorId' argument for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing 'flavorId' argument for resize")) + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -460,18 +453,16 @@ class ControllerV10(Controller): except (KeyError, TypeError): msg = _("Could not parse imageId from request.") LOG.debug(msg) - return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) try: self.compute_api.rebuild(context, instance_id, image_id) except exception.BuildInProgress: msg = _("Instance %s is currently being rebuilt.") % instance_id LOG.debug(msg) - return faults.Fault(exc.HTTPConflict(explanation=msg)) + raise 
exc.HTTPConflict(explanation=msg) - response = exc.HTTPAccepted() - response.empty_body = True - return response + return webob.Response(status_int=202) def _get_server_admin_password(self, server): """ Determine the admin password for a server on creation """ @@ -486,7 +477,7 @@ class ControllerV11(Controller): try: self.compute_api.delete(req.environ['nova.context'], id) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() def _image_ref_from_req_data(self, data): return data['server']['imageRef'] @@ -518,7 +509,7 @@ class ControllerV11(Controller): msg = _("Invalid adminPass") return exc.HTTPBadRequest(explanation=msg) self.compute_api.set_admin_password(context, id, password) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def _limit_items(self, items, req): return common.limited_by_marker(items, req) @@ -530,7 +521,7 @@ class ControllerV11(Controller): except AttributeError as ex: msg = _("Unable to parse metadata key/value pairs.") LOG.debug(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) def _decode_personalities(self, personalities): """Decode the Base64-encoded personalities.""" @@ -541,14 +532,14 @@ class ControllerV11(Controller): except (KeyError, TypeError): msg = _("Unable to parse personality path/contents.") LOG.info(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) try: personality["contents"] = base64.b64decode(contents) except TypeError: msg = _("Personality content could not be Base64 decoded.") LOG.info(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ @@ -560,11 +551,11 @@ class ControllerV11(Controller): flavor_id) else: LOG.exception(_("Missing 'flavorRef' argument for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() except Exception, e: LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -574,7 +565,7 @@ class ControllerV11(Controller): except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") LOG.debug(msg) - return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata") @@ -590,11 +581,9 @@ class ControllerV11(Controller): except exception.BuildInProgress: msg = _("Instance %s is currently being rebuilt.") % instance_id LOG.debug(msg) - return faults.Fault(exc.HTTPConflict(explanation=msg)) + raise exc.HTTPConflict(explanation=msg) - response = exc.HTTPAccepted() - response.empty_body = True - return response + return webob.Response(status_int=202) def get_default_xmlns(self, req): return common.XML_NS_V11 diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index cf2ddbabb..54d0a8334 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -17,7 +17,6 @@ from webob import exc -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -26,27 +25,27 @@ class Controller(object): def index(self, 
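The v1.1 rebuild path decodes each personality file's Base64 `contents` in place and maps malformed input to a 400. The decode step for a single entry, mirroring `_decode_personalities`:

```python
import base64
from webob import exc

def decode_personality(personality):
    try:
        contents = personality['contents']
    except (KeyError, TypeError):
        raise exc.HTTPBadRequest(
            explanation="Unable to parse personality path/contents.")
    try:
        # Python 2's b64decode raises TypeError on bad padding.
        personality['contents'] = base64.b64decode(contents)
    except TypeError:
        raise exc.HTTPBadRequest(
            explanation="Personality content could not be Base64 decoded.")

entry = {'path': '/etc/banner.txt',
         'contents': base64.b64encode('Welcome to the cloud!')}
decode_personality(entry)
assert entry['contents'] == 'Welcome to the cloud!'
```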
req, **kwargs): """ Returns a list of Shared IP Groups for the user """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def show(self, req, id, **kwargs): """ Shows in-depth information on a specific Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def update(self, req, id, **kwargs): """ You can't update a Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def delete(self, req, id, **kwargs): """ Deletes a Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def detail(self, req, **kwargs): """ Returns a complete list of Shared IP Groups """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create(self, req, **kwargs): """ Creates a new Shared IP group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create_resource(): diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 6ae1eaf2a..8dd72d559 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -19,7 +19,6 @@ from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova.auth import manager @@ -69,7 +68,7 @@ class Controller(object): user = None if user is None: - raise faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return dict(user=_translate_keys(user)) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index a634c3267..df7a94b7e 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -15,13 +15,18 @@ # License for the specific language governing permissions and limitations # under the License. 
+from datetime import datetime import webob import webob.dec +from xml.dom import minidom import nova.api.openstack.views.versions from nova.api.openstack import wsgi +ATOM_XMLNS = "http://www.w3.org/2005/Atom" + + class Versions(wsgi.Resource): def __init__(self): metadata = { @@ -32,11 +37,19 @@ class Versions(wsgi.Resource): } body_serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), + 'application/xml': VersionsXMLSerializer(metadata=metadata), } serializer = wsgi.ResponseSerializer(body_serializers) - wsgi.Resource.__init__(self, None, serializer=serializer) + supported_content_types = ('application/json', + 'application/xml', + 'application/atom+xml') + deserializer = wsgi.RequestDeserializer( + supported_content_types=supported_content_types) + + wsgi.Resource.__init__(self, None, serializer=serializer, + deserializer=deserializer) def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" @@ -44,13 +57,143 @@ class Versions(wsgi.Resource): { "id": "v1.1", "status": "CURRENT", + #TODO(wwolf) get correct value for these + "updated": "2011-07-18T11:30:00Z", }, { "id": "v1.0", "status": "DEPRECATED", + #TODO(wwolf) get correct value for these + "updated": "2010-10-09T11:30:00Z", }, ] builder = nova.api.openstack.views.versions.get_view_builder(request) versions = [builder.build(version) for version in version_objs] return dict(versions=versions) + + +class VersionsXMLSerializer(wsgi.XMLDictSerializer): + def _versions_to_xml(self, versions): + root = self._xml_doc.createElement('versions') + + for version in versions: + root.appendChild(self._create_version_node(version)) + + return root + + def _create_version_node(self, version): + version_node = self._xml_doc.createElement('version') + version_node.setAttribute('id', version['id']) + version_node.setAttribute('status', version['status']) + version_node.setAttribute('updated', version['updated']) + + for link in version['links']: + link_node = self._xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + version_node.appendChild(link_node) + + return version_node + + def default(self, data): + self._xml_doc = minidom.Document() + node = self._versions_to_xml(data['versions']) + + return self.to_xml_string(node) + + +class VersionsAtomSerializer(wsgi.XMLDictSerializer): + def __init__(self, metadata=None, xmlns=None): + if not xmlns: + self.xmlns = ATOM_XMLNS + else: + self.xmlns = xmlns + + def _create_text_elem(self, name, text, type=None): + elem = self._xml_doc.createElement(name) + if type: + elem.setAttribute('type', type) + elem_text = self._xml_doc.createTextNode(text) + elem.appendChild(elem_text) + return elem + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_meta(self, root, versions): + title = self._create_text_elem('title', 'Available API Versions', + type='text') + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + updated = self._create_text_elem('updated', 
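`VersionsAtomSerializer` stamps the feed's `<updated>` element with the newest of the per-version timestamps. `_get_most_recent_update` is equivalent to taking `max()` over the parsed datetimes:

```python
from datetime import datetime

versions = [
    {'id': 'v1.0', 'updated': '2010-10-09T11:30:00Z'},
    {'id': 'v1.1', 'updated': '2011-07-18T11:30:00Z'},
]

fmt = '%Y-%m-%dT%H:%M:%SZ'
recent = max(datetime.strptime(v['updated'], fmt) for v in versions)
assert recent.strftime(fmt) == '2011-07-18T11:30:00Z'
```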
recent) + + base_url = self._get_base_url(versions[0]['links'][0]['href']) + id = self._create_text_elem('id', base_url) + link = self._xml_doc.createElement('link') + link.setAttribute('rel', 'self') + link.setAttribute('href', base_url) + + author = self._xml_doc.createElement('author') + author_name = self._create_text_elem('name', 'Rackspace') + author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') + author.appendChild(author_name) + author.appendChild(author_uri) + + root.appendChild(title) + root.appendChild(updated) + root.appendChild(id) + root.appendChild(author) + root.appendChild(link) + + def _create_version_entries(self, root, versions): + for version in versions: + entry = self._xml_doc.createElement('entry') + + id = self._create_text_elem('id', version['links'][0]['href']) + title = self._create_text_elem('title', + 'Version %s' % version['id'], + type='text') + updated = self._create_text_elem('updated', version['updated']) + + entry.appendChild(id) + entry.appendChild(title) + entry.appendChild(updated) + + for link in version['links']: + link_node = self._xml_doc.createElement('link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + entry.appendChild(link_node) + + content = self._create_text_elem('content', + 'Version %s %s (%s)' % + (version['id'], + version['status'], + version['updated']), + type='text') + + entry.appendChild(content) + root.appendChild(entry) + + def default(self, data): + self._xml_doc = minidom.Document() + node = self._xml_doc.createElementNS(self.xmlns, 'feed') + self._create_meta(node, data['versions']) + self._create_version_entries(node, data['versions']) + + return self.to_xml_string(node) diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index a242efa45..ddbf7a144 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -15,9 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. 
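# Illustrative sketch (not part of this commit): the _get_most_recent_update()
# logic in the Atom serializer above, reduced to its core. Each version's
# ISO-8601 "updated" stamp is parsed and the newest one is reported for the
# feed-level <updated> element. The sample data reuses the two timestamps
# introduced in this diff.
from datetime import datetime

FMT = '%Y-%m-%dT%H:%M:%SZ'
versions = [
    {'id': 'v1.0', 'updated': '2010-10-09T11:30:00Z'},
    {'id': 'v1.1', 'updated': '2011-07-18T11:30:00Z'},
]

def most_recent_update(versions):
    recent = max(datetime.strptime(v['updated'], FMT) for v in versions)
    return recent.strftime(FMT)

assert most_recent_update(versions) == '2011-07-18T11:30:00Z'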
+from nova import flags from nova import utils from nova.api.openstack import common +FLAGS = flags.FLAGS + class ViewBuilder(object): """Models a server addresses response as a python dictionary.""" @@ -50,22 +53,37 @@ class ViewBuilderV11(ViewBuilder): if network_label not in networks: networks[network_label] = [] - networks[network_label].extend(self._extract_ipv4(interface)) + ip_addresses = list(self._extract_ipv4_addresses(interface)) + + if FLAGS.use_ipv6: + ipv6_address = self._extract_ipv6_address(interface) + if ipv6_address is not None: + ip_addresses.append(ipv6_address) + + networks[network_label].extend(ip_addresses) return networks def build_network(self, interfaces, network_label): for interface in interfaces: if interface['network']['label'] == network_label: - ips = self._extract_ipv4(interface) - return {network_label: list(ips)} + ips = list(self._extract_ipv4_addresses(interface)) + ipv6 = self._extract_ipv6_address(interface) + if ipv6 is not None: + ips.append(ipv6) + return {network_label: ips} return None - def _extract_ipv4(self, interface): + def _extract_ipv4_addresses(self, interface): for fixed_ip in interface['fixed_ips']: yield self._build_ip_entity(fixed_ip['address'], 4) for floating_ip in fixed_ip.get('floating_ips', []): yield self._build_ip_entity(floating_ip['address'], 4) + def _extract_ipv6_address(self, interface): + fixed_ipv6 = interface.get('fixed_ipv6') + if fixed_ipv6 is not None: + return self._build_ip_entity(fixed_ipv6, 6) + def _build_ip_entity(self, address, version): return {'addr': address, 'version': version} diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index 934b4921a..f603d7cb4 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -15,9 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
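# Illustrative sketch (not part of this commit): the address-list shape that
# ViewBuilderV11 above now produces, with IPv4 entries for each fixed and
# floating ip plus a single IPv6 entry when FLAGS.use_ipv6 is set and the
# interface exposes fixed_ipv6. The interface dict is fabricated sample data.
def build_ips(interface, use_ipv6=True):
    ips = []
    for fixed_ip in interface['fixed_ips']:
        ips.append({'addr': fixed_ip['address'], 'version': 4})
        for floating_ip in fixed_ip.get('floating_ips', []):
            ips.append({'addr': floating_ip['address'], 'version': 4})
    if use_ipv6 and interface.get('fixed_ipv6') is not None:
        ips.append({'addr': interface['fixed_ipv6'], 'version': 6})
    return ips

sample_interface = {
    'fixed_ips': [{'address': '10.0.0.3',
                   'floating_ips': [{'address': '192.168.10.100'}]}],
    'fixed_ipv6': 'fd00::216:3eff:fe33:4455',
}
print(build_ips(sample_interface))
# [{'addr': '10.0.0.3', 'version': 4},
#  {'addr': '192.168.10.100', 'version': 4},
#  {'addr': 'fd00::216:3eff:fe33:4455', 'version': 6}]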
+import datetime import time from nova.api.openstack import common +from nova import utils class ViewBuilder(object): @@ -113,10 +115,12 @@ class ViewBuilderV11(ViewBuilder): return limits def _build_rate_limit(self, rate_limit): + next_avail = \ + datetime.datetime.utcfromtimestamp(rate_limit["resetTime"]) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], - "next-available": rate_limit["resetTime"], + "next-available": utils.isotime(at=next_avail), } diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ab7e8da61..be25e1e40 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -50,7 +50,7 @@ class ViewBuilder(object): else: server = self._build_simple(inst) - self._build_extra(server, inst) + self._build_extra(server['server'], inst) return server @@ -82,7 +82,7 @@ class ViewBuilder(object): ctxt = nova.context.get_admin_context() compute_api = nova.compute.API() - if compute_api.has_finished_migration(ctxt, inst['id']): + if compute_api.has_finished_migration(ctxt, inst['uuid']): inst_dict['status'] = 'RESIZE-CONFIRM' # Return the metadata as a dictionary @@ -99,7 +99,6 @@ class ViewBuilder(object): self._build_flavor(inst_dict, inst) self._build_addresses(inst_dict, inst) - inst_dict['uuid'] = inst['uuid'] return dict(server=inst_dict) def _build_addresses(self, response, inst): @@ -121,6 +120,9 @@ class ViewBuilder(object): class ViewBuilderV10(ViewBuilder): """Model an Openstack API V1.0 server response.""" + def _build_extra(self, response, inst): + response['uuid'] = inst['uuid'] + def _build_image(self, response, inst): if 'image_ref' in dict(inst): image_ref = inst['image_ref'] @@ -145,18 +147,46 @@ class ViewBuilderV11(ViewBuilder): self.image_builder = image_builder self.base_url = base_url + def _build_detail(self, inst): + response = super(ViewBuilderV11, self)._build_detail(inst) + response['server']['created'] = inst['created_at'] + response['server']['updated'] = inst['updated_at'] + if 'status' in response['server']: + if response['server']['status'] == "ACTIVE": + response['server']['progress'] = 100 + elif response['server']['status'] == "BUILD": + response['server']['progress'] = 0 + return response + def _build_image(self, response, inst): if 'image_ref' in dict(inst): image_href = inst['image_ref'] - if str(image_href).isdigit(): - image_href = int(image_href) - response['imageRef'] = image_href + image_id = str(common.get_id_from_href(image_href)) + _bookmark = self.image_builder.generate_bookmark(image_id) + response['image'] = { + "id": image_id, + "links": [ + { + "rel": "bookmark", + "href": _bookmark, + }, + ] + } def _build_flavor(self, response, inst): if "instance_type" in dict(inst): flavor_id = inst["instance_type"]['flavorid'] flavor_ref = self.flavor_builder.generate_href(flavor_id) - response["flavorRef"] = flavor_ref + flavor_bookmark = self.flavor_builder.generate_bookmark(flavor_id) + response["flavor"] = { + "id": str(common.get_id_from_href(flavor_ref)), + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ] + } def _build_addresses(self, response, inst): interfaces = inst.get('virtual_interfaces', []) @@ -164,6 +194,7 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) + response['uuid'] = inst['uuid'] def _build_links(self, response, inst): href = self.generate_href(inst["id"]) @@ -180,7 +211,7 @@ class 
ViewBuilderV11(ViewBuilder): }, ] - response["server"]["links"] = links + response["links"] = links def generate_href(self, server_id): """Create an url that refers to a specific server id.""" diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index d0145c94a..9fa8f49dc 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -36,6 +36,7 @@ class ViewBuilder(object): version = { "id": version_data["id"], "status": version_data["status"], + "updated": version_data["updated"], "links": self._build_links(version_data), } @@ -56,4 +57,4 @@ class ViewBuilder(object): def generate_href(self, version_number): """Create an url that refers to a specific version_number.""" - return os.path.join(self.base_url, version_number) + return os.path.join(self.base_url, version_number) + '/' diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index c3f841aa5..a28443d12 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -13,6 +13,7 @@ from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') @@ -20,21 +21,22 @@ LOG = logging.getLogger('nova.api.openstack.wsgi') class Request(webob.Request): """Add some Openstack API-specific logic to the base webob.Request.""" - def best_match_content_type(self): + def best_match_content_type(self, supported_content_types=None): """Determine the requested response content-type. Based on the query extension then the Accept header. """ - supported = ('application/json', 'application/xml') + supported_content_types = supported_content_types or \ + ('application/json', 'application/xml') parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) - if ctype in supported: + if ctype in supported_content_types: return ctype - bm = self.accept.best_match(supported) + bm = self.accept.best_match(supported_content_types) # default to application/json if we don't find a preference return bm or 'application/json' @@ -134,10 +136,44 @@ class XMLDeserializer(TextDeserializer): listnames) return result + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + def default(self, datastring): return {'body': self._from_xml(datastring)} +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return None + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer""" @@ -151,7 +187,12 @@ class RequestHeadersDeserializer(ActionDispatcher): class 
RequestDeserializer(object): """Break up a Request object into more useful pieces.""" - def __init__(self, body_deserializers=None, headers_deserializer=None): + def __init__(self, body_deserializers=None, headers_deserializer=None, + supported_content_types=None): + + self.supported_content_types = supported_content_types or \ + ('application/json', 'application/xml') + self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), @@ -213,7 +254,7 @@ class RequestDeserializer(object): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): - return request.best_match_content_type() + return request.best_match_content_type(self.supported_content_types) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" @@ -390,8 +431,9 @@ class ResponseSerializer(object): def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type - serializer = self.get_body_serializer(content_type) - response.body = serializer.serialize(data, action) + if data is not None: + serializer = self.get_body_serializer(content_type) + response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: @@ -412,6 +454,7 @@ class Resource(wsgi.Application): serialized by requested content type. """ + def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib @@ -436,14 +479,17 @@ class Resource(wsgi.Application): action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") - return webob.exc.HTTPBadRequest(explanation=msg) + return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg)) - action_result = self.dispatch(request, action, args) + try: + action_result = self.dispatch(request, action, args) + except webob.exc.HTTPException as ex: + LOG.info(_("HTTP exception thrown: %s"), unicode(ex)) + action_result = faults.Fault(ex) - #TODO(bcwaldon): find a more elegant way to pass through non-dict types if type(action_result) is dict or action_result is None: response = self.serializer.serialize(action_result, accept, diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 2e02ec380..f7fd87bcd 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -27,7 +27,6 @@ from nova.scheduler import api from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import common -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -127,11 +126,8 @@ class Controller(object): Returns a reservation ID (a UUID). 
""" result = None - try: - extra_values, result = self.helper.create_instance(req, body, - self.compute_api.create_all_at_once) - except faults.Fault, f: - return f + extra_values, result = self.helper.create_instance(req, body, + self.compute_api.create_all_at_once) reservation_id = result return {'reservation_id': reservation_id} diff --git a/nova/compute/api.py b/nova/compute/api.py index b13bd5013..d1e5647d2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -127,7 +127,7 @@ class API(base.Base): quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: pid = context.project_id - msg = _("Quota exceeeded for %(pid)s, tried to set " + msg = _("Quota exceeded for %(pid)s, tried to set " "%(num_metadata)s metadata properties") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") @@ -138,7 +138,7 @@ class API(base.Base): for k, v in metadata.iteritems(): if len(k) > 255 or len(v) > 255: pid = context.project_id - msg = _("Quota exceeeded for %(pid)s, metadata property " + msg = _("Quota exceeded for %(pid)s, metadata property " "key or value too long") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") @@ -165,7 +165,7 @@ class API(base.Base): instance_type) if num_instances < min_count: pid = context.project_id - LOG.warn(_("Quota exceeeded for %(pid)s," + LOG.warn(_("Quota exceeded for %(pid)s," " tried to run %(min_count)s instances") % locals()) if num_instances <= 0: message = _("Instance quota exceeded. You cannot run any " @@ -467,10 +467,10 @@ class API(base.Base): return [dict(x.iteritems()) for x in instances] - def has_finished_migration(self, context, instance_id): + def has_finished_migration(self, context, instance_uuid): """Returns true if an instance has a finished migration.""" try: - db.migration_get_by_instance_and_status(context, instance_id, + db.migration_get_by_instance_and_status(context, instance_uuid, 'finished') return True except exception.NotFound: @@ -873,39 +873,50 @@ class API(base.Base): instance_id, params=rebuild_params) + @scheduler_api.reroute_compute("revert_resize") def revert_resize(self, context, instance_id): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() + instance_ref = self._get_instance(context, instance_id, + 'revert_resize') migration_ref = self.db.migration_get_by_instance_and_status(context, - instance_id, 'finished') + instance_ref['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') params = {'migration_id': migration_ref['id']} - self._cast_compute_message('revert_resize', context, instance_id, - migration_ref['dest_compute'], params=params) + self._cast_compute_message('revert_resize', context, + instance_ref['uuid'], + migration_ref['source_compute'], + params=params) + self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) + @scheduler_api.reroute_compute("confirm_resize") def confirm_resize(self, context, instance_id): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() + instance_ref = self._get_instance(context, instance_id, + 'confirm_resize') migration_ref = self.db.migration_get_by_instance_and_status(context, - instance_id, 'finished') + instance_ref['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - instance_ref = self.db.instance_get(context, 
instance_id) params = {'migration_id': migration_ref['id']} - self._cast_compute_message('confirm_resize', context, instance_id, - migration_ref['source_compute'], params=params) + self._cast_compute_message('confirm_resize', context, + instance_ref['uuid'], + migration_ref['dest_compute'], + params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) + @scheduler_api.reroute_compute("resize") def resize(self, context, instance_id, flavor_id=None): """Resize (ie, migrate) a running instance. @@ -913,8 +924,8 @@ class API(base.Base): the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ - instance = self.db.instance_get(context, instance_id) - current_instance_type = instance['instance_type'] + instance_ref = self._get_instance(context, instance_id, 'resize') + current_instance_type = instance_ref['instance_type'] # If flavor_id is not provided, only migrate the instance. if not flavor_id: @@ -942,10 +953,11 @@ class API(base.Base): raise exception.ApiError(_("Invalid flavor: cannot use" "the same flavor. ")) + instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id, + "instance_id": instance_ref['uuid'], "flavor_id": new_instance_type['id']}}) @scheduler_api.reroute_compute("add_fixed_ip") diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 1d246e445..c13a629a9 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -112,7 +112,7 @@ def get_instance_type(id): return get_default_instance_type() try: ctxt = context.get_admin_context() - return db.instance_type_get_by_id(ctxt, id) + return db.instance_type_get(ctxt, id) except exception.DBError: raise exception.ApiError(_("Unknown instance type: %s") % id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 04609d7c5..c79abd696 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,8 +77,6 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." " Set to 0 to disable.") -flags.DEFINE_bool('auto_assign_floating_ip', False, - 'Autoassigning floating ip to VM') flags.DEFINE_integer('host_state_interval', 120, 'Interval in seconds for querying the host status') @@ -93,6 +91,10 @@ def checks_instance_lock(function): """Decorator to prevent action against locked instances for non-admins.""" @functools.wraps(function) def decorated_function(self, context, instance_id, *args, **kwargs): + #TODO(anyone): this being called instance_id is forcing a slightly + # confusing convention of pushing instance_uuids + # through an "instance_id" key in the queue args dict when + # casting through the compute API LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" @@ -212,6 +214,15 @@ class ComputeManager(manager.SchedulerDependentManager): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() + def _get_instance_nw_info(self, context, instance): + """Get a list of dictionaries of network data of an instance. 
+ Returns an empty list if stub_network flag is set.""" + network_info = [] + if not FLAGS.stub_network: + network_info = self.network_api.get_instance_nw_info(context, + instance) + return network_info + def _setup_block_device_mapping(self, context, instance_id): """setup volumes for block device mapping""" self.db.instance_set_state(context, @@ -274,16 +285,19 @@ class ComputeManager(manager.SchedulerDependentManager): """Launch a new instance with specified options.""" context = context.elevated() instance = self.db.instance_get(context, instance_id) - instance.injected_files = kwargs.get('injected_files', []) - instance.admin_pass = kwargs.get('admin_password', None) if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, context=context) - self.db.instance_update(context, - instance_id, - {'host': self.host, 'launched_on': self.host}) - + updates = {} + updates['host'] = self.host + updates['launched_on'] = self.host + # NOTE(vish): used by virt but not in database + updates['injected_files'] = kwargs.get('injected_files', []) + updates['admin_pass'] = kwargs.get('admin_password', None) + instance = self.db.instance_update(context, + instance_id, + updates) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -299,8 +313,6 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self.network_api.allocate_for_instance(context, instance, vpn=is_vpn) LOG.debug(_("instance network_info: |%s|"), network_info) - self.network_manager.setup_compute_network(context, - instance_id) else: # TODO(tr3buchet) not really sure how this should be handled. # virt requires network_info to be passed in but stub_network @@ -354,6 +366,7 @@ class ComputeManager(manager.SchedulerDependentManager): {'action_str': action_str, 'instance_id': instance_id}, context=context) + network_info = self._get_instance_nw_info(context, instance) if not FLAGS.stub_network: self.network_api.deallocate_for_instance(context, instance) @@ -366,7 +379,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) - self.driver.destroy(instance) + self.driver.destroy(instance, network_info) if action_str == 'Terminating': terminate_volumes(self.db, context, instance_id) @@ -411,7 +424,9 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) - self.driver.destroy(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + + self.driver.destroy(instance_ref, network_info) image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) @@ -451,8 +466,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.NOSTATE, 'rebooting') - self.network_manager.setup_compute_network(context, instance_id) - self.driver.reboot(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.reboot(instance_ref, network_info) self._update_state(context, instance_id) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -642,10 +657,10 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.NOSTATE, 'rescuing') - self.network_manager.setup_compute_network(context, instance_id) _update_state = 
lambda result: self._update_state_callback( self, context, instance_id, result) - self.driver.rescue(instance_ref, _update_state) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.rescue(instance_ref, _update_state, network_info) self._update_state(context, instance_id) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -661,7 +676,8 @@ class ComputeManager(manager.SchedulerDependentManager): 'unrescuing') _update_state = lambda result: self._update_state_callback( self, context, instance_id, result) - self.driver.unrescue(instance_ref, _update_state) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.unrescue(instance_ref, _update_state, network_info) self._update_state(context, instance_id) @staticmethod @@ -673,9 +689,12 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): """Destroys the source instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - self.driver.destroy(instance_ref) + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.destroy(instance_ref, network_info) usage_info = utils.usage_from_instance(instance_ref) notifier.notify('compute.%s' % self.host, 'compute.instance.resize.confirm', @@ -691,17 +710,17 @@ class ComputeManager(manager.SchedulerDependentManager): source machine. """ - instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) - self.driver.destroy(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.destroy(instance_ref, network_info) topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, {'method': 'finish_revert_resize', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, }, + 'args': {'migration_id': migration_ref['id']}, }) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -713,17 +732,20 @@ class ComputeManager(manager.SchedulerDependentManager): in the database. """ - instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['old_flavor_id']) # Just roll back the record. There's no need to resize down since # the 'old' VM already has the preferred attributes - self.db.instance_update(context, instance_id, + self.db.instance_update(context, instance_ref['uuid'], dict(memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'])) + local_gb=instance_type['local_gb'], + instance_type_id=instance_type['id'])) self.driver.revert_resize(instance_ref) self.db.migration_update(context, migration_id, @@ -743,35 +765,42 @@ class ComputeManager(manager.SchedulerDependentManager): """ context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + + # Because of checks_instance_lock, this must currently be called + # instance_id. 
However, the compute API is always passing the UUID + # of the instance down + instance_ref = self.db.instance_get_by_uuid(context, instance_id) + if instance_ref['host'] == FLAGS.host: raise exception.Error(_( 'Migration error: destination same as source!')) - instance_type = self.db.instance_type_get_by_flavor_id(context, + old_instance_type = self.db.instance_type_get(context, + instance_ref['instance_type_id']) + new_instance_type = self.db.instance_type_get_by_flavor_id(context, flavor_id) + migration_ref = self.db.migration_create(context, - {'instance_id': instance_id, + {'instance_uuid': instance_ref['uuid'], 'source_compute': instance_ref['host'], 'dest_compute': FLAGS.host, 'dest_host': self.driver.get_host_ip_addr(), - 'old_flavor_id': instance_type['flavorid'], + 'old_flavor_id': old_instance_type['flavorid'], 'new_flavor_id': flavor_id, 'status': 'pre-migrating'}) - LOG.audit(_('instance %s: migrating to '), instance_id, + LOG.audit(_('instance %s: migrating'), instance_ref['uuid'], context=context) topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, {'method': 'resize_instance', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, }, - }) + 'args': {'instance_id': instance_ref['uuid'], + 'migration_id': migration_ref['id']}}) + usage_info = utils.usage_from_instance(instance_ref, - new_instance_type=instance_type['name'], - new_instance_type_id=instance_type['id']) + new_instance_type=new_instance_type['name'], + new_instance_type_id=new_instance_type['id']) notifier.notify('compute.%s' % self.host, 'compute.instance.resize.prep', notifier.INFO, @@ -782,7 +811,9 @@ class ComputeManager(manager.SchedulerDependentManager): def resize_instance(self, context, instance_id, migration_id): """Starts the migration of a running instance to another host.""" migration_ref = self.db.migration_get(context, migration_id) - instance_ref = self.db.instance_get(context, instance_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + self.db.migration_update(context, migration_id, {'status': 'migrating'}) @@ -798,10 +829,11 @@ class ComputeManager(manager.SchedulerDependentManager): topic = self.db.queue_get_for(context, FLAGS.compute_topic, migration_ref['dest_compute']) + params = {'migration_id': migration_id, + 'disk_info': disk_info, + 'instance_id': instance_ref['uuid']} rpc.cast(context, topic, {'method': 'finish_resize', - 'args': {'migration_id': migration_id, - 'instance_id': instance_id, - 'disk_info': disk_info}}) + 'args': params}) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @@ -813,24 +845,20 @@ class ComputeManager(manager.SchedulerDependentManager): """ migration_ref = self.db.migration_get(context, migration_id) - instance_ref = self.db.instance_get(context, - migration_ref['instance_id']) - # TODO(mdietz): apply the rest of the instance_type attributes going - # after they're supported + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['new_flavor_id']) - self.db.instance_update(context, instance_id, + self.db.instance_update(context, instance_ref.uuid, dict(instance_type_id=instance_type['id'], memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], local_gb=instance_type['local_gb'])) - # reload the updated instance ref - # FIXME(mdietz): is there reload functionality? 
- instance = self.db.instance_get(context, instance_id) - network_info = self.network_api.get_instance_nw_info(context, - instance) - self.driver.finish_resize(instance, disk_info, network_info) + instance_ref = self.db.instance_get_by_uuid(context, + instance_ref.uuid) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.finish_resize(instance_ref, disk_info, network_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) @@ -962,7 +990,11 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() LOG.debug(_('instance %s: getting locked state'), instance_id, context=context) - instance_ref = self.db.instance_get(context, instance_id) + if utils.is_uuid_like(instance_id): + uuid = instance_id + instance_ref = self.db.instance_get_by_uuid(context, uuid) + else: + instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] @checks_instance_lock @@ -979,8 +1011,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.debug(_('instance %s: inject network info'), instance_id, context=context) instance = self.db.instance_get(context, instance_id) - network_info = self.network_api.get_instance_nw_info(context, - instance) + network_info = self._get_instance_nw_info(context, instance) LOG.debug(_("network_info to inject: |%s|"), network_info) self.driver.inject_network_info(instance, network_info) @@ -1198,17 +1229,17 @@ class ComputeManager(manager.SchedulerDependentManager): # # Retry operation is necessary because continuously request comes, # concorrent request occurs to iptables, then it complains. + network_info = self._get_instance_nw_info(context, instance_ref) max_retry = FLAGS.live_migration_retry_count for cnt in range(max_retry): try: - self.network_manager.setup_compute_network(context, - instance_id) + self.driver.plug_vifs(instance_ref, network_info) break except exception.ProcessExecutionError: if cnt == max_retry - 1: raise else: - LOG.warn(_("setup_compute_network() failed %(cnt)d." + LOG.warn(_("plug_vifs() failed %(cnt)d." "Retry up to %(max_retry)d for %(hostname)s.") % locals()) time.sleep(1) @@ -1286,8 +1317,9 @@ class ComputeManager(manager.SchedulerDependentManager): # Releasing vlan. # (not necessary in current implementation?) + network_info = self._get_instance_nw_info(ctxt, instance_ref) # Releasing security group ingress rule. - self.driver.unfilter_instance(instance_ref) + self.driver.unfilter_instance(instance_ref, network_info) # Database updating. i_name = instance_ref.name diff --git a/nova/db/api.py b/nova/db/api.py index cb4da169c..47308bdba 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -314,9 +314,9 @@ def migration_get(context, migration_id): return IMPL.migration_get(context, migration_id) -def migration_get_by_instance_and_status(context, instance_id, status): - """Finds a migration by the instance id its migrating.""" - return IMPL.migration_get_by_instance_and_status(context, instance_id, +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) @@ -332,13 +332,14 @@ def fixed_ip_associate(context, address, instance_id): return IMPL.fixed_ip_associate(context, address, instance_id) -def fixed_ip_associate_pool(context, network_id, instance_id): - """Find free ip in network and associate it to instance. 
+def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): + """Find free ip in network and associate it to instance or host. Raises if one is not available. """ - return IMPL.fixed_ip_associate_pool(context, network_id, instance_id) + return IMPL.fixed_ip_associate_pool(context, network_id, + instance_id, host) def fixed_ip_create(context, values): @@ -361,9 +362,9 @@ def fixed_ip_get_all(context): return IMPL.fixed_ip_get_all(context) -def fixed_ip_get_all_by_host(context, host): - """Get all defined fixed ips used by a host.""" - return IMPL.fixed_ip_get_all_by_host(context, host) +def fixed_ip_get_all_by_instance_host(context, host): + """Get all allocated fixed ips filtered by instance host.""" + return IMPL.fixed_ip_get_all_instance_by_host(context, host) def fixed_ip_get_by_address(context, address): @@ -376,6 +377,11 @@ def fixed_ip_get_by_instance(context, instance_id): return IMPL.fixed_ip_get_by_instance(context, instance_id) +def fixed_ip_get_by_network_host(context, network_id, host): + """Get fixed ip for a host in a network.""" + return IMPL.fixed_ip_get_by_network_host(context, network_id, host) + + def fixed_ip_get_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) @@ -1305,9 +1311,9 @@ def instance_type_get_all(context, inactive=False): return IMPL.instance_type_get_all(context, inactive) -def instance_type_get_by_id(context, id): +def instance_type_get(context, id): """Get instance type by id.""" - return IMPL.instance_type_get_by_id(context, id) + return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 189be0714..d7810098a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -18,7 +18,6 @@ """ Implementation of SQLAlchemy backend. 
""" -import traceback import warnings from nova import db @@ -33,7 +32,6 @@ from sqlalchemy import or_ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql import exists from sqlalchemy.sql import func from sqlalchemy.sql.expression import literal_column @@ -672,7 +670,7 @@ def fixed_ip_associate(context, address, instance_id): @require_admin_context -def fixed_ip_associate_pool(context, network_id, instance_id): +def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, @@ -682,6 +680,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id): filter_by(reserved=False).\ filter_by(deleted=False).\ filter_by(instance=None).\ + filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, @@ -692,9 +691,12 @@ def fixed_ip_associate_pool(context, network_id, instance_id): fixed_ip_ref.network = network_get(context, network_id, session=session) - fixed_ip_ref.instance = instance_get(context, - instance_id, - session=session) + if instance_id: + fixed_ip_ref.instance = instance_get(context, + instance_id, + session=session) + if host: + fixed_ip_ref.host = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @@ -750,7 +752,7 @@ def fixed_ip_get_all(context, session=None): @require_admin_context -def fixed_ip_get_all_by_host(context, host=None): +def fixed_ip_get_all_by_instance_host(context, host=None): session = get_session() result = session.query(models.FixedIp).\ @@ -800,6 +802,20 @@ def fixed_ip_get_by_instance(context, instance_id): @require_context +def fixed_ip_get_by_network_host(context, network_id, host): + session = get_session() + rv = session.query(models.FixedIp).\ + filter_by(network_id=network_id).\ + filter_by(host=host).\ + filter_by(deleted=False).\ + first() + if not rv: + raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, + host=host) + return rv + + +@require_context def fixed_ip_get_by_virtual_interface(context, vif_id): session = get_session() rv = session.query(models.FixedIp).\ @@ -1157,9 +1173,9 @@ def instance_get_active_by_window(context, begin, end=None): """Return instances that were continuously active over the given window""" session = get_session() query = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('instance_type')).\ filter(models.Instance.launched_at < begin) if end: @@ -1253,7 +1269,7 @@ def instance_get_project_vpn(context, project_id): options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ @@ -1333,7 +1349,11 @@ def instance_update(context, instance_id, values): instance_metadata_update_or_create(context, instance_id, values.pop('metadata')) with session.begin(): - instance_ref = instance_get(context, instance_id, session=session) + if utils.is_uuid_like(instance_id): + instance_ref = instance_get_by_uuid(context, 
instance_id, + session=session) + else: + instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) instance_ref.save(session=session) return instance_ref @@ -1480,8 +1500,6 @@ def network_associate(context, project_id, force=False): called by project_get_networks under certain conditions and network manager add_network_to_project() - only associates projects with networks that have configured hosts - only associate if the project doesn't already have a network or if force is True @@ -1497,7 +1515,6 @@ def network_associate(context, project_id, force=False): def network_query(project_filter): return session.query(models.Network).\ filter_by(deleted=False).\ - filter(models.Network.host != None).\ filter_by(project_id=project_filter).\ with_lockmode('update').\ first() @@ -1704,9 +1721,16 @@ def network_get_all_by_instance(_context, instance_id): def network_get_all_by_host(context, host): session = get_session() with session.begin(): + # NOTE(vish): return networks that have host set + # or that have a fixed ip with host set + host_filter = or_(models.Network.host == host, + models.FixedIp.host == host) + return session.query(models.Network).\ filter_by(deleted=False).\ - filter_by(host=host).\ + join(models.Network.fixed_ips).\ + filter(host_filter).\ + filter_by(deleted=False).\ all() @@ -1738,6 +1762,7 @@ def network_update(context, network_id, values): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) + return network_ref ################### @@ -2798,13 +2823,13 @@ def migration_get(context, id, session=None): @require_admin_context -def migration_get_by_instance_and_status(context, instance_id, status): +def migration_get_by_instance_and_status(context, instance_uuid, status): session = get_session() result = session.query(models.Migration).\ - filter_by(instance_id=instance_id).\ + filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).first() if not result: - raise exception.MigrationNotFoundByStatus(instance_id=instance_id, + raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @@ -2985,7 +3010,7 @@ def instance_type_get_all(context, inactive=False): @require_context -def instance_type_get_by_id(context, id): +def instance_type_get(context, id): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py new file mode 100644 index 000000000..3a5f7eba8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
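# Illustrative sketch (not part of this commit): the "integer id or uuid"
# dispatch that instance_update above (and get_lock in the compute manager)
# now performs via utils.is_uuid_like(). The helper below is an assumed
# stand-in, not nova's implementation.
import uuid

def is_uuid_like(value):
    try:
        uuid.UUID(str(value))
        return True
    except (TypeError, ValueError, AttributeError):
        return False

def pick_lookup(identifier):
    if is_uuid_like(identifier):
        return 'instance_get_by_uuid'
    return 'instance_get'

print(pick_lookup(42))                                      # instance_get
print(pick_lookup('b91a4224-45f4-4b14-a2c7-7f29a9d2c54c'))  # instance_get_by_uuid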
+ +from sqlalchemy import Column, Table, MetaData, Boolean, String + +meta = MetaData() + +fixed_ips_host = Column('host', String(255)) + +networks_multi_host = Column('multi_host', Boolean, default=False) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.create_column(fixed_ips_host) + + networks = Table('networks', meta, autoload=True) + networks.create_column(networks_multi_host) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.drop_column(fixed_ips_host) + + networks = Table('networks', meta, autoload=True) + networks.drop_column(networks_multi_host) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py new file mode 100644 index 000000000..b002ba064 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import Column, Integer, String, MetaData, Table + +meta = MetaData() + + +# +# Tables to alter +# +# + +instance_id = Column('instance_id', Integer()) +instance_uuid = Column('instance_uuid', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) + migrations.create_column(instance_uuid) + migrations.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) + migrations.c.instance_uuid.drop() + migrations.create_column(instance_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py new file mode 100644 index 000000000..c938eb716 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
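# Illustrative sketch (not part of this commit): a template of the
# add-column / drop-column pattern that migrations 033, 034 and 035 above all
# follow, using the sqlalchemy-migrate changeset API visible in those scripts
# (Table.create_column(), Table.drop_column(), Column.drop()). The table and
# column names below are hypothetical examples only.
from sqlalchemy import Column, MetaData, String, Table

meta = MetaData()
example_column = Column('example', String(255))


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.create_column(example_column)


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.drop_column(example_column)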
+ +from sqlalchemy import Column, Table, MetaData, Boolean, String + +meta = MetaData() + +dns2 = Column('dns2', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + networks.c.dns.alter(Column('dns1', String(255))) + networks.create_column(dns2) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + networks.c.dns1.alter(Column('dns', String(255))) + networks.drop_column(dns2) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1bcc8eaec..7e35c2cba 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -31,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import ipv6 from nova import utils @@ -528,7 +529,8 @@ class Migration(BASE, NovaBase): dest_host = Column(String(255)) old_flavor_id = Column(Integer()) new_flavor_id = Column(Integer()) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_uuid = Column(String(255), ForeignKey('instances.uuid'), + nullable=True) #TODO(_cerberus_): enum status = Column(String(255)) @@ -545,6 +547,7 @@ class Network(BASE, NovaBase): injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) cidr_v6 = Column(String(255), unique=True) + multi_host = Column(Boolean, default=False) gateway_v6 = Column(String(255)) netmask_v6 = Column(String(255)) @@ -553,7 +556,8 @@ class Network(BASE, NovaBase): bridge_interface = Column(String(255)) gateway = Column(String(255)) broadcast = Column(String(255)) - dns = Column(String(255)) + dns1 = Column(String(255)) + dns2 = Column(String(255)) vlan = Column(Integer) vpn_public_address = Column(String(255)) @@ -577,6 +581,18 @@ class VirtualInterface(BASE, NovaBase): instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) instance = relationship(Instance, backref=backref('virtual_interfaces')) + @property + def fixed_ipv6(self): + cidr_v6 = self.network.cidr_v6 + if cidr_v6 is None: + ipv6_address = None + else: + project_id = self.instance.project_id + mac = self.address + ipv6_address = ipv6.to_global(cidr_v6, mac, project_id) + + return ipv6_address + # TODO(vish): can these both come from the same baseclass? 
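# Illustrative sketch (not part of this commit): deriving a per-instance IPv6
# address from a network prefix and the interface MAC via modified EUI-64, to
# show the kind of value the new VirtualInterface.fixed_ipv6 property returns
# through nova.ipv6.to_global(). This is NOT nova's implementation, which can
# also mix the project id into the address depending on the backend.
import netaddr

def mac_to_ipv6(cidr_v6, mac):
    prefix = netaddr.IPNetwork(cidr_v6)
    octets = [int(part, 16) for part in mac.split(':')]
    octets[0] ^= 0x02                        # flip the universal/local bit
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    host = 0
    for octet in eui64:
        host = (host << 8) | octet
    return str(netaddr.IPAddress(prefix.first + host, 6))

print(mac_to_ipv6('fd00::/64', '02:16:3e:33:44:55'))  # fd00::16:3eff:fe33:4455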
class FixedIp(BASE, NovaBase): @@ -603,6 +619,7 @@ class FixedIp(BASE, NovaBase): # leased means dhcp bridge has leased the ip leased = Column(Boolean, default=False) reserved = Column(Boolean, default=False) + host = Column(String(255)) class FloatingIp(BASE, NovaBase): diff --git a/nova/exception.py b/nova/exception.py index ad6c005f8..38e705417 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -78,8 +78,8 @@ def wrap_db_error(f): except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) - return _wrap _wrap.func_name = f.func_name + return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, @@ -408,6 +408,11 @@ class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_id)s has zero fixed ips.") +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + message = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") + + class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") diff --git a/nova/image/glance.py b/nova/image/glance.py index 55d948a32..5c2dc957b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -89,6 +89,10 @@ class GlanceImageService(service.BaseImageService): # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] + filters = filters or {} + if 'is_public' not in filters: + # NOTE(vish): don't filter out private images + filters['is_public'] = 'none' image_metas = self.client.get_images_detailed(filters=filters, marker=marker, limit=limit) @@ -101,6 +105,10 @@ class GlanceImageService(service.BaseImageService): def detail(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] + filters = filters or {} + if 'is_public' not in filters: + # NOTE(vish): don't filter out private images + filters['is_public'] = 'none' image_metas = self.client.get_images_detailed(filters=filters, marker=marker, limit=limit) diff --git a/nova/image/s3.py b/nova/image/s3.py index 4a3df98ba..c313c7a13 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -168,7 +168,7 @@ class S3ImageService(service.BaseImageService): metadata.update({'disk_format': image_format, 'container_format': image_format, 'status': 'queued', - 'is_public': True, + 'is_public': False, 'properties': properties}) metadata['properties']['image_state'] = 'pending' image = self.service.create(context, metadata) diff --git a/nova/network/api.py b/nova/network/api.py index 70b1099f0..33a9fe239 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -18,7 +18,6 @@ """Handles all requests relating to instances (guest vms).""" -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -46,6 +45,10 @@ class API(base.Base): context.project_id) return ips + def get_vifs_by_instance(self, context, instance_id): + vifs = self.db.virtual_interface_get_by_instance(context, instance_id) + return vifs + def allocate_floating_ip(self, context): """Adds a floating ip to a project.""" # NOTE(vish): We don't know which network host should get the ip @@ -61,6 +64,9 @@ class API(base.Base): affect_auto_assigned=False): """Removes floating ip with address from a project.""" floating_ip = self.db.floating_ip_get_by_address(context, address) + if floating_ip['fixed_ip']: + raise exception.ApiError(_('Floating ip is in use. 
' + 'Disassociate it before releasing.')) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return # NOTE(vish): We don't know which network host should get the ip @@ -105,7 +111,11 @@ class API(base.Base): '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) - host = fixed_ip['network']['host'] + # NOTE(vish): if we are multi_host, send to the instances host + if fixed_ip['network']['multi_host']: + host = fixed_ip['instance']['host'] + else: + host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {'method': 'associate_floating_ip', @@ -120,7 +130,11 @@ class API(base.Base): return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') - host = floating_ip['fixed_ip']['network']['host'] + # NOTE(vish): if we are multi_host, send to the instances host + if floating_ip['fixed_ip']['network']['multi_host']: + host = floating_ip['fixed_ip']['instance']['host'] + else: + host = floating_ip['fixed_ip']['network']['host'] rpc.call(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {'method': 'disassociate_floating_ip', @@ -134,7 +148,9 @@ class API(base.Base): args = kwargs args['instance_id'] = instance['id'] args['project_id'] = instance['project_id'] + args['host'] = instance['host'] args['instance_type_id'] = instance['instance_type_id'] + return rpc.call(context, FLAGS.network_topic, {'method': 'allocate_for_instance', 'args': args}) @@ -173,7 +189,8 @@ class API(base.Base): def get_instance_nw_info(self, context, instance): """Returns all network info related to an instance.""" args = {'instance_id': instance['id'], - 'instance_type_id': instance['instance_type_id']} + 'instance_type_id': instance['instance_type_id'], + 'host': instance['host']} return rpc.call(context, FLAGS.network_topic, {'method': 'get_instance_nw_info', 'args': args}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 283a5aca1..8ace07884 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -455,6 +455,7 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): """Create a vlan and bridge unless they already exist.""" interface = ensure_vlan(vlan_num, bridge_interface) ensure_bridge(bridge, interface, net_attrs) + return interface @utils.synchronized('ensure_vlan', external=True) @@ -497,7 +498,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', '%s/%s' % - (net_attrs['gateway'], suffix), + (net_attrs['dhcp_server'], suffix), 'brd', net_attrs['broadcast'], 'dev', @@ -551,21 +552,27 @@ def ensure_bridge(bridge, interface, net_attrs=None): bridge) -def get_dhcp_leases(context, network_id): +def get_dhcp_leases(context, network_ref): """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] - for fixed_ip_ref in db.network_get_associated_fixed_ips(context, - network_id): - hosts.append(_host_lease(fixed_ip_ref)) + for fixed_ref in db.network_get_associated_fixed_ips(context, + network_ref['id']): + host = fixed_ref['instance']['host'] + if network_ref['multi_host'] and FLAGS.host != host: + continue + hosts.append(_host_lease(fixed_ref)) return '\n'.join(hosts) -def get_dhcp_hosts(context, network_id): +def get_dhcp_hosts(context, network_ref): """Get network's hosts config in dhcp-host format.""" hosts = [] - for fixed_ip_ref in db.network_get_associated_fixed_ips(context, - 
network_id): - hosts.append(_host_dhcp(fixed_ip_ref)) + for fixed_ref in db.network_get_associated_fixed_ips(context, + network_ref['id']): + host = fixed_ref['instance']['host'] + if network_ref['multi_host'] and FLAGS.host != host: + continue + hosts.append(_host_dhcp(fixed_ref)) return '\n'.join(hosts) @@ -573,18 +580,16 @@ def get_dhcp_hosts(context, network_id): # configuration options (like dchp-range, vlan, ...) # aren't reloaded. @utils.synchronized('dnsmasq_start') -def update_dhcp(context, network_id): +def update_dhcp(context, network_ref): """(Re)starts a dnsmasq server for a given network. If a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance. """ - network_ref = db.network_get(context, network_id) - conffile = _dhcp_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: - f.write(get_dhcp_hosts(context, network_id)) + f.write(get_dhcp_hosts(context, network_ref)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) @@ -612,9 +617,7 @@ def update_dhcp(context, network_id): @utils.synchronized('radvd_start') -def update_ra(context, network_id): - network_ref = db.network_get(context, network_id) - +def update_ra(context, network_ref): conffile = _ra_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: conf_str = """ @@ -650,9 +653,6 @@ interface %s LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) - db.network_update(context, network_id, - {'gateway_v6': - utils.get_my_linklocal(network_ref['bridge'])}) def _host_lease(fixed_ip_ref): @@ -701,10 +701,11 @@ def _dnsmasq_cmd(net): cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', + '--interface=%s' % net['bridge'], '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), - '--listen-address=%s' % net['gateway'], + '--listen-address=%s' % net['dhcp_server'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), diff --git a/nova/network/manager.py b/nova/network/manager.py index 24736f53d..6f7573f66 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -28,7 +28,6 @@ topologies. 
All of the network commands are issued to a subclass of :flat_network_bridge: Bridge device for simple network instances :flat_interface: FlatDhcp will bridge into this interface if set :flat_network_dns: Dns for simple network -:flat_network_dhcp_start: Dhcp start for FlatDhcp :vlan_start: First VLAN for private networks :vpn_ip: Public IP for the cloudpipe VPN servers :vpn_start: First Vpn port for private networks @@ -49,7 +48,6 @@ import datetime import math import netaddr import socket -import pickle from eventlet import greenpool from nova import context @@ -78,8 +76,6 @@ flags.DEFINE_bool('flat_injected', True, 'Whether to attempt to inject network setup into guest') flags.DEFINE_string('flat_interface', None, 'FlatDhcp will bridge into this interface if set') -flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', - 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_string('vlan_interface', None, 'vlans will bridge into this interface if set') @@ -87,6 +83,8 @@ flags.DEFINE_integer('num_networks', 1, 'Number of networks to support') flags.DEFINE_string('vpn_ip', '$my_ip', 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') +flags.DEFINE_bool('multi_host', False, + 'Default value for multi_host in networks') flags.DEFINE_integer('network_size', 256, 'Number of addresses in each private subnet') flags.DEFINE_string('floating_range', '4.4.4.0/24', @@ -104,7 +102,8 @@ flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') flags.DEFINE_integer('create_unique_mac_address_attempts', 5, 'Number of attempts to create unique mac address') - +flags.DEFINE_bool('auto_assign_floating_ip', False, + 'Autoassigning floating ip to VM') flags.DEFINE_bool('use_ipv6', False, 'use the ipv6') flags.DEFINE_string('network_host', socket.gethostname(), @@ -124,16 +123,26 @@ class RPCAllocateFixedIP(object): used since they share code to RPC.call allocate_fixed_ip on the correct network host to configure dnsmasq """ - def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs): + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): """Calls allocate_fixed_ip once for each network.""" green_pool = greenpool.GreenPool() vpn = kwargs.pop('vpn') for network in networks: - if network['host'] != self.host: + # NOTE(vish): if we are not multi_host pass to the network host + if not network['multi_host']: + host = network['host'] + # NOTE(vish): if there is no network host, set one + if host == None: + host = rpc.call(context, FLAGS.network_topic, + {'method': 'set_network_host', + 'args': {'network_ref': network}}) + if host != self.host: # need to call allocate_fixed_ip to correct network host - topic = self.db.queue_get_for(context, FLAGS.network_topic, - network['host']) + topic = self.db.queue_get_for(context, + FLAGS.network_topic, + host) args = {} args['instance_id'] = instance_id args['network_id'] = network['id'] @@ -149,12 +158,13 @@ class RPCAllocateFixedIP(object): # wait for all of the allocates (if any) to finish green_pool.waitall() - def _rpc_allocate_fixed_ip(self, context, instance_id, network_id): + def _rpc_allocate_fixed_ip(self, context, instance_id, network_id, + **kwargs): """Sits in between _allocate_fixed_ips and allocate_fixed_ip to perform network lookup on the far side of rpc. 
""" network = self.db.network_get(context, network_id) - self.allocate_fixed_ip(context, instance_id, network) + self.allocate_fixed_ip(context, instance_id, network, **kwargs) class FloatingIP(object): @@ -193,7 +203,7 @@ class FloatingIP(object): # which is currently the NetworkManager version # do this first so fixed ip is already allocated ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs) - if hasattr(FLAGS, 'auto_assign_floating_ip'): + if FLAGS.auto_assign_floating_ip: # allocate a floating ip (public_ip is just the address string) public_ip = self.allocate_floating_ip(context, project_id) # set auto_assigned column to true for the floating ip @@ -248,7 +258,7 @@ class FloatingIP(object): # NOTE(tr3buchet): all networks hosts in zone now use the same pool LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1)) if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_('Quota exceeeded for %s, tried to allocate ' + LOG.warn(_('Quota exceeded for %s, tried to allocate ' 'address'), context.project_id) raise quota.QuotaError(_('Address quota exceeded. You cannot ' @@ -290,6 +300,12 @@ class NetworkManager(manager.SchedulerDependentManager): The one at a time part is to flatten the layout to help scale """ + # If True, this manager requires VIF to create a bridge. + SHOULD_CREATE_BRIDGE = False + + # If True, this manager requires VIF to create VLAN tag. + SHOULD_CREATE_VLAN = False + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): @@ -300,15 +316,36 @@ class NetworkManager(manager.SchedulerDependentManager): super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) + @utils.synchronized('get_dhcp') + def _get_dhcp_ip(self, context, network_ref, host=None): + """Get the proper dhcp address to listen on.""" + # NOTE(vish): this is for compatibility + if not network_ref['multi_host']: + return network_ref['gateway'] + + if not host: + host = self.host + network_id = network_ref['id'] + try: + fip = self.db.fixed_ip_get_by_network_host(context, + network_id, + host) + return fip['address'] + except exception.FixedIpNotFoundForNetworkHost: + elevated = context.elevated() + return self.db.fixed_ip_associate_pool(elevated, + network_id, + host=host) + def init_host(self): """Do any initialization that needs to be run if this is a standalone service. """ - # Set up this host for networks in which it's already - # the designated network host. + # NOTE(vish): Set up networks for which this host already has + # an ip address. 
ctxt = context.get_admin_context() for network in self.db.network_get_all_by_host(ctxt, self.host): - self._on_set_network_host(ctxt, network['id']) + self._setup_network(ctxt, network) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -323,33 +360,14 @@ class NetworkManager(manager.SchedulerDependentManager): if num: LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) - # setup any new networks which have been created - self.set_network_hosts(context) - - def set_network_host(self, context, network_id): + def set_network_host(self, context, network_ref): """Safely sets the host of the network.""" LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, - network_id, + network_ref['id'], self.host) - if host == self.host: - self._on_set_network_host(context, network_id) return host - def set_network_hosts(self, context): - """Set the network hosts for any networks which are unset.""" - try: - networks = self.db.network_get_all(context) - except exception.NoNetworksFound: - # we don't care if no networks are found - pass - - for network in networks: - host = network['host'] - if not host: - # return so worker will only grab 1 (to help scale flatter) - return self.set_network_host(context, network['id']) - def _get_networks_for_instance(self, context, instance_id, project_id): """Determine & return which networks an instance should connect to.""" # TODO(tr3buchet) maybe this needs to be updated in the future if @@ -358,12 +376,11 @@ class NetworkManager(manager.SchedulerDependentManager): try: networks = self.db.network_get_all(context) except exception.NoNetworksFound: - # we don't care if no networks are found - pass + return [] - # return only networks which are not vlan networks and have host set + # return only networks which are not vlan networks return [network for network in networks if - not network['vlan'] and network['host']] + not network['vlan']] def allocate_for_instance(self, context, **kwargs): """Handles allocating the various network resources for an instance. @@ -371,6 +388,7 @@ class NetworkManager(manager.SchedulerDependentManager): rpc.called by network_api """ instance_id = kwargs.pop('instance_id') + host = kwargs.pop('host') project_id = kwargs.pop('project_id') type_id = kwargs.pop('instance_type_id') vpn = kwargs.pop('vpn') @@ -379,9 +397,11 @@ class NetworkManager(manager.SchedulerDependentManager): context=context) networks = self._get_networks_for_instance(admin_context, instance_id, project_id) + LOG.warn(networks) self._allocate_mac_addresses(context, instance_id, networks) - self._allocate_fixed_ips(admin_context, instance_id, networks, vpn=vpn) - return self.get_instance_nw_info(context, instance_id, type_id) + self._allocate_fixed_ips(admin_context, instance_id, host, networks, + vpn=vpn) + return self.get_instance_nw_info(context, instance_id, type_id, host) def deallocate_for_instance(self, context, **kwargs): """Handles deallocating various network resources for an instance. @@ -401,7 +421,8 @@ class NetworkManager(manager.SchedulerDependentManager): # deallocate vifs (mac addresses) self.db.virtual_interface_delete_by_instance(context, instance_id) - def get_instance_nw_info(self, context, instance_id, instance_type_id): + def get_instance_nw_info(self, context, instance_id, + instance_type_id, host): """Creates network info list for instance. 
called by allocate_for_instance and netowrk_api @@ -411,10 +432,14 @@ class NetworkManager(manager.SchedulerDependentManager): and info = dict containing pertinent networking data """ # TODO(tr3buchet) should handle floating IPs as well? - fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + try: + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + except exception.FixedIpNotFoundForInstance: + LOG.warn(_('No fixed IPs for instance %s'), instance_id) + fixed_ips = [] + vifs = self.db.virtual_interface_get_by_instance(context, instance_id) - flavor = self.db.instance_type_get_by_id(context, - instance_type_id) + flavor = self.db.instance_type_get(context, instance_type_id) network_info = [] # a vif has an address, instance_id, and network_id # it is also joined to the instance and network given by those IDs @@ -444,20 +469,38 @@ class NetworkManager(manager.SchedulerDependentManager): 'id': network['id'], 'cidr': network['cidr'], 'cidr_v6': network['cidr_v6'], - 'injected': network['injected']} + 'injected': network['injected'], + 'vlan': network['vlan'], + 'bridge_interface': network['bridge_interface'], + 'multi_host': network['multi_host']} + if network['multi_host']: + dhcp_server = self._get_dhcp_ip(context, network, host) + else: + dhcp_server = self._get_dhcp_ip(context, + network, + network['host']) info = { 'label': network['label'], 'gateway': network['gateway'], + 'dhcp_server': dhcp_server, 'broadcast': network['broadcast'], 'mac': vif['address'], 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_IPs]} + 'dns': [], + 'ips': [ip_dict(ip) for ip in network_IPs], + 'should_create_bridge': self.SHOULD_CREATE_BRIDGE, + 'should_create_vlan': self.SHOULD_CREATE_VLAN} + if network['cidr_v6']: info['ip6s'] = [ip6_dict()] # TODO(tr3buchet): handle ip6 routes here as well if network['gateway_v6']: info['gateway6'] = network['gateway_v6'] + if network['dns1']: + info['dns'].append(network['dns1']) + if network['dns2']: + info['dns'].append(network['dns2']) + network_info.append((network_dict, info)) return network_info @@ -487,10 +530,10 @@ class NetworkManager(manager.SchedulerDependentManager): random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) - def add_fixed_ip_to_instance(self, context, instance_id, network_id): + def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" networks = [self.db.network_get(context, network_id)] - self._allocate_fixed_ips(context, instance_id, networks) + self._allocate_fixed_ips(context, instance_id, host, networks) def remove_fixed_ip_from_instance(self, context, instance_id, address): """Removes a fixed ip from an instance from specified network.""" @@ -517,6 +560,7 @@ class NetworkManager(manager.SchedulerDependentManager): values = {'allocated': True, 'virtual_interface_id': vif['id']} self.db.fixed_ip_update(context, address, values) + self._setup_network(context, network) return address def deallocate_fixed_ip(self, context, address, **kwargs): @@ -562,12 +606,12 @@ class NetworkManager(manager.SchedulerDependentManager): # means there will stale entries in the conf file # the code below will update the file if necessary if FLAGS.update_dhcp_on_disassociate: - network = self.db.fixed_ip_get_network(context, address) - self.driver.update_dhcp(context, network['id']) + network_ref = self.db.fixed_ip_get_network(context, address) + self._setup_network(context, 
network_ref) - def create_networks(self, context, label, cidr, num_networks, + def create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway_v6, bridge, - bridge_interface, **kwargs): + bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" fixed_net = netaddr.IPNetwork(cidr) fixed_net_v6 = netaddr.IPNetwork(cidr_v6) @@ -582,8 +626,10 @@ class NetworkManager(manager.SchedulerDependentManager): net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface - net['dns'] = FLAGS.flat_network_dns + net['dns1'] = dns1 + net['dns2'] = dns2 net['cidr'] = cidr + net['multi_host'] = multi_host net['netmask'] = str(project_net.netmask) net['gateway'] = str(project_net[1]) net['broadcast'] = str(project_net.broadcast) @@ -610,7 +656,8 @@ class NetworkManager(manager.SchedulerDependentManager): if kwargs.get('vpn', False): # this bit here is for vlan-manager - del net['dns'] + del net['dns1'] + del net['dns2'] vlan = kwargs['vlan_start'] + index net['vpn_private_address'] = str(project_net[2]) net['dhcp_start'] = str(project_net[3]) @@ -659,20 +706,13 @@ class NetworkManager(manager.SchedulerDependentManager): 'address': address, 'reserved': reserved}) - def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs): + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): """Calls allocate_fixed_ip once for each network.""" raise NotImplementedError() - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" - raise NotImplementedError() - - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts. - - this code is run on and by the compute host, not on network - hosts - """ + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" raise NotImplementedError() @@ -706,7 +746,8 @@ class FlatManager(NetworkManager): timeout_fixed_ips = False - def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs): + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): """Calls allocate_fixed_ip once for each network.""" for network in networks: self.allocate_fixed_ip(context, instance_id, network) @@ -717,19 +758,11 @@ class FlatManager(NetworkManager): **kwargs) self.db.fixed_ip_disassociate(context, address) - def setup_compute_network(self, context, instance_id): - """Network is created manually. - - this code is run on and by the compute host, not on network hosts - """ - pass - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" + def _setup_network(self, context, network_ref): + """Setup Network on this host.""" net = {} net['injected'] = FLAGS.flat_injected - net['dns'] = FLAGS.flat_network_dns - self.db.network_update(context, network_id, net) + self.db.network_update(context, network_ref['id'], net) class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): @@ -741,6 +774,8 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): """ + SHOULD_CREATE_BRIDGE = True + def init_host(self): """Do any initialization that needs to be run if this is a standalone service. @@ -753,37 +788,19 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): self.driver.metadata_forward() - def setup_compute_network(self, context, instance_id): - """Sets up matching networks for compute hosts. 
- - this code is run on and by the compute host, not on network hosts - """ - networks = db.network_get_all_by_instance(context, instance_id) - for network in networks: - self.driver.ensure_bridge(network['bridge'], - network['bridge_interface']) - - def allocate_fixed_ip(self, context, instance_id, network, **kwargs): - """Allocate flat_network fixed_ip, then setup dhcp for this network.""" - address = super(FlatDHCPManager, self).allocate_fixed_ip(context, - instance_id, - network) - if not FLAGS.fake_network: - self.driver.update_dhcp(context, network['id']) - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a project.""" - net = {} - net['dhcp_start'] = FLAGS.flat_network_dhcp_start - self.db.network_update(context, network_id, net) - network = db.network_get(context, network_id) - self.driver.ensure_bridge(network['bridge'], - network['bridge_interface'], - network) + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) + self.driver.ensure_bridge(network_ref['bridge'], + network_ref['bridge_interface'], + network_ref) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_id) + self.driver.update_dhcp(context, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_id) + self.driver.update_ra(context, network_ref) + gateway = utils.get_my_linklocal(network_ref['bridge']) + self.db.network_update(context, network_ref['id'], + {'gateway_v6': gateway}) class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): @@ -801,6 +818,9 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): """ + SHOULD_CREATE_BRIDGE = True + SHOULD_CREATE_VLAN = True + def init_host(self): """Do any initialization that needs to be run if this is a standalone service. @@ -832,30 +852,17 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): values = {'allocated': True, 'virtual_interface_id': vif['id']} self.db.fixed_ip_update(context, address, values) - if not FLAGS.fake_network: - self.driver.update_dhcp(context, network['id']) + self._setup_network(context, network) + return address def add_network_to_project(self, context, project_id): """Force adds another network to a project.""" self.db.network_associate(context, project_id, force=True) - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts. 
- this code is run on and by the compute host, not on network hosts - """ - networks = self.db.network_get_all_by_instance(context, instance_id) - for network in networks: - self.driver.ensure_vlan_bridge(network['vlan'], - network['bridge'], - network['bridge_interface']) - def _get_networks_for_instance(self, context, instance_id, project_id): """Determine which networks an instance should connect to.""" # get networks associated with project - networks = self.db.project_get_networks(context, project_id) - - # return only networks which have host set - return [network for network in networks if network['host']] + return self.db.project_get_networks(context, project_id) def create_networks(self, context, **kwargs): """Create networks based on parameters.""" @@ -874,32 +881,35 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): NetworkManager.create_networks(self, context, vpn=True, **kwargs) - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" - network = self.db.network_get(context, network_id) - if not network['vpn_public_address']: + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" + if not network_ref['vpn_public_address']: net = {} address = FLAGS.vpn_ip net['vpn_public_address'] = address - db.network_update(context, network_id, net) + network_ref = db.network_update(context, network_ref['id'], net) else: - address = network['vpn_public_address'] - self.driver.ensure_vlan_bridge(network['vlan'], - network['bridge'], - network['bridge_interface'], - network) + address = network_ref['vpn_public_address'] + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) + self.driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge'], + network_ref['bridge_interface'], + network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. if address == FLAGS.vpn_ip and hasattr(self.driver, "ensure_vlan_forward"): self.driver.ensure_vlan_forward(FLAGS.vpn_ip, - network['vpn_public_port'], - network['vpn_private_address']) + network_ref['vpn_public_port'], + network_ref['vpn_private_address']) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_id) + self.driver.update_dhcp(context, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_id) + self.driver.update_ra(context, network_ref) + gateway = utils.get_my_linklocal(network_ref['bridge']) + self.db.network_update(context, network_ref['id'], + {'gateway_v6': gateway}) @property def _bottom_reserved_ips(self): diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py deleted file mode 100644 index b32cf3303..000000000 --- a/nova/network/vmwareapi_net.py +++ /dev/null @@ -1,82 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
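# ----------------------------------------------------------------------------
# Both FlatDHCPManager._setup_network and VlanManager._setup_network above
# start by resolving a per-host DHCP address with _get_dhcp_ip.  The sketch
# below restates that lookup with an in-memory stand-in for the fixed_ips
# table; the real method goes through nova.db, elevates the context and is
# wrapped in @utils.synchronized, none of which is reproduced here.
# ----------------------------------------------------------------------------
FIXED_IPS = [
    {'network_id': 1, 'host': 'compute-7', 'address': '10.0.0.5'},
]


def get_dhcp_ip(network, host):
    """Return the address dnsmasq should listen on for this host."""
    if not network['multi_host']:
        # Compatibility path: single-host networks keep using the gateway.
        return network['gateway']
    for fip in FIXED_IPS:
        if fip['network_id'] == network['id'] and fip['host'] == host:
            return fip['address']
    # No per-host address reserved yet; the patched method grabs one from the
    # pool via db.fixed_ip_associate_pool(elevated, network_id, host=host).
    raise LookupError('no fixed ip for %s on network %s'
                      % (host, network['id']))


if __name__ == '__main__':
    net = {'id': 1, 'multi_host': True, 'gateway': '10.0.0.1'}
    assert get_dhcp_ip(net, 'compute-7') == '10.0.0.5'
    assert get_dhcp_ip(dict(net, multi_host=False), 'elsewhere') == '10.0.0.1'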
- -"""Implements vlans for vmwareapi.""" - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt.vmwareapi_conn import VMWareAPISession -from nova.virt.vmwareapi import network_utils - - -LOG = logging.getLogger("nova.network.vmwareapi_net") - - -FLAGS = flags.FLAGS -FLAGS['vlan_interface'].SetDefault('vmnic0') - - -def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - # Open vmwareapi session - host_ip = FLAGS.vmwareapi_host_ip - host_username = FLAGS.vmwareapi_host_username - host_password = FLAGS.vmwareapi_host_password - if not host_ip or host_username is None or host_password is None: - raise Exception(_('Must specify vmwareapi_host_ip, ' - 'vmwareapi_host_username ' - 'and vmwareapi_host_password to use ' - 'connection_type=vmwareapi')) - session = VMWareAPISession(host_ip, host_username, host_password, - FLAGS.vmwareapi_api_retry_count) - vlan_interface = bridge_interface - # Check if the vlan_interface physical network adapter exists on the host - if not network_utils.check_if_vlan_interface_exists(session, - vlan_interface): - raise exception.NetworkAdapterNotFound(adapter=vlan_interface) - - # Get the vSwitch associated with the Physical Adapter - vswitch_associated = network_utils.get_vswitch_for_vlan_interface( - session, vlan_interface) - if vswitch_associated is None: - raise exception.SwicthNotFoundForNetworkAdapter(adapter=vlan_interface) - # Check whether bridge already exists and retrieve the the ref of the - # network whose name_label is "bridge" - network_ref = network_utils.get_network_with_the_name(session, bridge) - if network_ref is None: - # Create a port group on the vSwitch associated with the vlan_interface - # corresponding physical network adapter on the ESX host - network_utils.create_port_group(session, bridge, vswitch_associated, - vlan_num) - else: - # Get the vlan id and vswitch corresponding to the port group - pg_vlanid, pg_vswitch = \ - network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge) - - # Check if the vswitch associated is proper - if pg_vswitch != vswitch_associated: - raise exception.InvalidVLANPortGroup(bridge=bridge, - expected=vswitch_associated, - actual=pg_vswitch) - - # Check if the vlan id is proper for the port group - if pg_vlanid != vlan_num: - raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, - pgroup=pg_vlanid) diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py deleted file mode 100644 index e86f4017d..000000000 --- a/nova/network/xenapi_net.py +++ /dev/null @@ -1,87 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implements vlans, bridges, and iptables rules using linux utilities.""" - -import os - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt import xenapi_conn -from nova.virt.xenapi import network_utils - - -LOG = logging.getLogger("nova.xenapi_net") - - -FLAGS = flags.FLAGS - - -def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - # Open xenapi session - LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password - session = xenapi_conn.XenAPISession(url, username, password) - # Check whether bridge already exists - # Retrieve network whose name_label is "bridge" - network_ref = network_utils.NetworkHelper.find_network_with_name_label( - session, - bridge) - if network_ref is None: - # If bridge does not exists - # 1 - create network - description = 'network for nova bridge %s' % bridge - network_rec = {'name_label': bridge, - 'name_description': description, - 'other_config': {}} - network_ref = session.call_xenapi('network.create', network_rec) - # 2 - find PIF for VLAN - # NOTE(salvatore-orlando): using double quotes inside single quotes - # as xapi filter only support tokens in double quotes - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % bridge_interface - pifs = session.call_xenapi('PIF.get_all_records_where', expr) - pif_ref = None - # Multiple PIF are ok: we are dealing with a pool - if len(pifs) == 0: - raise Exception( - _('Found no PIF for device %s') % bridge_interface) - # 3 - create vlan for network - for pif_ref in pifs.keys(): - session.call_xenapi('VLAN.create', - pif_ref, - str(vlan_num), - network_ref) - else: - # Check VLAN tag is appropriate - network_rec = session.call_xenapi('network.get_record', network_ref) - # Retrieve PIFs from network - for pif_ref in network_rec['PIFs']: - # Retrieve VLAN from PIF - pif_rec = session.call_xenapi('PIF.get_record', pif_ref) - pif_vlan = int(pif_rec['VLAN']) - # Raise an exception if VLAN != vlan_num - if pif_vlan != vlan_num: - raise Exception(_("PIF %(pif_rec['uuid'])s for network " - "%(bridge)s has VLAN id %(pif_vlan)d. 
" - "Expected %(vlan_num)d") % locals()) diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index e4ed75d37..720d5b0e6 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -59,6 +59,7 @@ def setup(): network.create_networks(ctxt, label='test', cidr=FLAGS.fixed_range, + multi_host=FLAGS.multi_host, num_networks=FLAGS.num_networks, network_size=FLAGS.network_size, cidr_v6=FLAGS.fixed_range_v6, @@ -66,9 +67,10 @@ def setup(): bridge=FLAGS.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=FLAGS.vpn_start, - vlan_start=FLAGS.vlan_start) + vlan_start=FLAGS.vlan_start, + dns1=FLAGS.flat_network_dns) for net in db.network_get_all(ctxt): - network.set_network_host(ctxt, net['id']) + network.set_network_host(ctxt, net) cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 4c4d03995..f09270b34 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -247,3 +247,21 @@ class MiscFunctionsTest(test.TestCase): self.assertRaises(ValueError, common.get_id_from_href, fixture) + + def test_get_version_from_href(self): + fixture = 'http://www.testsite.com/v1.1/images' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_2(self): + fixture = 'http://www.testsite.com/v1.1' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_default(self): + fixture = 'http://www.testsite.com/images' + expected = '1.0' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 697c62e5c..d459c694f 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -16,10 +16,11 @@ # under the License. import json +import os.path import stubout import unittest import webob -import os.path +from xml.etree import ElementTree from nova import context from nova import flags @@ -30,7 +31,8 @@ from nova.api.openstack import wsgi from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS - +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" response_body = "Try to say this Mr. Knox, sir..." @@ -80,20 +82,99 @@ class StubExtensionManager(object): class ExtensionControllerTest(unittest.TestCase): - def test_index(self): + def setUp(self): + FLAGS.osapi_extensions_path = os.path.join( + os.path.dirname(__file__), "extensions") + + def test_list_extensions_json(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) request = webob.Request.blank("/extensions") response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) - def test_get_by_alias(self): + # Make sure we have all the extensions. + data = json.loads(response.body) + names = [x['name'] for x in data['extensions']] + names.sort() + self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", + "Fox In Socks", "Hosts", "Multinic", "Volumes"]) + + # Make sure that at least Fox in Sox is correct. 
+ (fox_ext,) = [ + x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] + self.assertEqual(fox_ext, { + 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', + 'name': 'Fox In Socks', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'The Fox In Socks Extension', + 'alias': 'FOXNSOX', + 'links': [] + } + ) + + def test_get_extension_json(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) request = webob.Request.blank("/extensions/FOXNSOX") response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) + data = json.loads(response.body) + self.assertEqual(data['extension'], { + "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", + "name": "Fox In Socks", + "updated": "2011-01-22T13:25:27-06:00", + "description": "The Fox In Socks Extension", + "alias": "FOXNSOX", + "links": [] + } + ) + + def test_list_extensions_xml(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions") + request.accept = "application/xml" + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + print response.body + + root = ElementTree.XML(response.body) + self.assertEqual(root.tag.split('extensions')[0], NS) + + # Make sure we have all the extensions. + exts = root.findall('{0}extension'.format(NS)) + self.assertEqual(len(exts), 6) + + # Make sure that at least Fox in Sox is correct. + (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + self.assertEqual(fox_ext.get('name'), 'Fox In Socks') + self.assertEqual(fox_ext.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + def test_get_extension_xml(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions/FOXNSOX") + request.accept = "application/xml" + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + print response.body + + root = ElementTree.XML(response.body) + self.assertEqual(root.tag.split('extension')[0], NS) + self.assertEqual(root.get('alias'), 'FOXNSOX') + self.assertEqual(root.get('name'), 'Fox In Socks') + self.assertEqual(root.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(root.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + class ResourceExtensionTest(unittest.TestCase): @@ -192,7 +273,7 @@ class ActionExtensionTest(unittest.TestCase): def test_invalid_action(self): body = dict(blah=dict(name="test")) - response = self._send_server_action_request("/asdf/1/action", body) + response = self._send_server_action_request("/fdsa/1/action", body) self.assertEqual(404, response.status_int) @@ -244,3 +325,109 @@ class RequestExtensionTest(unittest.TestCase): response_data = json.loads(response.body) self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) + + +class ExtensionsXMLSerializerTest(unittest.TestCase): + + def test_serialize_extenstion(self): + serializer = extensions.ExtensionsXMLSerializer() + data = { + 'extension': { + 'name': 'ext1', + 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', + 'alias': 'RS-PIE', + 'updated': 
'2011-01-22T13:25:27-06:00', + 'description': 'Adds the capability to share an image.', + 'links': [ + { + 'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf' + }, + { + 'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl' + } + ] + } + } + + xml = serializer.serialize(data, 'show') + root = ElementTree.XML(xml) + ext_dict = data['extension'] + self.assertEqual(root.findtext('{0}description'.format(NS)), + ext_dict['description']) + + for key in ['name', 'namespace', 'alias', 'updated']: + self.assertEqual(root.get(key), ext_dict[key]) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(ext_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + def test_serialize_extensions(self): + serializer = extensions.ExtensionsXMLSerializer() + data = { + "extensions": [ + { + "name": "Public Image Extension", + "namespace": "http://foo.com/api/ext/pie/v1.0", + "alias": "RS-PIE", + "updated": "2011-01-22T13:25:27-06:00", + "description": "Adds the capability to share an image.", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-pie.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://foo.com/api/ext/cs-pie.wadl" + } + ] + }, + { + "name": "Cloud Block Storage", + "namespace": "http://foo.com/api/ext/cbs/v1.0", + "alias": "RS-CBS", + "updated": "2011-01-12T11:22:33-06:00", + "description": "Allows mounting cloud block storage.", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-cbs.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://foo.com/api/ext/cs-cbs.wadl" + } + ] + } + ] + } + + xml = serializer.serialize(data, 'index') + print xml + root = ElementTree.XML(xml) + ext_elems = root.findall('{0}extension'.format(NS)) + self.assertEqual(len(ext_elems), 2) + for i, ext_elem in enumerate(ext_elems): + ext_dict = data['extensions'][i] + self.assertEqual(ext_elem.findtext('{0}description'.format(NS)), + ext_dict['description']) + + for key in ['name', 'namespace', 'alias', 'updated']: + self.assertEqual(ext_elem.get(key), ext_dict[key]) + + link_nodes = ext_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(ext_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index 4d86ffb26..6da27540a 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -16,6 +16,7 @@ # under the License. import json +from xml.dom import minidom import webob import webob.dec @@ -24,6 +25,7 @@ import webob.exc from nova import test from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi class TestFaults(test.TestCase): @@ -139,3 +141,113 @@ class TestFaults(test.TestCase): self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 404) self.assertTrue('whut?' 
in resp.body) + + def test_fault_has_status_int(self): + """Ensure the status_int is set correctly on faults""" + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='what?')) + self.assertEqual(fault.status_int, 400) + + def test_v10_xml_serializer(self): + """Ensure that a v1.0 request responds with a v1.0 xmlns""" + request = webob.Request.blank('/', + headers={"Accept": "application/xml"}) + + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram')) + response = request.get_response(fault) + + self.assertTrue(common.XML_NS_V10 in response.body) + self.assertEqual(response.content_type, "application/xml") + self.assertEqual(response.status_int, 400) + + def test_v11_xml_serializer(self): + """Ensure that a v1.1 request responds with a v1.1 xmlns""" + request = webob.Request.blank('/v1.1', + headers={"Accept": "application/xml"}) + + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram')) + response = request.get_response(fault) + + self.assertTrue(common.XML_NS_V11 in response.body) + self.assertEqual(response.content_type, "application/xml") + self.assertEqual(response.status_int, 400) + + +class FaultsXMLSerializationTestV11(test.TestCase): + """Tests covering `nova.api.openstack.faults:Fault` class.""" + + def _prepare_xml(self, xml_string): + xml_string = xml_string.replace(" ", "") + xml_string = xml_string.replace("\n", "") + xml_string = xml_string.replace("\t", "") + return xml_string + + def test_400_fault(self): + metadata = {'attributes': {"badRequest": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "badRequest": { + "message": "scram", + "code": 400, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <badRequest code="400" xmlns="%s"> + <message>scram</message> + </badRequest> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_413_fault(self): + metadata = {'attributes': {"overLimit": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "overLimit": { + "message": "sorry", + "code": 413, + "retryAfter": 4, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <overLimit code="413" xmlns="%s"> + <message>sorry</message> + <retryAfter>4</retryAfter> + </overLimit> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_404_fault(self): + metadata = {'attributes': {"itemNotFound": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "itemNotFound": { + "message": "sorry", + "code": 404, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <itemNotFound code="404" xmlns="%s"> + <message>sorry</message> + </itemNotFound> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 689647cc6..4ac35b26b 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -18,12 +18,14 @@ import json import stubout import webob +import xml.dom.minidom as minidom +from nova.api.openstack import 
flavors import nova.db.api -from nova import context from nova import exception from nova import test from nova.tests.api.openstack import fakes +from nova import wsgi def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"): @@ -64,7 +66,6 @@ class FlavorsTest(test.TestCase): return_instance_types) self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id", return_instance_type_by_flavor_id) - self.context = context.get_admin_context() def tearDown(self): self.stubs.UnsetAll() @@ -146,61 +147,65 @@ class FlavorsTest(test.TestCase): req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavor"] + flavor = json.loads(res.body) expected = { - "id": "12", - "name": "flavor 12", - "ram": "256", - "disk": "10", - "links": [ - { - "rel": "self", - "href": "http://localhost/v1.1/flavors/12", - }, - { - "rel": "bookmark", - "href": "http://localhost/flavors/12", - }, - ], - } - self.assertEqual(flavor, expected) - - def test_get_flavor_list_v1_1(self): - req = webob.Request.blank('/v1.1/flavors') - req.environ['api.version'] = '1.1' - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavors"] - expected = [ - { - "id": "1", - "name": "flavor 1", - "links": [ - { - "rel": "self", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "href": "http://localhost/flavors/1", - }, - ], - }, - { - "id": "2", - "name": "flavor 2", + "flavor": { + "id": "12", + "name": "flavor 12", + "ram": "256", + "disk": "10", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - "href": "http://localhost/flavors/2", + "href": "http://localhost/flavors/12", }, ], }, - ] + } + self.assertEqual(flavor, expected) + + def test_get_flavor_list_v1_1(self): + req = webob.Request.blank('/v1.1/flavors') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavor = json.loads(res.body) + expected = { + "flavors": [ + { + "id": "1", + "name": "flavor 1", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/2", + }, + ], + }, + ], + } self.assertEqual(flavor, expected) def test_get_flavor_list_detail_v1_1(self): @@ -208,52 +213,273 @@ class FlavorsTest(test.TestCase): req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavors"] - expected = [ - { - "id": "1", - "name": "flavor 1", + flavor = json.loads(res.body) + expected = { + "flavors": [ + { + "id": "1", + "name": "flavor 1", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/2", + }, + ], + }, + ], + } + self.assertEqual(flavor, expected) + + def test_get_empty_flavor_list_v1_1(self): + def 
_return_empty(self): + return {} + self.stubs.Set(nova.db.api, "instance_type_get_all", _return_empty) + + req = webob.Request.blank('/v1.1/flavors') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavors = json.loads(res.body)["flavors"] + expected = [] + self.assertEqual(flavors, expected) + + +class FlavorsXMLSerializationTest(test.TestCase): + + def test_show(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavor": { + "id": "12", + "name": "asdf", "ram": "256", "disk": "10", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/1", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - "href": "http://localhost/flavors/1", + "href": "http://localhost/flavors/12", }, ], }, - { - "id": "2", - "name": "flavor 2", - "ram": "256", - "disk": "10", + } + + output = serializer.serialize(input, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + id="12" + name="asdf" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/12" rel="self"/> + <atom:link href="http://localhost/flavors/12" rel="bookmark"/> + </flavor> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_handles_integers(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavor": { + "id": 12, + "name": "asdf", + "ram": 256, + "disk": 10, "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - "href": "http://localhost/flavors/2", + "href": "http://localhost/flavors/12", }, ], }, - ] - self.assertEqual(flavor, expected) + } - def test_get_empty_flavor_list_v1_1(self): - def _return_empty(self): - return {} - self.stubs.Set(nova.db.api, "instance_type_get_all", - _return_empty) + output = serializer.serialize(input, 'show') + actual = minidom.parseString(output.replace(" ", "")) - req = webob.Request.blank('/v1.1/flavors') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - flavors = json.loads(res.body)["flavors"] - expected = [] - self.assertEqual(flavors, expected) + expected = minidom.parseString(""" + <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + id="12" + name="asdf" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/12" rel="self"/> + <atom:link href="http://localhost/flavors/12" rel="bookmark"/> + </flavor> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_detail(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [ + { + "id": "23", + "name": "flavor 23", + "ram": "512", + "disk": "20", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/23", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/23", + }, + ], + }, { + "id": "13", + "name": "flavor 13", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/13", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/13", + }, + ], + }, + ], + } + + output = serializer.serialize(input, 'detail') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + 
xmlns:atom="http://www.w3.org/2005/Atom"> + <flavor id="23" + name="flavor 23" + ram="512" + disk="20"> + <atom:link href="http://localhost/v1.1/flavors/23" rel="self"/> + <atom:link href="http://localhost/flavors/23" rel="bookmark"/> + </flavor> + <flavor id="13" + name="flavor 13" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/13" rel="self"/> + <atom:link href="http://localhost/flavors/13" rel="bookmark"/> + </flavor> + </flavors> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [ + { + "id": "23", + "name": "flavor 23", + "ram": "512", + "disk": "20", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/23", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/23", + }, + ], + }, { + "id": "13", + "name": "flavor 13", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/13", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/13", + }, + ], + }, + ], + } + + output = serializer.serialize(input, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom"> + <flavor id="23" name="flavor 23"> + <atom:link href="http://localhost/v1.1/flavors/23" rel="self"/> + <atom:link href="http://localhost/flavors/23" rel="bookmark"/> + </flavor> + <flavor id="13" name="flavor 13"> + <atom:link href="http://localhost/v1.1/flavors/13" rel="self"/> + <atom:link href="http://localhost/flavors/13" rel="bookmark"/> + </flavor> + </flavors> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_empty(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [], + } + + output = serializer.serialize(input, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" /> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index d9fb61e2a..31ca18497 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -103,8 +103,7 @@ class ImageMetaDataTest(test.TestCase): super(ImageMetaDataTest, self).tearDown() def test_index(self): - req = webob.Request.blank('/v1.1/images/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) @@ -114,8 +113,7 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(value, res_dict['metadata'][key]) def test_show(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) @@ -124,42 +122,66 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual('value1', res_dict['meta']['key1']) def test_show_not_found(self): - req = webob.Request.blank('/v1.1/images/1/meta/key9') - 
req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key9') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_create(self): - req = webob.Request.blank('/v1.1/images/2/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata') req.method = 'POST' req.body = '{"metadata": {"key9": "value9"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) + + self.assertEqual(200, res.status_int) + actual_output = json.loads(res.body) + + expected_output = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key9': 'value9', + }, + } + + self.assertEqual(expected_output, actual_output) + + def test_update_all(self): + req = webob.Request.blank('/v1.1/images/2/metadata') + req.method = 'PUT' + req.body = '{"metadata": {"key9": "value9"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) - self.assertEqual('value9', res_dict['metadata']['key9']) - # other items should not be modified - self.assertEqual('value1', res_dict['metadata']['key1']) - self.assertEqual('value2', res_dict['metadata']['key2']) - self.assertEqual(1, len(res_dict)) + actual_output = json.loads(res.body) + + expected_output = { + 'metadata': { + 'key9': 'value9', + }, + } + + self.assertEqual(expected_output, actual_output) def test_update_item(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"meta": {"key1": "zz"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) - res_dict = json.loads(res.body) - self.assertTrue('meta' in res_dict) - self.assertEqual(len(res_dict['meta']), 1) - self.assertEqual('zz', res_dict['meta']['key1']) + actual_output = json.loads(res.body) + expected_output = { + 'meta': { + 'key1': 'zz', + }, + } + self.assertEqual(actual_output, expected_output) def test_update_item_bad_body(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"key1": "zz"}' req.headers["content-type"] = "application/json" @@ -167,8 +189,7 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_too_many_keys(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"meta": {"key1": "value1", "key2": "value2"}}' req.headers["content-type"] = "application/json" @@ -176,24 +197,38 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_body_uri_mismatch(self): - req = webob.Request.blank('/v1.1/images/1/meta/bad') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/bad') req.method = 'PUT' req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + def test_update_item_xml(self): + req = webob.Request.blank('/v1.1/images/1/metadata/key1') + req.method = 'PUT' + req.body = '<meta key="key1">five</meta>' + req.headers["content-type"] = 
"application/xml" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(200, res.status_int) + actual_output = json.loads(res.body) + expected_output = { + 'meta': { + 'key1': 'five', + }, + } + self.assertEqual(actual_output, expected_output) + def test_delete(self): - req = webob.Request.blank('/v1.1/images/2/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata/key1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, res.status_int) + self.assertEqual(204, res.status_int) + self.assertEqual('', res.body) def test_delete_not_found(self): - req = webob.Request.blank('/v1.1/images/2/meta/blah') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata/blah') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) @@ -203,8 +238,7 @@ class ImageMetaDataTest(test.TestCase): for num in range(FLAGS.quota_metadata_items + 1): data['metadata']['key%i' % num] = "blah" json_string = str(data).replace("\'", "\"") - req = webob.Request.blank('/v1.1/images/2/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata') req.method = 'POST' req.body = json_string req.headers["content-type"] = "application/json" @@ -212,8 +246,7 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_too_many_metadata_items_on_put(self): - req = webob.Request.blank('/v1.1/images/3/meta/blah') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/3/metadata/blah') req.method = 'PUT' req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" @@ -221,9 +254,49 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) +class ImageMetadataXMLDeserializationTest(test.TestCase): + + deserializer = openstack.image_metadata.ImageMetadataXMLDeserializer() + + def test_create(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key='123'>asdf</meta> + <meta key='567'>jkl;</meta> + </metadata>""" + output = self.deserializer.deserialize(request_body, 'create') + expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}} + self.assertEquals(output, expected) + + def test_create_empty(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + output = self.deserializer.deserialize(request_body, 'create') + expected = {"body": {"metadata": {}}} + self.assertEquals(output, expected) + + def test_update_all(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key='123'>asdf</meta> + <meta key='567'>jkl;</meta> + </metadata>""" + output = self.deserializer.deserialize(request_body, 'update_all') + expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}} + self.assertEquals(output, expected) + + def test_update(self): + request_body = """ + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key='123'>asdf</meta>""" + output = self.deserializer.deserialize(request_body, 'update') + expected = {"body": {"meta": {"123": "asdf"}}} + self.assertEquals(output, expected) + + class ImageMetadataXMLSerializationTest(test.TestCase): - def test_index_xml(self): + def test_index(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'metadata': { @@ -247,7 +320,7 @@ class ImageMetadataXMLSerializationTest(test.TestCase): 
self.assertEqual(expected.toxml(), actual.toxml()) - def test_index_xml_null(self): + def test_index_null(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'metadata': { @@ -267,7 +340,7 @@ class ImageMetadataXMLSerializationTest(test.TestCase): self.assertEqual(expected.toxml(), actual.toxml()) - def test_index_xml_unicode(self): + def test_index_unicode(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'metadata': { @@ -287,7 +360,7 @@ class ImageMetadataXMLSerializationTest(test.TestCase): self.assertEqual(expected.toxml(), actual.toxml()) - def test_show_xml(self): + def test_show(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'meta': { @@ -305,7 +378,31 @@ class ImageMetadataXMLSerializationTest(test.TestCase): self.assertEqual(expected.toxml(), actual.toxml()) - def test_update_item_xml(self): + def test_update_all(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'key6': 'value6', + 'key4': 'value4', + }, + } + output = serializer.serialize(fixture, 'update_all') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key6"> + value6 + </meta> + <meta key="key4"> + value4 + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update_item(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'meta': { @@ -323,7 +420,7 @@ class ImageMetadataXMLSerializationTest(test.TestCase): self.assertEqual(expected.toxml(), actual.toxml()) - def test_create_xml(self): + def test_create(self): serializer = openstack.image_metadata.ImageMetadataXMLSerializer() fixture = { 'metadata': { @@ -350,3 +447,8 @@ class ImageMetadataXMLSerializationTest(test.TestCase): """.replace(" ", "")) self.assertEqual(expected.toxml(), actual.toxml()) + + def test_delete(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + output = serializer.serialize(None, 'delete') + self.assertEqual(output, '') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 534460d46..87a695dde 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -538,7 +538,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): # because the element hasn't changed definition expected = minidom.parseString(""" <itemNotFound code="404" - xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + xmlns="http://docs.openstack.org/compute/api/v1.1"> <message> Image not found. 
</message> @@ -803,154 +803,206 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_image_filter_with_name(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?name=testname') + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?name=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_with_status(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?status=ACTIVE') + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?status=ACTIVE') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_with_property(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?property-test=3') + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_server(self): + image_service = self.mox.CreateMockAnything() + context = object() + # 'server' should be converted to 'property-instance_ref' + filters = {'property-instance_ref': 'http://localhost:8774/servers/12'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?server=' + 'http://localhost:8774/servers/12') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_changes_since(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'changes-since': '2011-01-24T17:08Z'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?changes-since=' + '2011-01-24T17:08Z') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_with_type(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'property-image_type': 'BASE'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?type=BASE') 
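# NOTE: illustrative sketch, not part of the commit diff. The filter tests in
# this hunk assume the v1.1 images controller translates query parameters
# before calling image_service.index()/detail(): 'server' becomes
# 'property-instance_ref', 'type' becomes 'property-image_type', recognised
# keys such as 'name', 'status' and 'changes-since' pass through, and unknown
# keys (UNSUPPORTEDFILTER) are dropped. A standalone version of that mapping
# (the function name and exact rules here are assumptions, not nova code):
def build_image_filters(params):
    """Translate request query parameters into image-service filters."""
    filters = {}
    for key, value in params.items():
        if key in ('name', 'status', 'changes-since'):
            filters[key] = value
        elif key == 'server':
            filters['property-instance_ref'] = value
        elif key == 'type':
            filters['property-image_type'] = value
        elif key.startswith('property-'):
            filters[key] = value
        # anything else is silently ignored, as test_image_filter_not_supported expects
    return filters

# build_image_filters({'server': 'http://localhost:8774/servers/12'})
# == {'property-instance_ref': 'http://localhost:8774/servers/12'}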
request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_not_supported(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?status=ACTIVE&' + 'UNSUPPORTEDFILTER=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) - controller.index(request) - mocker.VerifyAll() + controller.detail(request) + self.mox.VerifyAll() def test_image_no_filters(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {} image_service.index( context, filters=filters).AndReturn([]) - mocker.ReplayAll() + self.mox.ReplayAll() request = webob.Request.blank( '/v1.1/images') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_name(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?name=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?name=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_status(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?status=ACTIVE') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_property(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?property-test=3') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?property-test=3') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + 
self.mox.VerifyAll() + + def test_image_detail_filter_server(self): + image_service = self.mox.CreateMockAnything() + context = object() + # 'server' should be converted to 'property-instance_ref' + filters = {'property-instance_ref': 'http://localhost:8774/servers/12'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?server=' + 'http://localhost:8774/servers/12') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_detail_filter_changes_since(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'changes-since': '2011-01-24T17:08Z'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?changes-since=' + '2011-01-24T17:08Z') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_detail_filter_with_type(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'property-image_type': 'BASE'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?type=BASE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() def test_image_detail_filter_not_supported(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?status=ACTIVE&' + 'UNSUPPORTEDFILTER=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_no_filters(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 76363450d..8a3fe681a 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -24,11 +24,12 @@ import stubout import time import unittest import webob - -from xml.dom.minidom import parseString +from xml.dom import minidom import nova.context from nova.api.openstack import limits +from nova.api.openstack import 
views +from nova import test TEST_LIMITS = [ @@ -166,7 +167,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): request = self._get_index_request("application/xml") response = request.get_response(self.controller) - expected = parseString(""" + expected = minidom.parseString(""" <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate/> @@ -174,7 +175,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): </limits> """.replace(" ", "")) - body = parseString(response.body.replace(" ", "")) + body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) @@ -184,7 +185,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): request = self._populate_limits(request) response = request.get_response(self.controller) - expected = parseString(""" + expected = minidom.parseString(""" <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate> @@ -196,7 +197,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): <absolute/> </limits> """.replace(" ", "")) - body = parseString(response.body.replace(" ", "")) + body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) @@ -210,6 +211,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): """Run before each test.""" BaseLimitTestSuite.setUp(self) self.controller = limits.create_resource('1.1') + self.maxDiff = None def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -266,14 +268,14 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "HOUR", "value": 5, "remaining": 5, @@ -286,7 +288,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 5, "remaining": 5, @@ -328,7 +330,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, @@ -341,7 +343,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, @@ -458,7 +460,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite): response = request.get_response(self.app) self.assertEqual(response.status_int, 403) - root = parseString(response.body).childNodes[0] + root = minidom.parseString(response.body).childNodes[0] expected = "Only 1 GET request(s) can be made to * every minute." 
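# NOTE: illustrative sketch, not part of the commit diff. The updated limits
# expectations above report 'next-available' as an ISO 8601 UTC timestamp
# rather than a raw epoch value (0 -> "1970-01-01T00:00:00Z", and the view
# builder test below turns resetTime 1311272226 into "2011-07-21T18:17:06Z").
# A minimal conversion with that formatting:
import datetime


def next_available(reset_time):
    """Format an epoch resetTime the way the rate-limit views render it."""
    utc = datetime.datetime.utcfromtimestamp(reset_time)
    return utc.strftime('%Y-%m-%dT%H:%M:%SZ')

# next_available(0) == '1970-01-01T00:00:00Z'
# next_available(1311272226) == '2011-07-21T18:17:06Z'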
details = root.getElementsByTagName("details") @@ -904,3 +906,195 @@ class WsgiLimiterProxyTest(BaseLimitTestSuite): "made to /delayed every minute.") self.assertEqual((delay, error), expected) + + +class LimitsViewBuilderV11Test(test.TestCase): + + def setUp(self): + self.view_builder = views.limits.ViewBuilderV11() + self.rate_limits = [ + { + "URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226 + }, + { + "URI": "*/servers", + "regex": "^/servers", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226 + }, + ] + self.absolute_limits = { + "metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5, + } + + def tearDown(self): + pass + + def test_build_limits(self): + expected_limits = { + "limits": { + "rate": [ + { + "uri": "*", + "regex": ".*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-07-21T18:17:06Z" + }, + ] + }, + { + "uri": "*/servers", + "regex": "^/servers", + "limit": [ + { + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-07-21T18:17:06Z" + }, + ] + }, + ], + "absolute": { + "maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5 + } + } + } + + output = self.view_builder.build(self.rate_limits, + self.absolute_limits) + self.assertDictMatch(output, expected_limits) + + def test_build_limits_empty_limits(self): + expected_limits = { + "limits": { + "rate": [], + "absolute": {} + } + } + + abs_limits = {} + rate_limits = [] + output = self.view_builder.build(rate_limits, abs_limits) + self.assertDictMatch(output, expected_limits) + + +class LimitsXMLSerializationTest(test.TestCase): + + def setUp(self): + self.maxDiff = None + + def tearDown(self): + pass + + def test_index(self): + serializer = limits.LimitsXMLSerializer() + + fixture = { + "limits": { + "rate": [ + { + "uri": "*", + "regex": ".*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z" + }, + ] + }, + { + "uri": "*/servers", + "regex": "^/servers", + "limit": [ + { + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z" + }, + ] + }, + ], + "absolute": { + "maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240 + } + } + } + + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> + <rates> + <rate uri="*" regex=".*"> + <limit value="10" verb="POST" remaining="2" + unit="MINUTE" + next-available="2011-12-15T22:42:45Z"/> + </rate> + <rate uri="*/servers" regex="^/servers"> + <limit value="50" verb="POST" remaining="10" + unit="DAY" + next-available="2011-12-15T22:42:45Z"/> + </rate> + </rates> + <absolute> + <limit name="maxServerMeta" value="1"/> + <limit name="maxPersonality" value="5"/> + <limit name="maxImageMeta" value="1"/> + <limit name="maxPersonalitySize" value="10240"/> + </absolute> + </limits> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_no_limits(self): + serializer = limits.LimitsXMLSerializer() + + fixture = { + "limits": { + "rate": [], + "absolute": {} + } + } + + output = serializer.serialize(fixture, 'index') + actual = 
minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> + <rates /> + <absolute /> + </limits> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 7b048507e..4ca79434f 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -30,8 +30,9 @@ from nova import flags from nova import test from nova import utils import nova.api.openstack -from nova.api.openstack import servers from nova.api.openstack import create_instance_helper +from nova.api.openstack import servers +from nova.api.openstack import wsgi import nova.compute.api from nova.compute import instance_types from nova.compute import power_state @@ -77,16 +78,16 @@ def return_virtual_interface_instance_nonexistant(interfaces): return _return_virtual_interface_by_instance -def return_server_with_addresses(private, public): +def return_server_with_attributes(**kwargs): def _return_server(context, id): - return stub_instance(id, private_address=private, - public_addresses=public) + return stub_instance(id, **kwargs) return _return_server -def return_server_with_interfaces(interfaces): +def return_server_with_addresses(private, public): def _return_server(context, id): - return stub_instance(id, interfaces=interfaces) + return stub_instance(id, private_address=private, + public_addresses=public) return _return_server @@ -148,14 +149,15 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id=1, private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", - uuid=FAKE_UUID, interfaces=None): + uuid=FAKE_UUID, image_ref="10", flavor_id="1", + interfaces=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) if interfaces is None: interfaces = [] - inst_type = instance_types.get_instance_type_by_flavor_id(1) + inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) if public_addresses is None: public_addresses = list() @@ -170,10 +172,12 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, instance = { "id": int(id), + "created_at": "2010-10-10T12:00:00Z", + "updated_at": "2010-11-11T11:00:00Z", "admin_pass": "", "user_id": user_id, "project_id": "", - "image_ref": "10", + "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, @@ -229,6 +233,7 @@ class MockSetAdminPassword(object): class ServersTest(test.TestCase): def setUp(self): + self.maxDiff = None super(ServersTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() @@ -305,24 +310,274 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['name'], 'server1') def test_get_server_by_id_v1_1(self): + image_bookmark = "http://localhost/images/10" + flavor_ref = "http://localhost/v1.1/flavors/1" + flavor_id = "1" + flavor_bookmark = "http://localhost/flavors/1" + + public_ip = '192.168.0.3' + private_ip = '172.19.0.1' + interfaces = [ + { + 'network': {'label': 'public'}, + 'fixed_ips': [ + {'address': public_ip}, + ], + }, + { + 'network': {'label': 'private'}, + 'fixed_ips': [ + {'address': private_ip}, + ], + }, + ] + new_return_server = return_server_with_attributes( + interfaces=interfaces) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.1/servers/1') res = 
req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['id'], 1) - self.assertEqual(res_dict['server']['name'], 'server1') + expected_server = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 0, + "name": "server1", + "status": "BUILD", + "hostId": '', + "image": { + "id": "10", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": { + "public": [ + { + "version": 4, + "addr": public_ip, + }, + ], + "private": [ + { + "version": 4, + "addr": private_ip, + }, + ], + }, + "metadata": { + "seq": "1", + }, + "links": [ + { + "rel": "self", + #FIXME(wwolf) Do we want the links to be id or uuid? + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } - expected_links = [ + self.assertDictMatch(res_dict, expected_server) + + def test_get_server_with_active_status_by_id_v1_1(self): + image_bookmark = "http://localhost/images/10" + flavor_ref = "http://localhost/v1.1/flavors/1" + flavor_id = "1" + flavor_bookmark = "http://localhost/flavors/1" + private_ip = "192.168.0.3" + public_ip = "1.2.3.4" + + interfaces = [ + { + 'network': {'label': 'public'}, + 'fixed_ips': [ + {'address': public_ip}, + ], + }, + { + 'network': {'label': 'private'}, + 'fixed_ips': [ + {'address': private_ip}, + ], + }, + ] + new_return_server = return_server_with_attributes( + interfaces=interfaces, power_state=1) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.1/servers/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + expected_server = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 100, + "name": "server1", + "status": "ACTIVE", + "hostId": '', + "image": { + "id": "10", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": { + "public": [ + { + "version": 4, + "addr": public_ip, + }, + ], + "private": [ + { + "version": 4, + "addr": private_ip, + }, + ], + }, + "metadata": { + "seq": "1", + }, + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + self.assertDictMatch(res_dict, expected_server) + + def test_get_server_with_id_image_ref_by_id_v1_1(self): + image_ref = "10" + image_bookmark = "http://localhost/images/10" + flavor_ref = "http://localhost/v1.1/flavors/1" + flavor_id = "1" + flavor_bookmark = "http://localhost/flavors/1" + private_ip = "192.168.0.3" + public_ip = "1.2.3.4" + + interfaces = [ { - "rel": "self", - "href": "http://localhost/v1.1/servers/1", + 'network': {'label': 'public'}, + 'fixed_ips': [ + {'address': public_ip}, + ], }, { - "rel": "bookmark", - "href": "http://localhost/servers/1", + 'network': {'label': 'private'}, + 'fixed_ips': [ + {'address': private_ip}, + ], }, ] + new_return_server = return_server_with_attributes( + interfaces=interfaces, power_state=1, image_ref=image_ref, + flavor_id=flavor_id) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = 
webob.Request.blank('/v1.1/servers/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + expected_server = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 100, + "name": "server1", + "status": "ACTIVE", + "hostId": '', + "image": { + "id": "10", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": { + "public": [ + { + "version": 4, + "addr": public_ip, + }, + ], + "private": [ + { + "version": 4, + "addr": private_ip, + }, + ], + }, + "metadata": { + "seq": "1", + }, + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } - self.assertEqual(res_dict['server']['links'], expected_links) + self.assertDictMatch(res_dict, expected_server) def test_get_server_by_id_with_addresses_xml(self): private = "192.168.0.3" @@ -439,6 +694,51 @@ class ServersTest(test.TestCase): self.assertEquals(ip.getAttribute('addr'), private) def test_get_server_by_id_with_addresses_v1_1(self): + FLAGS.use_ipv6 = True + interfaces = [ + { + 'network': {'label': 'network_1'}, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + { + 'network': {'label': 'network_2'}, + 'fixed_ips': [ + {'address': '172.19.0.1'}, + {'address': '172.19.0.2'}, + ], + 'fixed_ipv6': '2001:4860::12', + }, + ] + new_return_server = return_server_with_attributes( + interfaces=interfaces) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.1/servers/1') + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server1') + addresses = res_dict['server']['addresses'] + expected = { + 'network_1': [ + {'addr': '192.168.0.3', 'version': 4}, + {'addr': '192.168.0.4', 'version': 4}, + ], + 'network_2': [ + {'addr': '172.19.0.1', 'version': 4}, + {'addr': '172.19.0.2', 'version': 4}, + {'addr': '2001:4860::12', 'version': 6}, + ], + } + + self.assertEqual(addresses, expected) + + def test_get_server_by_id_with_addresses_v1_1_ipv6_disabled(self): + FLAGS.use_ipv6 = False interfaces = [ { 'network': {'label': 'network_1'}, @@ -453,9 +753,11 @@ class ServersTest(test.TestCase): {'address': '172.19.0.1'}, {'address': '172.19.0.2'}, ], + 'fixed_ipv6': '2001:4860::12', }, ] - new_return_server = return_server_with_interfaces(interfaces) + new_return_server = return_server_with_attributes( + interfaces=interfaces) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) req = webob.Request.blank('/v1.1/servers/1') @@ -479,6 +781,7 @@ class ServersTest(test.TestCase): self.assertEqual(addresses, expected) def test_get_server_addresses_v1_1(self): + FLAGS.use_ipv6 = True interfaces = [ { 'network': {'label': 'network_1'}, @@ -498,6 +801,7 @@ class ServersTest(test.TestCase): }, {'address': '172.19.0.2'}, ], + 'fixed_ipv6': '2001:4860::12', }, ] @@ -520,6 +824,7 @@ class ServersTest(test.TestCase): {'version': 4, 'addr': '172.19.0.1'}, {'version': 4, 'addr': '1.2.3.4'}, {'version': 4, 'addr': '172.19.0.2'}, + {'version': 6, 'addr': '2001:4860::12'}, ], }, } @@ -527,6 +832,7 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict, expected) def 
test_get_server_addresses_single_network_v1_1(self): + FLAGS.use_ipv6 = True interfaces = [ { 'network': {'label': 'network_1'}, @@ -546,6 +852,7 @@ class ServersTest(test.TestCase): }, {'address': '172.19.0.2'}, ], + 'fixed_ipv6': '2001:4860::12', }, ] _return_vifs = return_virtual_interface_by_instance(interfaces) @@ -562,6 +869,7 @@ class ServersTest(test.TestCase): {'version': 4, 'addr': '172.19.0.1'}, {'version': 4, 'addr': '1.2.3.4'}, {'version': 4, 'addr': '172.19.0.2'}, + {'version': 6, 'addr': '2001:4860::12'}, ], } self.assertEqual(res_dict, expected) @@ -657,20 +965,20 @@ class ServersTest(test.TestCase): for i, s in enumerate(res_dict['servers']): self.assertEqual(s['id'], i) self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s.get('imageId', None), None) + self.assertEqual(s.get('image', None), None) expected_links = [ - { - "rel": "self", - "href": "http://localhost/v1.1/servers/%d" % (i,), - }, - { - "rel": "bookmark", - "href": "http://localhost/servers/%d" % (i,), - }, - ] + { + "rel": "self", + "href": "http://localhost/v1.1/servers/%s" % s['id'], + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/%s" % s['id'], + }, + ] - self.assertEqual(s['links'], expected_links) + self.assertEqual(s['links'], expected_links) def test_get_servers_with_limit(self): req = webob.Request.blank('/v1.0/servers?limit=3') @@ -716,13 +1024,13 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.1/servers?marker=2') res = req.get_response(fakes.wsgi_app()) servers = json.loads(res.body)['servers'] - self.assertEqual([s['id'] for s in servers], [3, 4]) + self.assertEqual([s['name'] for s in servers], ["server3", "server4"]) def test_get_servers_with_limit_and_marker(self): req = webob.Request.blank('/v1.1/servers?limit=2&marker=1') res = req.get_response(fakes.wsgi_app()) servers = json.loads(res.body)['servers'] - self.assertEqual([s['id'] for s in servers], [2, 3]) + self.assertEqual([s['name'] for s in servers], ['server2', 'server3']) def test_get_servers_with_bad_marker(self): req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf') @@ -733,8 +1041,16 @@ class ServersTest(test.TestCase): def _setup_for_create_instance(self): """Shared implementation for tests below that create instance""" def instance_create(context, inst): - return {'id': 1, 'display_name': 'server_test', - 'uuid': FAKE_UUID} + inst_type = instance_types.get_instance_type_by_flavor_id(3) + image_ref = 'http://localhost/images/2' + return {'id': 1, + 'display_name': 'server_test', + 'uuid': FAKE_UUID, + 'instance_type': dict(inst_type), + 'image_ref': image_ref, + 'created_at': '2010-10-10T12:00:00Z', + 'updated_at': '2010-11-11T11:00:00Z', + } def server_update(context, id, params): return instance_create(context, id) @@ -896,6 +1212,18 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_create_instance_no_server_entity(self): + self._setup_for_create_instance() + + body = {} + + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + def test_create_instance_whitespace_name(self): self._setup_for_create_instance() @@ -922,8 +1250,26 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_href = 'http://localhost/v1.1/images/2' - flavor_ref = 
'http://localhost/v1.1/flavors/3' + image_href = 'http://localhost/images/2' + flavor_ref = 'http://localhost/flavors/3' + expected_flavor = { + "id": "3", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/flavors/3', + }, + ], + } + expected_image = { + "id": "2", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/images/2', + }, + ], + } body = { 'server': { 'name': 'server_test', @@ -948,9 +1294,42 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) - self.assertEqual(1, server['id']) - self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_href, server['imageRef']) + self.assertEqual(expected_flavor, server['flavor']) + self.assertEqual(expected_image, server['image']) + self.assertEqual(res.status_int, 200) + #self.assertEqual(1, server['id']) + + def test_create_instance_v1_1_invalid_flavor_href(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/asdf' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_bad_flavor_href(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/17' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() @@ -971,8 +1350,26 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_id = 2 - flavor_ref = 'http://localhost/v1.1/flavors/3' + image_id = "2" + flavor_ref = 'http://localhost/flavors/3' + expected_flavor = { + "id": "3", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/flavors/3', + }, + ], + } + expected_image = { + "id": "2", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/images/2', + }, + ], + } body = { 'server': { 'name': 'server_test', @@ -989,9 +1386,8 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) server = json.loads(res.body)['server'] - self.assertEqual(1, server['id']) - self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_id, server['imageRef']) + self.assertEqual(expected_flavor, server['flavor']) + self.assertEqual(expected_image, server['image']) self.assertEqual(res.status_int, 200) def test_create_instance_with_admin_pass_v1_0(self): @@ -1216,6 +1612,24 @@ class ServersTest(test.TestCase): self.assertEqual(s['metadata']['seq'], str(i)) def test_get_all_server_details_v1_1(self): + expected_flavor = { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": 'http://localhost/flavors/1', + }, + ], + } + expected_image = { + "id": "10", + "links": [ + { + "rel": 
"bookmark", + "href": 'http://localhost/images/10', + }, + ], + } req = webob.Request.blank('/v1.1/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -1224,8 +1638,8 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageRef'], 10) - self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1') + self.assertEqual(s['image'], expected_image) + self.assertEqual(s['flavor'], expected_flavor) self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -1717,7 +2131,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) + self.assertEqual(res.status_int, 500) def test_resized_server_has_correct_status(self): req = self.webreq('/1', 'GET') @@ -2115,6 +2529,62 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", "http://localhost:8774/v1.1/images/1") +class TextAddressesXMLSerialization(test.TestCase): + + serializer = nova.api.openstack.ips.IPXMLSerializer() + + def test_show(self): + fixture = { + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + } + output = self.serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <network xmlns="http://docs.openstack.org/compute/api/v1.1" + id="network_2"> + <ip version="4" addr="192.168.0.1"/> + <ip version="6" addr="fe80::beef"/> + </network> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + fixture = { + 'addresses': { + 'network_1': [ + {'addr': '192.168.0.3', 'version': 4}, + {'addr': '192.168.0.5', 'version': 4}, + ], + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + }, + } + output = self.serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <addresses xmlns="http://docs.openstack.org/compute/api/v1.1"> + <network id="network_2"> + <ip version="4" addr="192.168.0.1"/> + <ip version="6" addr="fe80::beef"/> + </network> + <network id="network_1"> + <ip version="4" addr="192.168.0.3"/> + <ip version="4" addr="192.168.0.5"/> + </network> + </addresses> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + class TestServerInstanceCreation(test.TestCase): def setUp(self): @@ -2417,3 +2887,249 @@ class TestGetKernelRamdiskFromImage(test.TestCase): kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. 
\ _do_get_kernel_ramdisk_from_image(image_meta) return kernel_id, ramdisk_id + + +class ServersViewBuilderV11Test(test.TestCase): + + def setUp(self): + self.instance = self._get_instance() + self.view_builder = self._get_view_builder() + + def tearDown(self): + pass + + def _get_instance(self): + instance = { + "id": 1, + "created_at": "2010-10-10T12:00:00Z", + "updated_at": "2010-11-11T11:00:00Z", + "admin_pass": "", + "user_id": "", + "project_id": "", + "image_ref": "5", + "kernel_id": "", + "ramdisk_id": "", + "launch_index": 0, + "key_name": "", + "key_data": "", + "state": 0, + "state_description": "", + "memory_mb": 0, + "vcpus": 0, + "local_gb": 0, + "hostname": "", + "host": "", + "instance_type": { + "flavorid": 1, + }, + "user_data": "", + "reservation_id": "", + "mac_address": "", + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), + "availability_zone": "", + "display_name": "test_server", + "display_description": "", + "locked": False, + "metadata": [], + #"address": , + #"floating_ips": [{"address":ip} for ip in public_addresses]} + "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"} + + return instance + + def _get_view_builder(self): + base_url = "http://localhost/v1.1" + views = nova.api.openstack.views + address_builder = views.addresses.ViewBuilderV11() + flavor_builder = views.flavors.ViewBuilderV11(base_url) + image_builder = views.images.ViewBuilderV11(base_url) + + view_builder = nova.api.openstack.views.servers.ViewBuilderV11( + address_builder, + flavor_builder, + image_builder, + base_url + ) + return view_builder + + def test_build_server(self): + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "name": "test_server", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, False) + self.assertDictMatch(output, expected_server) + + def test_build_server_detail(self): + image_bookmark = "http://localhost/images/5" + flavor_bookmark = "http://localhost/flavors/1" + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": '', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": {}, + "metadata": {}, + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, True) + self.assertDictMatch(output, expected_server) + + def test_build_server_detail_active_status(self): + #set the power state of the instance to running + self.instance['state'] = 1 + image_bookmark = "http://localhost/images/5" + flavor_bookmark = "http://localhost/flavors/1" + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 100, + "name": "test_server", + "status": "ACTIVE", + "hostId": '', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + 
"href": flavor_bookmark, + }, + ], + }, + "addresses": {}, + "metadata": {}, + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, True) + self.assertDictMatch(output, expected_server) + + def test_build_server_detail_with_metadata(self): + + metadata = [] + metadata.append(InstanceMetadata(key="Open", value="Stack")) + metadata.append(InstanceMetadata(key="Number", value=1)) + self.instance['metadata'] = metadata + + image_bookmark = "http://localhost/images/5" + flavor_bookmark = "http://localhost/flavors/1" + expected_server = { + "server": { + "id": 1, + "uuid": self.instance['uuid'], + "updated": "2010-11-11T11:00:00Z", + "created": "2010-10-10T12:00:00Z", + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": '', + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": image_bookmark, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": flavor_bookmark, + }, + ], + }, + "addresses": {}, + "metadata": { + "Open": "Stack", + "Number": "1", + }, + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/servers/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/servers/1", + }, + ], + } + } + + output = self.view_builder.build(self.instance, True) + self.assertDictMatch(output, expected_server) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index fd8d50904..da964ee1f 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -21,6 +21,7 @@ import webob from nova import context from nova import test from nova.tests.api.openstack import fakes +from nova.api.openstack import versions from nova.api.openstack import views @@ -43,19 +44,21 @@ class VersionsTest(test.TestCase): { "id": "v1.1", "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.1", + "href": "http://localhost/v1.1/", }], }, { "id": "v1.0", "status": "DEPRECATED", + "updated": "2010-10-09T11:30:00Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.0", + "href": "http://localhost/v1.0/", }], }, ] @@ -69,15 +72,12 @@ class VersionsTest(test.TestCase): self.assertEqual(res.content_type, "application/xml") expected = """<versions> - <version id="v1.1" status="CURRENT"> - <links> - <link href="http://localhost/v1.1" rel="self"/> - </links> + <version id="v1.1" status="CURRENT" updated="2011-07-18T11:30:00Z"> + <atom:link href="http://localhost/v1.1/" rel="self"/> </version> - <version id="v1.0" status="DEPRECATED"> - <links> - <link href="http://localhost/v1.0" rel="self"/> - </links> + <version id="v1.0" status="DEPRECATED" + updated="2010-10-09T11:30:00Z"> + <atom:link href="http://localhost/v1.0/" rel="self"/> </version> </versions>""".replace(" ", "").replace("\n", "") @@ -85,21 +85,64 @@ class VersionsTest(test.TestCase): self.assertEqual(expected, actual) + def test_get_version_list_atom(self): + req = webob.Request.blank('/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/atom+xml") + + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">Available API Versions</title> + <updated>2011-07-18T11:30:00Z</updated> + <id>http://localhost/</id> + <author> + 
<name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/" rel="self"/> + <entry> + <id>http://localhost/v1.1/</id> + <title type="text">Version v1.1</title> + <updated>2011-07-18T11:30:00Z</updated> + <link href="http://localhost/v1.1/" rel="self"/> + <content type="text"> + Version v1.1 CURRENT (2011-07-18T11:30:00Z) + </content> + </entry> + <entry> + <id>http://localhost/v1.0/</id> + <title type="text">Version v1.0</title> + <updated>2010-10-09T11:30:00Z</updated> + <link href="http://localhost/v1.0/" rel="self"/> + <content type="text"> + Version v1.0 DEPRECATED (2010-10-09T11:30:00Z) + </content> + </entry> + </feed> + """.replace(" ", "").replace("\n", "") + + actual = res.body.replace(" ", "").replace("\n", "") + + self.assertEqual(expected, actual) + def test_view_builder(self): base_url = "http://example.org/" version_data = { "id": "3.2.1", "status": "CURRENT", - } + "updated": "2011-07-18T11:30:00Z"} expected = { "id": "3.2.1", "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", "links": [ { "rel": "self", - "href": "http://example.org/3.2.1", + "href": "http://example.org/3.2.1/", }, ], } @@ -113,9 +156,99 @@ class VersionsTest(test.TestCase): base_url = "http://example.org/app/" version_number = "v1.4.6" - expected = "http://example.org/app/v1.4.6" + expected = "http://example.org/app/v1.4.6/" builder = views.versions.ViewBuilder(base_url) actual = builder.generate_href(version_number) self.assertEqual(actual, expected) + + def test_xml_serializer(self): + versions_data = { + 'versions': [ + { + "id": "2.7.1", + "updated": "2011-07-18T11:30:00Z", + "status": "DEPRECATED", + "links": [ + { + "rel": "self", + "href": "http://test/2.7.1", + }, + ], + }, + ] + } + + expected = """ + <versions> + <version id="2.7.1" status="DEPRECATED" + updated="2011-07-18T11:30:00Z"> + <atom:link href="http://test/2.7.1" rel="self"/> + </version> + </versions>""".replace(" ", "").replace("\n", "") + + serializer = versions.VersionsXMLSerializer() + response = serializer.default(versions_data) + response = response.replace(" ", "").replace("\n", "") + self.assertEqual(expected, response) + + def test_atom_serializer(self): + versions_data = { + 'versions': [ + { + "id": "2.9.8", + "updated": "2011-07-20T11:40:00Z", + "status": "CURRENT", + "links": [ + { + "rel": "self", + "href": "http://test/2.9.8", + }, + ], + }, + ] + } + + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text"> + Available API Versions + </title> + <updated> + 2011-07-20T11:40:00Z + </updated> + <id> + http://test/ + </id> + <author> + <name> + Rackspace + </name> + <uri> + http://www.rackspace.com/ + </uri> + </author> + <link href="http://test/" rel="self"/> + <entry> + <id> + http://test/2.9.8 + </id> + <title type="text"> + Version 2.9.8 + </title> + <updated> + 2011-07-20T11:40:00Z + </updated> + <link href="http://test/2.9.8" rel="self"/> + <content type="text"> + Version 2.9.8 CURRENT (2011-07-20T11:40:00Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") + + serializer = versions.VersionsAtomSerializer() + response = serializer.default(versions_data) + print response + response = response.replace(" ", "").replace("\n", "") + self.assertEqual(expected, response) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 5bdda7c7e..6dea78d17 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -256,6 +256,13 @@ class 
ResponseSerializerTest(test.TestCase): self.assertEqual(response.body, 'pew_json') self.assertEqual(response.status_int, 404) + def test_serialize_response_None(self): + response = self.serializer.serialize(None, 'application/json') + print response + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, '') + self.assertEqual(response.status_int, 404) + def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, self.serializer.serialize, diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 7762df41c..19028a451 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -230,7 +230,7 @@ def stub_out_db_network_api(stubs): continue fixed_ip_fields['virtual_interface'] = FakeModel(vif[0]) - def fake_instance_type_get_by_id(context, id): + def fake_instance_type_get(context, id): if flavor_fields['id'] == id: return FakeModel(flavor_fields) @@ -323,7 +323,7 @@ def stub_out_db_network_api(stubs): fake_fixed_ip_get_by_address, fake_fixed_ip_get_network, fake_fixed_ip_update, - fake_instance_type_get_by_id, + fake_instance_type_get, fake_virtual_interface_create, fake_virtual_interface_delete_by_instance, fake_virtual_interface_get_by_instance, @@ -415,7 +415,7 @@ def stub_out_db_instance_api(stubs, injected=True): def fake_instance_type_get_by_name(context, name): return INSTANCE_TYPES[name] - def fake_instance_type_get_by_id(context, id): + def fake_instance_type_get(context, id): for name, inst_type in INSTANCE_TYPES.iteritems(): if str(inst_type['id']) == str(id): return inst_type @@ -448,7 +448,7 @@ def stub_out_db_instance_api(stubs, injected=True): fake_network_get_all_by_instance, fake_instance_type_get_all, fake_instance_type_get_by_name, - fake_instance_type_get_by_id, + fake_instance_type_get, fake_instance_get_fixed_addresses, fake_instance_get_fixed_addresses_v6, fake_network_get_all_by_instance, diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a0d50b287..136082cc1 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -15,6 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
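# NOTE: illustrative sketch, not part of the commit diff. The
# test_serialize_response_None case above expects a response whose mapped
# Content-Type is kept but whose body stays empty when there is nothing to
# serialize. A stripped-down stand-in built directly on webob mirrors those
# assertions; the real logic lives in nova.api.openstack.wsgi, and the 404 is
# hard-coded here only because that is the status the test fixture uses.
import webob


def serialize_none(content_type):
    """Build the empty response the None-serialization test checks for."""
    response = webob.Response()
    response.status_int = 404
    response.headers['Content-Type'] = content_type
    # leave response.body at its default empty value: nothing to serialize
    return response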
+import mox from base64 import b64decode from M2Crypto import BIO @@ -29,6 +30,7 @@ from nova import db from nova import exception from nova import flags from nova import log as logging +from nova import network from nova import rpc from nova import test from nova import utils @@ -132,6 +134,33 @@ class CloudTestCase(test.TestCase): allocate, self.context) + def test_release_address(self): + address = "10.10.10.10" + allocate = self.cloud.allocate_address + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + result = self.cloud.release_address(self.context, address) + self.assertEqual(result['releaseResponse'], ['Address released.']) + + def test_release_address_still_associated(self): + address = "10.10.10.10" + fixed_ip = {'instance': {'id': 1}} + floating_ip = {'id': 0, + 'address': address, + 'fixed_ip_id': 0, + 'fixed_ip': fixed_ip, + 'project_id': None, + 'auto_assigned': False} + network_api = network.api.API() + self.mox.StubOutWithMock(network_api.db, 'floating_ip_get_by_address') + network_api.db.floating_ip_get_by_address(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(floating_ip) + self.mox.ReplayAll() + release = self.cloud.release_address + # ApiError: Floating ip is in use. Disassociate it before releasing. + self.assertRaises(exception.ApiError, release, self.context, address) + @test.skip_test("Skipping this pending future merge") def test_associate_disassociate_address(self): """Verifies associate runs cleanly without raising an exception""" @@ -240,25 +269,64 @@ class CloudTestCase(test.TestCase): delete = self.cloud.delete_security_group self.assertRaises(exception.ApiError, delete, self.context) - def test_authorize_revoke_security_group_ingress(self): + def test_authorize_security_group_ingress(self): kwargs = {'project_id': self.context.project_id, 'name': 'test'} sec = db.security_group_create(self.context, kwargs) authz = self.cloud.authorize_security_group_ingress kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - authz(self.context, group_name=sec['name'], **kwargs) + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges': + {'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'ip_protocol': u'tcp'}]} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_ingress_ip_permissions_groups(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'groups': {'1': {'user_id': u'someuser', + 'group_name': u'somegroup1'}, + '2': {'user_id': u'someuser', + 'group_name': u'othergroup2'}}, + 'ip_protocol': u'tcp'}]} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_revoke_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = 
{'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) revoke = self.cloud.revoke_security_group_ingress self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) - def test_authorize_revoke_security_group_ingress_by_id(self): - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) + def test_revoke_security_group_ingress_by_id(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) authz = self.cloud.authorize_security_group_ingress kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} authz(self.context, group_id=sec['id'], **kwargs) revoke = self.cloud.revoke_security_group_ingress self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) + def test_authorize_security_group_ingress_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + self.assertTrue(authz(self.context, group_id=sec['id'], **kwargs)) + def test_authorize_security_group_ingress_missing_protocol_params(self): sec = db.security_group_create(self.context, {'project_id': self.context.project_id, @@ -879,6 +947,21 @@ class CloudTestCase(test.TestCase): self._wait_for_running(ec2_instance_id) return ec2_instance_id + def test_rescue_unrescue_instance(self): + instance_id = self._run_instance( + image_id='ami-1', + instance_type=FLAGS.default_instance_type, + max_count=1) + self.cloud.rescue_instance(context=self.context, + instance_id=instance_id) + # NOTE(vish): This currently does no validation, it simply makes sure + # that the code path doesn't throw an exception. + self.cloud.unrescue_instance(context=self.context, + instance_id=instance_id) + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. + self.cloud.terminate_instances(self.context, [instance_id]) + def test_console_output(self): instance_id = self._run_instance( image_id='ami-1', diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2900c594e..2a8f33dd3 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -424,11 +424,12 @@ class ComputeTestCase(test.TestCase): self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) context = self.context.elevated() instance_id = self._create_instance() - self.compute.prep_resize(context, instance_id, 1) + instance_ref = db.instance_get(context, instance_id) + self.compute.prep_resize(context, instance_ref['uuid'], 1) migration_ref = db.migration_get_by_instance_and_status(context, - instance_id, 'pre-migrating') + instance_ref['uuid'], 'pre-migrating') try: - self.compute.finish_resize(context, instance_id, + self.compute.finish_resize(context, instance_ref['uuid'], int(migration_ref['id']), {}) except KeyError, e: # Only catch key errors. 
We want other reasons for the test to @@ -441,14 +442,15 @@ class ComputeTestCase(test.TestCase): """Ensure notifications on instance migrate/resize""" instance_id = self._create_instance() context = self.context.elevated() + inst_ref = db.instance_get(context, instance_id) self.compute.run_instance(self.context, instance_id) test_notifier.NOTIFICATIONS = [] db.instance_update(self.context, instance_id, {'host': 'foo'}) - self.compute.prep_resize(context, instance_id, 1) + self.compute.prep_resize(context, inst_ref['uuid'], 1) migration_ref = db.migration_get_by_instance_and_status(context, - instance_id, 'pre-migrating') + inst_ref['uuid'], 'pre-migrating') self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) msg = test_notifier.NOTIFICATIONS[0] @@ -471,13 +473,15 @@ class ComputeTestCase(test.TestCase): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() context = self.context.elevated() + inst_ref = db.instance_get(context, instance_id) self.compute.run_instance(self.context, instance_id) - db.instance_update(self.context, instance_id, {'host': 'foo'}) - self.compute.prep_resize(context, instance_id, 1) + db.instance_update(self.context, inst_ref['uuid'], + {'host': 'foo'}) + self.compute.prep_resize(context, inst_ref['uuid'], 1) migration_ref = db.migration_get_by_instance_and_status(context, - instance_id, 'pre-migrating') - self.compute.resize_instance(context, instance_id, + inst_ref['uuid'], 'pre-migrating') + self.compute.resize_instance(context, inst_ref['uuid'], migration_ref['id']) self.compute.terminate_instance(context, instance_id) @@ -519,6 +523,57 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(context, instance_id) + def test_finish_revert_resize(self): + """Ensure that the flavor is reverted to the original on revert""" + context = self.context.elevated() + instance_id = self._create_instance() + + def fake(*args, **kwargs): + pass + + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + self.stubs.Set(self.compute.driver, 'revert_resize', fake) + self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) + + self.compute.run_instance(self.context, instance_id) + + # Confirm the instance size before the resize starts + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 1) + + db.instance_update(self.context, instance_id, {'host': 'foo'}) + + self.compute.prep_resize(context, inst_ref['uuid'], 3) + + migration_ref = db.migration_get_by_instance_and_status(context, + inst_ref['uuid'], 'pre-migrating') + + self.compute.resize_instance(context, inst_ref['uuid'], + migration_ref['id']) + self.compute.finish_resize(context, inst_ref['uuid'], + int(migration_ref['id']), {}) + + # Prove that the instance size is now the new size + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 3) + + # Finally, revert and confirm the old flavor has been applied + self.compute.revert_resize(context, inst_ref['uuid'], + migration_ref['id']) + self.compute.finish_revert_resize(context, inst_ref['uuid'], + migration_ref['id']) + + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 1) + + self.compute.terminate_instance(context, 
instance_id) + def test_get_by_flavor_id(self): type = instance_types.get_instance_type_by_flavor_id(1) self.assertEqual(type['name'], 'm1.tiny') @@ -569,7 +624,6 @@ class ComputeTestCase(test.TestCase): self._setup_other_managers() dbmock = self.mox.CreateMock(db) volmock = self.mox.CreateMock(self.volume_manager) - netmock = self.mox.CreateMock(self.network_manager) drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) @@ -577,12 +631,11 @@ class ComputeTestCase(test.TestCase): for i in range(len(i_ref['volumes'])): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') - netmock.setup_compute_network(c, i_ref['id']) + drivermock.plug_vifs(i_ref, []) drivermock.ensure_filtering_rules_for_instance(i_ref) self.compute.db = dbmock self.compute.volume_manager = volmock - self.compute.network_manager = netmock self.compute.driver = drivermock self.mox.ReplayAll() @@ -597,18 +650,16 @@ class ComputeTestCase(test.TestCase): self._setup_other_managers() dbmock = self.mox.CreateMock(db) - netmock = self.mox.CreateMock(self.network_manager) drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) - netmock.setup_compute_network(c, i_ref['id']) + drivermock.plug_vifs(i_ref, []) drivermock.ensure_filtering_rules_for_instance(i_ref) self.compute.db = dbmock - self.compute.network_manager = netmock self.compute.driver = drivermock self.mox.ReplayAll() @@ -629,18 +680,20 @@ class ComputeTestCase(test.TestCase): dbmock = self.mox.CreateMock(db) netmock = self.mox.CreateMock(self.network_manager) volmock = self.mox.CreateMock(self.volume_manager) + drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): volmock.setup_compute_volume(c, i_ref['volumes'][i]['id']) for i in range(FLAGS.live_migration_retry_count): - netmock.setup_compute_network(c, i_ref['id']).\ + drivermock.plug_vifs(i_ref, []).\ AndRaise(exception.ProcessExecutionError()) self.compute.db = dbmock self.compute.network_manager = netmock self.compute.volume_manager = volmock + self.compute.driver = drivermock self.mox.ReplayAll() self.assertRaises(exception.ProcessExecutionError, @@ -775,7 +828,7 @@ class ComputeTestCase(test.TestCase): for v in i_ref['volumes']: self.compute.volume_manager.remove_compute_volume(c, v['id']) self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') - self.compute.driver.unfilter_instance(i_ref) + self.compute.driver.unfilter_instance(i_ref, []) # executing self.mox.ReplayAll() diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py new file mode 100644 index 000000000..107fd03e3 --- /dev/null +++ b/nova/tests/test_db_api.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for the DB API""" + +from nova import test +from nova import context +from nova import db +from nova import flags +from nova.auth import manager + +FLAGS = flags.FLAGS + + +def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'): + ctxt = context.get_admin_context() + network_ref = db.project_get_networks(ctxt, + 'fake', + associate=True)[0] + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_id} + vif_ref = db.virtual_interface_create(ctxt, vif) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id'], + 'allocated': True, + 'instance_id': instance_id} + db.fixed_ip_create(ctxt, fixed_ip) + fix_ref = db.fixed_ip_get_by_address(ctxt, ip) + db.floating_ip_create(ctxt, {'address': flo_addr, + 'fixed_ip_id': fix_ref.id}) + + +class DbApiTestCase(test.TestCase): + def setUp(self): + super(DbApiTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('admin', 'admin', 'admin', True) + self.project = self.manager.create_project('proj', 'admin', 'proj') + self.context = context.RequestContext(user=self.user, + project=self.project) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(DbApiTestCase, self).tearDown() + + def test_instance_get_project_vpn(self): + result = db.fixed_ip_get_all(self.context) + values = {'instance_type_id': FLAGS.default_instance_type, + 'image_ref': FLAGS.vpn_image_id, + 'project_id': self.project.id + } + instance = db.instance_create(self.context, values) + result = db.instance_get_project_vpn(self.context, self.project.id) + self.assertEqual(instance.id, result.id) + + def test_instance_get_project_vpn_joins(self): + result = db.fixed_ip_get_all(self.context) + values = {'instance_type_id': FLAGS.default_instance_type, + 'image_ref': FLAGS.vpn_image_id, + 'project_id': self.project.id + } + instance = db.instance_create(self.context, values) + _setup_networking(instance.id) + result = db.instance_get_project_vpn(self.context, self.project.id) + self.assertEqual(instance.id, result.id) + self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address, + '1.2.1.2') diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py index c26cf82ff..393ed1e36 100644 --- a/nova/tests/test_instance_types_extra_specs.py +++ b/nova/tests/test_instance_types_extra_specs.py @@ -105,8 +105,8 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): self.instance_type_id) self.assertEquals(expected_specs, actual_specs) - def test_instance_type_get_by_id_with_extra_specs(self): - instance_type = db.api.instance_type_get_by_id( + def test_instance_type_get_with_extra_specs(self): + instance_type = db.api.instance_type_get( context.get_admin_context(), self.instance_type_id) self.assertEquals(instance_type['extra_specs'], @@ -115,7 +115,7 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): xpu_arch="fermi", xpus="2", xpu_model="Tesla 2050")) - instance_type = db.api.instance_type_get_by_id( + 
instance_type = db.api.instance_type_get( context.get_admin_context(), 5) self.assertEquals(instance_type['extra_specs'], {}) @@ -136,7 +136,7 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): "m1.small") self.assertEquals(instance_type['extra_specs'], {}) - def test_instance_type_get_by_id_with_extra_specs(self): + def test_instance_type_get_with_extra_specs(self): instance_type = db.api.instance_type_get_by_flavor_id( context.get_admin_context(), 105) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f99e1713d..ad0931a89 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -54,10 +54,15 @@ def _create_network_info(count=1, ipv6=None): fake_ip = '0.0.0.0/0' fake_ip_2 = '0.0.0.1/0' fake_ip_3 = '0.0.0.1/0' + fake_vlan = 100 + fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, - 'cidr_v6': fake_ip} + 'cidr_v6': fake_ip, + 'vlan': fake_vlan, + 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, + 'dhcp_server': fake, 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -218,9 +223,19 @@ class LibvirtConnTestCase(test.TestCase): def setattr(self, key, val): self.__setattr__(key, val) + # A fake VIF driver + class FakeVIFDriver(object): + + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + # Creating mocks fake = FakeLibvirtConnection() fakeip = FakeIptablesFirewallDriver + fakevif = FakeVIFDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) @@ -228,6 +243,8 @@ class LibvirtConnTestCase(test.TestCase): # Inevitable mocks for connection.LibvirtConnection self.mox.StubOutWithMock(connection.utils, 'import_class') connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.utils, 'import_object') + connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif) self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') connection.LibvirtConnection._conn = fake @@ -279,22 +296,6 @@ class LibvirtConnTestCase(test.TestCase): _create_network_info(2)) self.assertTrue(len(result['nics']) == 2) - def test_get_nic_for_xml_v4(self): - conn = connection.LibvirtConnection(True) - network, mapping = _create_network_info()[0] - self.flags(use_ipv6=False) - params = conn._get_nic_for_xml(network, mapping)['extra_params'] - self.assertTrue(params.find('PROJNETV6') == -1) - self.assertTrue(params.find('PROJMASKV6') == -1) - - def test_get_nic_for_xml_v6(self): - conn = connection.LibvirtConnection(True) - network, mapping = _create_network_info()[0] - self.flags(use_ipv6=True) - params = conn._get_nic_for_xml(network, mapping)['extra_params'] - self.assertTrue(params.find('PROJNETV6') > -1) - self.assertTrue(params.find('PROJMASKV6') > -1) - @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) @@ -721,6 +722,9 @@ class LibvirtConnTestCase(test.TestCase): return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) + self.mox.StubOutWithMock(self.compute, "recover_live_migration") + self.compute.recover_live_migration(self.context, instance_ref, + dest='dest') # Start test self.mox.ReplayAll() diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index b09021e13..28f50d328 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -45,6 +45,7 @@ class FakeModel(dict): networks = [{'id': 0, 
'label': 'test0', 'injected': False, + 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', @@ -54,7 +55,8 @@ networks = [{'id': 0, 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', - 'dns': '192.168.0.1', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', 'vlan': None, 'host': None, 'project_id': 'fake_project', @@ -62,6 +64,7 @@ networks = [{'id': 0, {'id': 1, 'label': 'test1', 'injected': False, + 'multi_host': False, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', @@ -71,7 +74,8 @@ networks = [{'id': 0, 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'broadcast': '192.168.1.255', - 'dns': '192.168.0.1', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', 'vlan': None, 'host': None, 'project_id': 'fake_project', @@ -122,34 +126,20 @@ class FlatNetworkTestCase(test.TestCase): self.network = network_manager.FlatManager(host=HOST) self.network.db = db - def test_set_network_hosts(self): - self.mox.StubOutWithMock(db, 'network_get_all') - self.mox.StubOutWithMock(db, 'network_set_host') - self.mox.StubOutWithMock(db, 'network_update') - - db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]]) - db.network_set_host(mox.IgnoreArg(), - networks[0]['id'], - mox.IgnoreArg()).AndReturn(HOST) - db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.mox.ReplayAll() - - self.network.set_network_hosts(None) - def test_get_instance_nw_info(self): self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - self.mox.StubOutWithMock(db, 'instance_type_get_by_id') + self.mox.StubOutWithMock(db, 'instance_type_get') db.fixed_ip_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fixed_ips) db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs) - db.instance_type_get_by_id(mox.IgnoreArg(), + db.instance_type_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(flavor) self.mox.ReplayAll() - nw_info = self.network.get_instance_nw_info(None, 0, 0) + nw_info = self.network.get_instance_nw_info(None, 0, 0, None) self.assertTrue(nw_info) @@ -159,11 +149,15 @@ class FlatNetworkTestCase(test.TestCase): 'cidr': '192.168.%s.0/24' % i, 'cidr_v6': '2001:db%s::/64' % i8, 'id': i, - 'injected': 'DONTCARE'} + 'multi_host': False, + 'injected': 'DONTCARE', + 'bridge_interface': 'fake_fa%s' % i, + 'vlan': None} self.assertDictMatch(nw[0], check) check = {'broadcast': '192.168.%s.255' % i, + 'dhcp_server': '192.168.%s.1' % i, 'dns': 'DONTCARE', 'gateway': '192.168.%s.1' % i, 'gateway6': '2001:db%s::1' % i8, @@ -171,7 +165,9 @@ class FlatNetworkTestCase(test.TestCase): 'ips': 'DONTCARE', 'label': 'test%s' % i, 'mac': 'DE:AD:BE:EF:00:0%s' % i, - 'rxtx_cap': 'DONTCARE'} + 'rxtx_cap': 'DONTCARE', + 'should_create_vlan': False, + 'should_create_bridge': False} self.assertDictMatch(nw[1], check) check = [{'enabled': 'DONTCARE', diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 4cb7447d3..199a8bc52 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -647,7 +647,7 @@ class XenAPIVMTestCase(test.TestCase): self.flags(xenapi_inject_image=False) instance = self._create_instance() conn = xenapi_conn.get_connection(False) - conn.rescue(instance, None) + conn.rescue(instance, None, []) def test_unrescue(self): instance = self._create_instance() diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 178279d31..34dc5f544 100644 --- 
a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -61,11 +61,11 @@ class ComputeDriver(object): """Return a list of InstanceInfo for all registered VMs""" raise NotImplementedError() - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """Launch a VM for the specified instance""" raise NotImplementedError() - def destroy(self, instance, cleanup=True): + def destroy(self, instance, network_info, cleanup=True): """Destroy (shutdown and delete) the specified instance. The given parameter is an instance of nova.compute.service.Instance, @@ -81,7 +81,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot specified VM""" raise NotImplementedError() @@ -146,11 +146,11 @@ class ComputeDriver(object): """resume the specified instance""" raise NotImplementedError() - def rescue(self, instance, callback): + def rescue(self, instance, callback, network_info): """Rescue the specified instance""" raise NotImplementedError() - def unrescue(self, instance, callback): + def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" raise NotImplementedError() @@ -224,7 +224,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() @@ -253,3 +253,7 @@ class ComputeDriver(object): def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" raise NotImplementedError() + + def plug_vifs(self, instance, network_info): + """Plugs in VIFs to networks.""" + raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index ea0a59f21..26bc421c0 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -167,7 +167,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def reboot(self, instance): + def reboot(self, instance, network_info): """ Reboot the specified instance. @@ -240,13 +240,13 @@ class FakeConnection(driver.ComputeDriver): """ pass - def rescue(self, instance): + def rescue(self, instance, callback, network_info): """ Rescue the specified instance. """ pass - def unrescue(self, instance): + def unrescue(self, instance, callback, network_info): """ Unrescue the specified instance. 
""" @@ -293,7 +293,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def destroy(self, instance): + def destroy(self, instance, network_info): key = instance.name if key in self.instances: del self.instances[key] @@ -499,7 +499,7 @@ class FakeConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info=None): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 5c1dc772d..81c7dea58 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: @@ -368,14 +368,14 @@ class HyperVConnection(driver.ComputeDriver): wmi_obj.Properties_.Item(prop).Value return newinst - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot the specified instance.""" vm = self._lookup(instance.name) if vm is None: raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy the VM. Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index e1a683da8..a75636390 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -82,9 +82,13 @@ </disk> #end if #for $vol in $volumes - <disk type='block'> + <disk type='${vol.type}'> <driver type='raw'/> + #if $vol.type == 'network' + <source protocol='${vol.protocol}' name='${vol.name}'/> + #else <source dev='${vol.device_path}'/> + #end if <target dev='${vol.mount_device}' bus='${disk_bus}'/> </disk> #end for @@ -92,6 +96,22 @@ #end if #for $nic in $nics + #if $vif_type == 'ethernet' + <interface type='ethernet'> + <target dev='${nic.name}' /> + <mac address='${nic.mac_address}' /> + <script path='${nic.script}' /> + </interface> + #else if $vif_type == '802.1Qbh' + <interface type='direct'> + <mac address='${nic.mac_address}'/> + <source dev='${nic.device_name}' mode='private'/> + <virtualport type='802.1Qbh'> + <parameters profileid='${nic.profile_name}'/> + </virtualport> + <model type='virtio'/> + </interface> + #else <interface type='bridge'> <source bridge='${nic.bridge_name}'/> <mac address='${nic.mac_address}'/> @@ -107,6 +127,8 @@ #end if </filterref> </interface> + #end if + #end for <!-- The order is significant here. 
File must be defined first --> <serial type="file"> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 977bb7dfe..96f9c41f9 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -123,6 +123,11 @@ flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') flags.DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') +flags.DEFINE_string('libvirt_vif_type', 'bridge', + 'Type of VIF to create.') +flags.DEFINE_string('libvirt_vif_driver', + 'nova.virt.libvirt.vif.LibvirtBridgeDriver', + 'The libvirt VIF driver to configure the VIFs.') def get_connection(read_only): @@ -165,6 +170,7 @@ class LibvirtConnection(driver.ComputeDriver): fw_class = utils.import_class(FLAGS.firewall_driver) self.firewall_driver = fw_class(get_connection=self._get_connection) + self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver) def init_host(self, host): # Adopt existing VM's running here @@ -256,7 +262,12 @@ class LibvirtConnection(driver.ComputeDriver): infos.append(info) return infos - def destroy(self, instance, cleanup=True): + def plug_vifs(self, instance, network_info): + """Plugin VIFs into networks.""" + for (network, mapping) in network_info: + self.vif_driver.plug(instance, network, mapping) + + def destroy(self, instance, network_info, cleanup=True): instance_name = instance['name'] try: @@ -300,6 +311,9 @@ class LibvirtConnection(driver.ComputeDriver): locals()) raise + for (network, mapping) in network_info: + self.vif_driver.unplug(instance, network, mapping) + def _wait_for_destroy(): """Called at an interval until the VM is gone.""" instance_name = instance['name'] @@ -314,7 +328,8 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_destroy) timer.start(interval=0.5, now=True) - self.firewall_driver.unfilter_instance(instance) + self.firewall_driver.unfilter_instance(instance, + network_info=network_info) if cleanup: self._cleanup(instance) @@ -335,21 +350,20 @@ class LibvirtConnection(driver.ComputeDriver): def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] - if device_path.startswith('/dev/'): + (type, protocol, name) = \ + self._get_volume_device_info(vol['device_path']) + if type == 'block': xml = """<disk type='block'> <driver name='qemu' type='raw'/> <source dev='%s'/> <target dev='%s' bus='virtio'/> </disk>""" % (device_path, mount_device) - elif ':' in device_path: - (protocol, name) = device_path.split(':') + elif type == 'network': xml = """<disk type='network'> <driver name='qemu' type='raw'/> <source protocol='%s' name='%s'/> <target dev='%s' bus='virtio'/> - </disk>""" % (protocol, - name, - mount_device) + </disk>""" % (protocol, name, mount_device) else: raise exception.InvalidDevicePath(path=device_path) @@ -461,7 +475,7 @@ class LibvirtConnection(driver.ComputeDriver): shutil.rmtree(temp_dir) @exception.wrap_exception() - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot a virtual machine, given an instance reference. This method actually destroys and re-creates the domain to ensure the @@ -476,7 +490,8 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... 
- self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) + self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance) self.firewall_driver.prepare_instance_filter(instance) self._create_new_domain(xml) @@ -526,7 +541,7 @@ class LibvirtConnection(driver.ComputeDriver): dom.create() @exception.wrap_exception() - def rescue(self, instance): + def rescue(self, instance, callback, network_info): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the @@ -535,7 +550,7 @@ class LibvirtConnection(driver.ComputeDriver): data recovery. """ - self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) xml = self.to_xml(instance, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, @@ -564,14 +579,14 @@ class LibvirtConnection(driver.ComputeDriver): return timer.start(interval=0.5, now=True) @exception.wrap_exception() - def unrescue(self, instance): + def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. Because reboot destroys and re-creates instances, unresue should simply call reboot. """ - self.reboot(instance) + self.reboot(instance, network_info) @exception.wrap_exception() def poll_rescued_instances(self, timeout): @@ -580,7 +595,7 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): xml = self.to_xml(instance, False, network_info=network_info, block_device_mapping=block_device_mapping) block_device_mapping = block_device_mapping or [] @@ -881,9 +896,12 @@ class LibvirtConnection(driver.ComputeDriver): address = mapping['ips'][0]['ip'] netmask = mapping['ips'][0]['netmask'] address_v6 = None + gateway_v6 = None + netmask_v6 = None if FLAGS.use_ipv6: address_v6 = mapping['ip6s'][0]['ip'] netmask_v6 = mapping['ip6s'][0]['netmask'] + gateway_v6 = mapping['gateway6'] net_info = {'name': 'eth%d' % ifc_num, 'address': address, 'netmask': netmask, @@ -891,7 +909,7 @@ class LibvirtConnection(driver.ComputeDriver): 'broadcast': mapping['broadcast'], 'dns': mapping['dns'], 'address_v6': address_v6, - 'gateway6': mapping['gateway6'], + 'gateway6': gateway_v6, 'netmask_v6': netmask_v6} nets.append(net_info) @@ -926,40 +944,6 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) - def _get_nic_for_xml(self, network, mapping): - # Assume that the gateway also acts as the dhcp server. 
- dhcp_server = mapping['gateway'] - gateway6 = mapping.get('gateway6') - mac_id = mapping['mac'].replace(':', '') - - if FLAGS.allow_project_net_traffic: - template = "<parameter name=\"%s\"value=\"%s\" />\n" - net, mask = netutils.get_net_and_mask(network['cidr']) - values = [("PROJNET", net), ("PROJMASK", mask)] - if FLAGS.use_ipv6: - net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( - network['cidr_v6']) - values.extend([("PROJNETV6", net_v6), - ("PROJMASKV6", prefixlen_v6)]) - - extra_params = "".join([template % value for value in values]) - else: - extra_params = "\n" - - result = { - 'id': mac_id, - 'bridge_name': network['bridge'], - 'mac_address': mapping['mac'], - 'ip_address': mapping['ips'][0]['ip'], - 'dhcp_server': dhcp_server, - 'extra_params': extra_params, - } - - if gateway6: - result['gateway6'] = gateway6 + "/128" - - return result - root_mount_device = 'vda' # FIXME for now. it's hard coded. local_mount_device = 'vdb' # FIXME for now. it's hard coded. @@ -971,6 +955,16 @@ class LibvirtConnection(driver.ComputeDriver): return True return False + @exception.wrap_exception + def _get_volume_device_info(self, device_path): + if device_path.startswith('/dev/'): + return ('block', None, None) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + return ('network', protocol, name) + else: + raise exception.InvalidDevicePath(path=device_path) + def _prepare_xml_info(self, instance, rescue=False, network_info=None, block_device_mapping=None): block_device_mapping = block_device_mapping or [] @@ -981,7 +975,7 @@ class LibvirtConnection(driver.ComputeDriver): nics = [] for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, mapping)) + nics.append(self.vif_driver.plug(instance, network, mapping)) # FIXME(vish): stick this in db inst_type_id = instance['instance_type_id'] inst_type = instance_types.get_instance_type(inst_type_id) @@ -993,6 +987,9 @@ class LibvirtConnection(driver.ComputeDriver): for vol in block_device_mapping: vol['mount_device'] = _strip_dev(vol['mount_device']) + (vol['type'], vol['protocol'], vol['name']) = \ + self._get_volume_device_info(vol['device_path']) + ebs_root = self._volume_in_mapping(self.root_mount_device, block_device_mapping) if self._volume_in_mapping(self.local_mount_device, @@ -1010,14 +1007,14 @@ class LibvirtConnection(driver.ComputeDriver): 'rescue': rescue, 'local': local_gb, 'driver_type': driver_type, + 'vif_type': FLAGS.libvirt_vif_type, 'nics': nics, 'ebs_root': ebs_root, 'volumes': block_device_mapping} - if FLAGS.vnc_enabled: - if FLAGS.libvirt_type != 'lxc' or FLAGS.libvirt_type != 'uml': - xml_info['vncserver_host'] = FLAGS.vncserver_host - xml_info['vnc_keymap'] = FLAGS.vnc_keymap + if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): + xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -1580,9 +1577,10 @@ class LibvirtConnection(driver.ComputeDriver): timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info): """See comments of same method in firewall_driver.""" - self.firewall_driver.unfilter_instance(instance_ref) + self.firewall_driver.unfilter_instance(instance_ref, + network_info=network_info) def update_host_status(self): """See xenapi_conn.py implementation.""" diff --git a/nova/virt/libvirt/firewall.py 
b/nova/virt/libvirt/firewall.py index 379197398..9ce57b6c9 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -46,7 +46,7 @@ class FirewallDriver(object): At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Stop filtering instance""" raise NotImplementedError() @@ -300,9 +300,10 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Clear out the nwfilter rules.""" - network_info = netutils.get_network_info(instance) + if not network_info: + network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -542,11 +543,11 @@ class IptablesFirewallDriver(FirewallDriver): """No-op. Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, network_info) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index e5aaf7cec..041eacb2d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -59,7 +59,7 @@ def get_network_info(instance): vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) networks = db.network_get_all_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get_by_id(admin_context, + flavor = db.instance_type_get(admin_context, instance['instance_type_id']) network_info = [] @@ -91,9 +91,14 @@ def get_network_info(instance): 'broadcast': network['broadcast'], 'mac': vif['address'], 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [network['dns']], + 'dns': [], 'ips': [ip_dict(ip) for ip in network_ips]} + if network['dns1']: + mapping['dns'].append(network['dns1']) + if network['dns2']: + mapping['dns'].append(network['dns2']) + if FLAGS.use_ipv6: mapping['ip6s'] = [ip6_dict()] mapping['gateway6'] = network['gateway_v6'] diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py new file mode 100644 index 000000000..24d45d1a7 --- /dev/null +++ b/nova/virt/libvirt/vif.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# Copyright (C) 2011 Nicira, Inc +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""VIF drivers for libvirt.""" + +from nova import flags +from nova import log as logging +from nova.network import linux_net +from nova.virt.libvirt import netutils +from nova import utils +from nova.virt.vif import VIFDriver + +LOG = logging.getLogger('nova.virt.libvirt.vif') + +FLAGS = flags.FLAGS + +flags.DEFINE_string('libvirt_ovs_bridge', 'br-int', + 'Name of Integration Bridge used by Open vSwitch') + + +class LibvirtBridgeDriver(VIFDriver): + """VIF driver for Linux bridge.""" + + def _get_configurations(self, network, mapping): + """Get a dictionary of VIF configurations for bridge type.""" + # Assume that the gateway also acts as the dhcp server. + gateway6 = mapping.get('gateway6') + mac_id = mapping['mac'].replace(':', '') + + if FLAGS.allow_project_net_traffic: + template = "<parameter name=\"%s\"value=\"%s\" />\n" + net, mask = netutils.get_net_and_mask(network['cidr']) + values = [("PROJNET", net), ("PROJMASK", mask)] + if FLAGS.use_ipv6: + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( + network['cidr_v6']) + values.extend([("PROJNETV6", net_v6), + ("PROJMASKV6", prefixlen_v6)]) + + extra_params = "".join([template % value for value in values]) + else: + extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': mapping['dhcp_server'], + 'extra_params': extra_params, + } + + if gateway6: + result['gateway6'] = gateway6 + "/128" + + return result + + def plug(self, instance, network, mapping): + """Ensure that the bridge exists, and add VIF to it.""" + if (not network.get('multi_host') and + mapping.get('should_create_bridge')): + if mapping.get('should_create_vlan'): + LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), + {'vlan': network['vlan'], + 'bridge': network['bridge']}) + linux_net.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface']) + else: + LOG.debug(_("Ensuring bridge %s"), network['bridge']) + linux_net.ensure_bridge(network['bridge'], + network['bridge_interface']) + + return self._get_configurations(network, mapping) + + def unplug(self, instance, network, mapping): + """No manual unplugging required.""" + pass + + +class LibvirtOpenVswitchDriver(VIFDriver): + """VIF driver for Open vSwitch.""" + + def plug(self, instance, network, mapping): + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + iface_id = "nova-" + vif_id + if not linux_net._device_exists(dev): + utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') + utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') + utils.execute('sudo', 'ovs-vsctl', '--', '--may-exist', 'add-port', + FLAGS.libvirt_ovs_bridge, dev, + '--', 'set', 'Interface', dev, + "external-ids:iface-id=%s" % iface_id, + '--', 'set', 'Interface', dev, + "external-ids:iface-status=active", + '--', 'set', 'Interface', dev, + "external-ids:attached-mac=%s" % mapping['mac']) + + result = { + 'script': '', + 'name': dev, + 'mac_address': mapping['mac']} + return result + + def unplug(self, instance, network, mapping): + """Unplug the VIF from the network by deleting the port from + the bridge.""" + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + try: + utils.execute('sudo', 'ovs-vsctl', 'del-port', + FLAGS.flat_network_bridge, dev) + utils.execute('sudo', 'ip', 'link', 'delete', dev) + except: + LOG.warning(_("Failed while unplugging vif of instance '%s'"), + instance['name']) + raise diff 
--git a/nova/virt/vif.py b/nova/virt/vif.py new file mode 100644 index 000000000..b78689957 --- /dev/null +++ b/nova/virt/vif.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF module common to all virt layers.""" + + +class VIFDriver(object): + """Abstract class that defines generic interfaces for all VIF drivers.""" + + def plug(self, instance, network, mapping): + """Plug VIF into network.""" + raise NotImplementedError() + + def unplug(self, instance, network, mapping): + """Unplug VIF from network.""" + raise NotImplementedError() diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py index e77842535..08e3bf0b1 100644 --- a/nova/virt/vmwareapi/network_utils.py +++ b/nova/virt/vmwareapi/network_utils.py @@ -45,10 +45,30 @@ def get_network_with_the_name(session, network_name="vmnet0"): networks = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Network", vm_networks, ["summary.name"])
- for network in networks:
- if network.propSet[0].val == network_name:
- return network.obj
- return None
+ network_obj = {}
+ for network in vm_networks:
+ # Get network properties
+ if network._type == 'DistributedVirtualPortgroup':
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "DistributedVirtualPortgroup", "config")
+ # NOTE(asomya): This only works on ESXi if the port binding is
+ # set to ephemeral
+ if props.name == network_name:
+ network_obj['type'] = 'DistributedVirtualPortgroup'
+ network_obj['dvpg'] = props.key
+ network_obj['dvsw'] = props.distributedVirtualSwitch.value
+ else:
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "Network", "summary.name")
+ if props == network_name:
+ network_obj['type'] = 'Network'
+ network_obj['name'] = network_name
+ if (len(network_obj) > 0):
+ return network_obj
+ else:
+ return None
def get_vswitch_for_vlan_interface(session, vlan_interface):
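The reworked get_network_with_the_name() above now returns a small dict describing the match instead of a raw managed-object reference. As a minimal sketch (not part of the patch), here are the two dict shapes a caller can expect and how it might branch on them; the helper name and sample values are illustrative only.

```python
# Hypothetical sketch: the two shapes that get_network_with_the_name()
# can now return, and how a caller might branch on them.

def describe_network_ref(network_ref):
    """Return a short summary of a network_ref dict."""
    if network_ref is None:
        return "no matching network found"
    if network_ref['type'] == 'DistributedVirtualPortgroup':
        # Keys added by the patch for dvSwitch-backed port groups.
        return ("DVS port group key=%s on switch %s"
                % (network_ref['dvpg'], network_ref['dvsw']))
    # Plain vSwitch network: only the name is carried along.
    return "standard network %s" % network_ref['name']


print(describe_network_ref({'type': 'Network', 'name': 'vmnet0'}))
print(describe_network_ref({'type': 'DistributedVirtualPortgroup',
                            'dvpg': 'dvportgroup-12',
                            'dvsw': 'dvs-10'}))
```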
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py new file mode 100644 index 000000000..b3e43b209 --- /dev/null +++ b/nova/virt/vmwareapi/vif.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for VMWare.""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.vif import VIFDriver +from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt.vmwareapi import network_utils + + +LOG = logging.getLogger("nova.virt.vmwareapi.vif") + +FLAGS = flags.FLAGS + + +class VMWareVlanBridgeDriver(VIFDriver): + """VIF Driver to setup bridge/VLAN networking using VMWare API.""" + + def plug(self, instance, network, mapping): + """Create a vlan and bridge unless they already exist.""" + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + + # Open vmwareapi session + host_ip = FLAGS.vmwareapi_host_ip + host_username = FLAGS.vmwareapi_host_username + host_password = FLAGS.vmwareapi_host_password + if not host_ip or host_username is None or host_password is None: + raise Exception(_('Must specify vmwareapi_host_ip, ' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use ' + 'connection_type=vmwareapi')) + session = VMWareAPISession(host_ip, host_username, host_password, + FLAGS.vmwareapi_api_retry_count) + vlan_interface = bridge_interface + # Check if the vlan_interface physical network adapter exists on the + # host. + if not network_utils.check_if_vlan_interface_exists(session, + vlan_interface): + raise exception.NetworkAdapterNotFound(adapter=vlan_interface) + + # Get the vSwitch associated with the Physical Adapter + vswitch_associated = network_utils.get_vswitch_for_vlan_interface( + session, vlan_interface) + if vswitch_associated is None: + raise exception.SwicthNotFoundForNetworkAdapter( + adapter=vlan_interface) + # Check whether bridge already exists and retrieve the the ref of the + # network whose name_label is "bridge" + network_ref = network_utils.get_network_with_the_name(session, bridge) + if network_ref is None: + # Create a port group on the vSwitch associated with the + # vlan_interface corresponding physical network adapter on the ESX + # host. 
+ network_utils.create_port_group(session, bridge, + vswitch_associated, vlan_num) + else: + # Get the vlan id and vswitch corresponding to the port group + pg_vlanid, pg_vswitch = \ + network_utils.get_vlanid_and_vswitch_for_portgroup(session, + bridge) + + # Check if the vswitch associated is proper + if pg_vswitch != vswitch_associated: + raise exception.InvalidVLANPortGroup( + bridge=bridge, expected=vswitch_associated, + actual=pg_vswitch) + + # Check if the vlan id is proper for the port group + if pg_vlanid != vlan_num: + raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, + pgroup=pg_vlanid) + + def unplug(self, instance, network, mapping): + pass diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 1638149f1..55578dd3c 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -40,7 +40,7 @@ def split_datastore_path(datastore_path): def get_vm_create_spec(client_factory, instance, data_store_name,
network_name="vmnet0",
- os_type="otherGuest"):
+ os_type="otherGuest", network_ref=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.name
@@ -93,7 +93,8 @@ def create_controller_spec(client_factory, key):
return virtual_device_config
-def create_network_spec(client_factory, network_name, mac_address):
+def create_network_spec(client_factory, network_name, mac_address,
+ network_ref=None):
"""
Builds a config spec for the addition of a new network
adapter to the VM.
@@ -105,9 +106,24 @@ def create_network_spec(client_factory, network_name, mac_address):
# Get the recommended card type for the VM based on the guest OS of the VM
net_device = client_factory.create('ns0:VirtualPCNet32')
- backing = \
- client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
- backing.deviceName = network_name
+ # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
+ # ephemeral. Invalid configuration if set to static and the NIC does
+ # not come up on boot if set to dynamic.
+ backing = None
+ if (network_ref['type'] == "DistributedVirtualPortgroup"):
+ backing_name = \
+ 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ backing = \
+ client_factory.create(backing_name)
+ portgroup = \
+ client_factory.create('ns0:DistributedVirtualSwitchPortConnection')
+ portgroup.switchUuid = network_ref['dvsw']
+ portgroup.portgroupKey = network_ref['dvpg']
+ backing.port = portgroup
+ else:
+ backing = \
+ client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
+ backing.deviceName = network_name
connectable_spec = \
client_factory.create('ns0:VirtualDeviceConnectInfo')
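The new branch in create_network_spec() picks a distributed-virtual-port backing when network_ref['type'] is "DistributedVirtualPortgroup" and otherwise falls back to the classic deviceName backing. The following is an illustrative sketch only: StubFactory stands in for the real suds client factory (an assumption for the example) so the attribute wiring of each backing type can be shown in isolation.

```python
# Illustrative sketch: which backing object each branch builds.
# StubFactory is a stand-in for the real suds-based client factory.

class StubFactory(object):
    def create(self, type_name):
        # The real factory returns suds objects; a simple attribute
        # holder is enough to show the wiring.
        return type('Stub', (object,), {'_type': type_name})()


def build_backing(factory, network_name, network_ref):
    if network_ref and network_ref['type'] == 'DistributedVirtualPortgroup':
        backing = factory.create(
            'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
        port = factory.create('ns0:DistributedVirtualSwitchPortConnection')
        port.switchUuid = network_ref['dvsw']
        port.portgroupKey = network_ref['dvpg']
        backing.port = port
    else:
        backing = factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
        backing.deviceName = network_name
    return backing


backing = build_backing(StubFactory(), 'vmnet0', {'type': 'Network'})
print(backing.deviceName)  # -> vmnet0
```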
@@ -278,9 +294,11 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name):
return config_spec
-def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
+def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask,
+ gateway, broadcast, dns):
"""Builds the machine id change config spec."""
- machine_id_str = "%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway)
+ machine_id_str = "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask,
+ gateway, broadcast, dns)
virtual_machine_config_spec = \
client_factory.create('ns0:VirtualMachineConfigSpec')
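With this hunk, the machine-id string handed to the guest carries six semicolon-separated fields instead of four. A small sketch of building and splitting that string, assuming the field order shown in the patch (mac;ip;netmask;gateway;broadcast;dns); the helper names are hypothetical.

```python
# Sketch of the extended machine-id format, assuming the field order
# used by get_machine_id_change_spec() in this patch:
# mac;ip;netmask;gateway;broadcast;dns

def build_machine_id(mac, ip_addr, netmask, gateway, broadcast, dns):
    return "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway,
                                  broadcast, dns)


def parse_machine_id(machine_id_str):
    keys = ('mac', 'ip', 'netmask', 'gateway', 'broadcast', 'dns')
    return dict(zip(keys, machine_id_str.split(';')))


mid = build_machine_id('de:ad:be:ef:00:01', '192.168.1.10',
                       '255.255.255.0', '192.168.1.1',
                       '192.168.1.255', '192.168.1.1')
print(parse_machine_id(mid)['broadcast'])  # -> 192.168.1.255
```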
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 94d9e6226..7e7d2dac3 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -31,6 +31,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import utils
from nova.compute import power_state
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -38,6 +39,10 @@ from nova.virt.vmwareapi import vmware_images
from nova.virt.vmwareapi import network_utils
FLAGS = flags.FLAGS
+flags.DEFINE_string('vmware_vif_driver',
+ 'nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
+ 'The VMWare VIF driver to configure the VIFs.')
+
LOG = logging.getLogger("nova.virt.vmwareapi.vmops")
VMWARE_POWER_STATES = {
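The new vmware_vif_driver flag holds a dotted Python path that is resolved when VMOps is constructed. As a hedged sketch of that late-import pattern, here is roughly what the lookup does, using stdlib importlib instead of nova.utils.import_object (the stdlib-only version and the example path are assumptions for illustration).

```python
# Sketch of the "import a driver from a dotted path" pattern that
# FLAGS.vmware_vif_driver relies on; nova.utils.import_object does
# essentially this lookup-and-instantiate step.

import importlib


def import_object(dotted_path):
    module_name, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, class_name)()


# Example with a stdlib class so the snippet runs anywhere; in nova the
# path would be e.g. 'nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver'.
driver = import_object('collections.OrderedDict')
print(type(driver).__name__)  # -> OrderedDict
```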
@@ -52,6 +57,7 @@ class VMWareVMOps(object):
def __init__(self, session):
"""Initializer."""
self._session = session
+ self._vif_driver = utils.import_object(FLAGS.vmware_vif_driver)
def _wait_with_callback(self, instance_id, task, callback):
"""Waits for the task to finish and does a callback after."""
@@ -83,7 +89,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance):
+ def spawn(self, instance, network_info):
"""
Creates a VM instance.
@@ -116,8 +122,10 @@ class VMWareVMOps(object):
net_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=net_name)
+ return network_ref
- _check_if_network_bridge_exists()
+ self.plug_vifs(instance, network_info)
+ network_obj = _check_if_network_bridge_exists()
def _get_datastore_ref():
"""Get the datastore list and choose the first local storage."""
@@ -175,8 +183,10 @@ class VMWareVMOps(object):
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
# Get the create vm config spec
- config_spec = vm_util.get_vm_create_spec(client_factory, instance,
- data_store_name, net_name, os_type)
+ config_spec = vm_util.get_vm_create_spec(
+ client_factory, instance,
+ data_store_name, net_name, os_type,
+ network_obj)
def _execute_create_vm():
"""Create VM on ESX host."""
@@ -472,11 +482,14 @@ class VMWareVMOps(object):
_clean_temp_data()
- def reboot(self, instance):
+ def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
+
+ self.plug_vifs(instance, network_info)
+
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -514,7 +527,7 @@ class VMWareVMOps(object):
self._session._wait_for_task(instance.id, reset_task)
LOG.debug(_("Did hard reboot of VM %s") % instance.name)
- def destroy(self, instance):
+ def destroy(self, instance, network_info):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -560,6 +573,8 @@ class VMWareVMOps(object):
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep))
+ self._unplug_vifs(instance, network_info)
+
# Delete the folder holding the VM related content on
# the datastore.
try:
@@ -718,13 +733,17 @@ class VMWareVMOps(object):
net_mask = network["netmask"]
gateway = network["gateway"]
+ broadcast = network["broadcast"]
+ dns = network["dns"]
+
addresses = db.instance_get_fixed_addresses(admin_context,
instance['id'])
ip_addr = addresses[0] if addresses else None
machine_id_chanfge_spec = \
vm_util.get_machine_id_change_spec(client_factory, mac_address,
- ip_addr, net_mask, gateway)
+ ip_addr, net_mask, gateway,
+ broadcast, dns)
LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
({'name': instance.name,
@@ -784,3 +803,13 @@ class VMWareVMOps(object):
if vm.propSet[0].val == vm_name:
return vm.obj
return None
+
+ def plug_vifs(self, instance, network_info):
+ """Plug VIFs into networks."""
+ for (network, mapping) in network_info:
+ self._vif_driver.plug(instance, network, mapping)
+
+ def _unplug_vifs(self, instance, network_info):
+ """Unplug VIFs from networks."""
+ for (network, mapping) in network_info:
+ self._vif_driver.unplug(instance, network, mapping)
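plug_vifs() and _unplug_vifs() above simply iterate the (network, mapping) pairs in network_info and delegate to the configured VIF driver. A minimal sketch (not from the patch) of a driver that satisfies that plug/unplug contract; a recording no-op driver like this is a common choice for unit tests, and the class name and fields are illustrative.

```python
# Minimal sketch of a VIF driver satisfying the plug/unplug contract
# used by plug_vifs()/_unplug_vifs(); it records calls instead of
# touching the hypervisor.

class NoopVIFDriver(object):

    def __init__(self):
        self.plugged = []

    def plug(self, instance, network, mapping):
        self.plugged.append((network.get('bridge'), mapping.get('mac')))

    def unplug(self, instance, network, mapping):
        self.plugged = [p for p in self.plugged
                        if p[1] != mapping.get('mac')]


driver = NoopVIFDriver()
network_info = [({'bridge': 'br100'}, {'mac': 'de:ad:be:ef:00:01'})]
for (network, mapping) in network_info:
    driver.plug(None, network, mapping)
print(driver.plugged)  # -> [('br100', 'de:ad:be:ef:00:01')]
```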
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index d80e14931..ce57847b2 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,21 +124,21 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info=None, block_device_mapping=None):
+ def spawn(self, instance, network_info, block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance)
+ self._vmops.spawn(instance, network_info)
def snapshot(self, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(instance, name)
- def reboot(self, instance):
+ def reboot(self, instance, network_info):
"""Reboot VM instance."""
- self._vmops.reboot(instance)
+ self._vmops.reboot(instance, network_info)
- def destroy(self, instance):
+ def destroy(self, instance, network_info):
"""Destroy VM instance."""
- self._vmops.destroy(instance)
+ self._vmops.destroy(instance, network_info)
def pause(self, instance, callback):
"""Pause VM instance."""
@@ -194,6 +194,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""Sets the specified host's ability to accept new instances."""
pass
+ def plug_vifs(self, instance, network_info):
+ """Plugs in VIFs to networks."""
+ self._vmops.plug_vifs(instance, network_info)
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py new file mode 100644 index 000000000..527602243 --- /dev/null +++ b/nova/virt/xenapi/vif.py @@ -0,0 +1,140 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# Copyright (C) 2011 Nicira, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for XenAPI.""" + +from nova import flags +from nova import log as logging +from nova.virt.vif import VIFDriver +from nova.virt.xenapi.network_utils import NetworkHelper + +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_ovs_integration_bridge', 'xapi1', + 'Name of Integration Bridge used by Open vSwitch') + +LOG = logging.getLogger("nova.virt.xenapi.vif") + + +class XenAPIBridgeDriver(VIFDriver): + """VIF Driver for XenAPI that uses XenAPI to create Networks.""" + + def plug(self, xenapi_session, vm_ref, instance, device, network, + network_mapping): + if network_mapping.get('should_create_vlan'): + network_ref = self.ensure_vlan_bridge(xenapi_session, network) + else: + network_ref = NetworkHelper.find_network_with_bridge( + xenapi_session, network['bridge']) + rxtx_cap = network_mapping.pop('rxtx_cap') + vif_rec = {} + vif_rec['device'] = str(device) + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = network_mapping['mac'] + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else '' + vif_rec['qos_algorithm_params'] = \ + {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {} + return vif_rec + + def ensure_vlan_bridge(self, xenapi_session, network): + """Ensure that a VLAN bridge exists""" + + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + # Check whether bridge already exists + # Retrieve network whose name_label is "bridge" + network_ref = NetworkHelper.find_network_with_name_label( + xenapi_session, bridge) + if network_ref is None: + # If bridge does not exists + # 1 - create network + description = 'network for nova bridge %s' % bridge + network_rec = {'name_label': bridge, + 'name_description': description, + 'other_config': {}} + network_ref = xenapi_session.call_xenapi('network.create', + network_rec) + # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double + # quotes inside single quotes as xapi filter only support + # tokens in double quotes + expr = 'field "device" = "%s" and \ + field "VLAN" = "-1"' % bridge_interface + pifs = xenapi_session.call_xenapi('PIF.get_all_records_where', + expr) + pif_ref = None + # Multiple PIF are ok: we are dealing with a pool + if len(pifs) == 0: + raise Exception(_('Found no PIF for device %s') % \ + bridge_interface) + for pif_ref in pifs.keys(): + xenapi_session.call_xenapi('VLAN.create', + pif_ref, + str(vlan_num), + network_ref) + else: + # Check VLAN tag is appropriate + network_rec = xenapi_session.call_xenapi('network.get_record', + network_ref) + # Retrieve PIFs from network 
+            for pif_ref in network_rec['PIFs']:
+                # Retrieve VLAN from PIF
+                pif_rec = xenapi_session.call_xenapi('PIF.get_record',
+                                                     pif_ref)
+                pif_vlan = int(pif_rec['VLAN'])
+                # Raise an exception if VLAN != vlan_num
+                if pif_vlan != vlan_num:
+                    raise Exception(_(
+                        "PIF %(pif_rec['uuid'])s for network "
+                        "%(bridge)s has VLAN id %(pif_vlan)d. "
+                        "Expected %(vlan_num)d") % locals())
+
+        return network_ref
+
+    def unplug(self, instance, network, mapping):
+        pass
+
+
+class XenAPIOpenVswitchDriver(VIFDriver):
+    """VIF driver for Open vSwitch with XenAPI."""
+
+    def plug(self, xenapi_session, vm_ref, instance, device, network,
+             network_mapping):
+        # with OVS model, always plug into an OVS integration bridge
+        # that is already created
+        network_ref = NetworkHelper.find_network_with_bridge(xenapi_session,
+                          FLAGS.xenapi_ovs_integration_bridge)
+        vif_rec = {}
+        vif_rec['device'] = str(device)
+        vif_rec['network'] = network_ref
+        vif_rec['VM'] = vm_ref
+        vif_rec['MAC'] = network_mapping['mac']
+        vif_rec['MTU'] = '1500'
+        vif_id = "nova-" + str(instance['id']) + "-" + str(network['id'])
+        vif_rec['qos_algorithm_type'] = ""
+        vif_rec['qos_algorithm_params'] = {}
+        # OVS on the hypervisor monitors this key and uses it to
+        # set the iface-id attribute
+        vif_rec['other_config'] = {"nicira-iface-id": vif_id}
+        return vif_rec
+
+    def unplug(self, instance, network, mapping):
+        pass
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 71107aff4..62863c6d8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -283,28 +283,6 @@ class VMHelper(HelperBase):
             raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
 
     @classmethod
-    def create_vif(cls, session, vm_ref, network_ref, mac_address,
-                   dev, rxtx_cap=0):
-        """Create a VIF record.  Returns a Deferred that gives the new
-        VIF reference."""
-        vif_rec = {}
-        vif_rec['device'] = str(dev)
-        vif_rec['network'] = network_ref
-        vif_rec['VM'] = vm_ref
-        vif_rec['MAC'] = mac_address
-        vif_rec['MTU'] = '1500'
-        vif_rec['other_config'] = {}
-        vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else ''
-        vif_rec['qos_algorithm_params'] = \
-            {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {}
-        LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
-                    ' network %(network_ref)s.') % locals())
-        vif_ref = session.call_xenapi('VIF.create', vif_rec)
-        LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
-                    ' network %(network_ref)s.') % locals())
-        return vif_ref
-
-    @classmethod
     def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
         """Create a VDI record and returns its reference."""
         vdi_ref = session.get_xenapi().VDI.create(
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c332c27b0..0473abb97 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -52,6 +52,9 @@ FLAGS = flags.FLAGS
 flags.DEFINE_integer('windows_version_timeout', 300,
                      'number of seconds to wait for windows agent to be '
                      'fully operational')
+flags.DEFINE_string('xenapi_vif_driver',
+                    'nova.virt.xenapi.vif.XenAPIBridgeDriver',
+                    'The XenAPI VIF driver using XenServer Network APIs.')
 
 
 def cmp_version(a, b):
@@ -78,6 +81,7 @@ class VMOps(object):
         self._session = session
         self.poll_rescue_last_ran = None
         VMHelper.XenAPI = self.XenAPI
+        self.vif_driver = utils.import_object(FLAGS.xenapi_vif_driver)
 
     def list_instances(self):
         """List VM instances."""
@@ -255,7 +259,7 @@ class VMOps(object):
         VMHelper.preconfigure_instance(self._session, instance,
                                        first_vdi_ref, network_info)
 
-        self.create_vifs(vm_ref, network_info)
+        self.create_vifs(vm_ref, instance, network_info)
         self.inject_network_info(instance, network_info, vm_ref)
         return vm_ref
@@ -340,6 +344,7 @@ class VMOps(object):
                 _check_agent_version()
                 _inject_files()
                 _set_admin_password()
+                self.reset_network(instance, vm_ref)
                 return True
             except Exception, exc:
                 LOG.warn(exc)
@@ -349,9 +354,6 @@ class VMOps(object):
 
         timer.f = _wait_for_boot
 
-        # call to reset network to configure network from xenstore
-        self.reset_network(instance, vm_ref)
-
         return timer.start(interval=0.5, now=True)
 
     def _handle_spawn_error(self, vdis, spawn_error):
@@ -469,7 +471,7 @@ class VMOps(object):
                 self._session, instance, template_vdi_uuids, image_id)
         finally:
             if template_vm_ref:
-                self._destroy(instance, template_vm_ref,
+                self._destroy(instance, template_vm_ref, None,
                     shutdown=False, destroy_kernel_ramdisk=False)
 
         logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -839,7 +841,7 @@ class VMOps(object):
         self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref)
 
-    def destroy(self, instance):
+    def destroy(self, instance, network_info):
         """Destroy VM instance.
 
         This is the method exposed by xenapi_conn.destroy().  The rest of the
@@ -849,9 +851,9 @@ class VMOps(object):
         instance_id = instance.id
         LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
         vm_ref = VMHelper.lookup(self._session, instance.name)
-        return self._destroy(instance, vm_ref, shutdown=True)
+        return self._destroy(instance, vm_ref, network_info, shutdown=True)
 
-    def _destroy(self, instance, vm_ref, shutdown=True,
+    def _destroy(self, instance, vm_ref, network_info, shutdown=True,
                  destroy_kernel_ramdisk=True):
         """Destroys VM instance by performing:
@@ -873,6 +875,10 @@ class VMOps(object):
             self._destroy_kernel_ramdisk(instance, vm_ref)
         self._destroy_vm(instance, vm_ref)
 
+        if network_info:
+            for (network, mapping) in network_info:
+                self.vif_driver.unplug(instance, network, mapping)
+
     def _wait_with_callback(self, instance_id, task, callback):
         ret = None
         try:
@@ -1068,7 +1074,7 @@ class VMOps(object):
             # catch KeyError for domid if instance isn't running
             pass
 
-    def create_vifs(self, vm_ref, network_info):
+    def create_vifs(self, vm_ref, instance, network_info):
         """Creates vifs for an instance."""
 
         logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
@@ -1077,14 +1083,19 @@ class VMOps(object):
             self._session.get_xenapi().VM.get_record(vm_ref)
 
         for device, (network, info) in enumerate(network_info):
-            mac_address = info['mac']
-            bridge = network['bridge']
-            rxtx_cap = info.pop('rxtx_cap')
-            network_ref = \
-                NetworkHelper.find_network_with_bridge(self._session,
-                                                       bridge)
-            VMHelper.create_vif(self._session, vm_ref, network_ref,
-                                mac_address, device, rxtx_cap)
+            vif_rec = self.vif_driver.plug(self._session,
+                    vm_ref, instance, device, network, info)
+            network_ref = vif_rec['network']
+            LOG.debug(_('Creating VIF for VM %(vm_ref)s,' \
+                ' network %(network_ref)s.') % locals())
+            vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
+            LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+                ' network %(network_ref)s.') % locals())
+
+    def plug_vifs(self, instance, network_info):
+        """Set up VIF networking on the host."""
+        for (network, mapping) in network_info:
+            self.vif_driver.plug(self._session, instance, network, mapping)
 
     def reset_network(self, instance, vm_ref=None):
         """Creates uuid arg to pass to make_agent_call and calls it."""
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index ec8c44c1c..7c355a55b 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -210,7 +210,7 @@ class XenAPIConnection(driver.ComputeDriver):
         """ Create snapshot from a running VM instance """
         self._vmops.snapshot(instance, image_id)
 
-    def reboot(self, instance):
+    def reboot(self, instance, network_info):
         """Reboot VM instance"""
         self._vmops.reboot(instance)
@@ -224,9 +224,9 @@ class XenAPIConnection(driver.ComputeDriver):
         """
         self._vmops.inject_file(instance, b64_path, b64_contents)
 
-    def destroy(self, instance):
+    def destroy(self, instance, network_info):
         """Destroy VM instance"""
-        self._vmops.destroy(instance)
+        self._vmops.destroy(instance, network_info)
 
     def pause(self, instance, callback):
         """Pause VM instance"""
@@ -249,11 +249,11 @@ class XenAPIConnection(driver.ComputeDriver):
         """resume the specified instance"""
         self._vmops.resume(instance, callback)
 
-    def rescue(self, instance, callback):
+    def rescue(self, instance, callback, network_info):
         """Rescue the specified instance"""
         self._vmops.rescue(instance, callback)
 
-    def unrescue(self, instance, callback):
+    def unrescue(self, instance, callback, network_info):
         """Unrescue the specified instance"""
         self._vmops.unrescue(instance, callback)
@@ -269,6 +269,9 @@ class XenAPIConnection(driver.ComputeDriver):
         """inject network info for specified instance"""
         self._vmops.inject_network_info(instance, network_info)
 
+    def plug_vifs(self, instance_ref, network_info):
+        self._vmops.plug_vifs(instance_ref, network_info)
+
     def get_info(self, instance_id):
         """Return data about VM instance"""
         return self._vmops.get_info(instance_id)
@@ -322,7 +325,7 @@ class XenAPIConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         return
 
-    def unfilter_instance(self, instance_ref):
+    def unfilter_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')
diff --git a/nova/volume/api.py b/nova/volume/api.py
index cfc274c77..52b3a9fed 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -52,7 +52,7 @@ class API(base.Base):
 
         if quota.allowed_volumes(context, 1, size) < 1:
             pid = context.project_id
-            LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
+            LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
                        " %(size)sG volume") % locals())
             raise quota.QuotaError(_("Volume quota exceeded. You cannot "
                                      "create a volume of size %sG") % size)
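
Editor's note: the net effect of this changeset is that XenAPI VIF creation becomes pluggable. VMOps loads whatever class the new xenapi_vif_driver flag names via utils.import_object(), asks it for a VIF record in create_vifs(), and calls its unplug() during _destroy(). The sketch below shows how a deployment might switch to the Open vSwitch driver and what a custom driver could look like; the flagfile line and the ExampleFlatDriver class are illustrative assumptions, not part of this commit.

# Hypothetical flagfile entry (not part of this commit) selecting the OVS
# driver added in nova/virt/xenapi/vif.py:
#   --xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIOpenVswitchDriver

# Illustrative custom driver: plug() only has to return a XenAPI VIF record;
# VMOps.create_vifs() performs the actual VIF.create call.
from nova.virt.vif import VIFDriver
from nova.virt.xenapi.network_utils import NetworkHelper


class ExampleFlatDriver(VIFDriver):
    def plug(self, xenapi_session, vm_ref, instance, device, network,
             network_mapping):
        # Reuse the bridge named in the network entry; no VLAN handling.
        network_ref = NetworkHelper.find_network_with_bridge(
            xenapi_session, network['bridge'])
        return {'device': str(device),
                'network': network_ref,
                'VM': vm_ref,
                'MAC': network_mapping['mac'],
                'MTU': '1500',
                'other_config': {},
                'qos_algorithm_type': '',
                'qos_algorithm_params': {}}

    def unplug(self, instance, network, mapping):
        # Nothing to tear down for a plain bridge.
        pass

Loading such a class only requires pointing xenapi_vif_driver at its import path; no change to vmops.py is needed.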

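Editor's note: destroy(), reboot(), rescue() and unrescue() on the compute driver now accept network_info so the configured VIF driver can run at teardown. Below is a rough illustration of the (network, mapping) pairs that vmops iterates, matching the keys read by the VIF drivers; all values and the conn/instance names are made up for the example.

# Made-up network_info matching the keys read by the VIF drivers and by
# VMOps._destroy(): a list of (network, mapping) tuples.
fake_network_info = [
    ({'id': 1, 'bridge': 'xenbr0', 'bridge_interface': 'eth0', 'vlan': 100},
     {'mac': 'aa:bb:cc:dd:ee:ff', 'rxtx_cap': 0,
      'should_create_vlan': False})]

# conn is assumed to be a XenAPIConnection; network_info is now forwarded to
# VMOps.destroy(), which unplugs each VIF after the VM is destroyed.
conn.destroy(instance, fake_network_info)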