| | | |
|---|---|---|
| author | Rick Harris <rconradharris@gmail.com> | 2011-09-16 19:45:46 +0000 |
| committer | Rick Harris <rconradharris@gmail.com> | 2011-09-16 19:45:46 +0000 |
| commit | 9f2ef26d5e4e34c26bd3c8fe62b81bd6409de2c5 | |
| tree | cfdd3d3612c680c9a2ca68cd6dcbb1464690e0cc | |
| parent | f31b37c80a9ef0c4ba07940897388094e5ed052c | |
| parent | 8c5c5bb4dd8b8c53fb3ed0bbed5598da55fab12b | |
Merging trunk
67 files changed, 2387 insertions, 1679 deletions
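
Most of this merge replaces the minidom-based OpenStack API XML serializers with lxml.etree equivalents that build elements directly and render them through a shared `_to_xml()` helper. A small, self-contained sketch of that pattern follows; the namespace and element names mirror the metadata serializer in the diff below, while the sample dict is purely illustrative.

```python
from lxml import etree

XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
NSMAP = {None: XMLNS_V11}


def serialize_metadata(meta_dict):
    # Build <metadata> in the v1.1 namespace with one <meta key="..."> child
    # per entry, as MetadataXMLSerializer.populate_metadata() now does.
    metadata = etree.Element('metadata', nsmap=NSMAP)
    for key, value in meta_dict.items():
        elem = etree.SubElement(metadata, 'meta')
        elem.set('key', str(key))
        elem.text = value
    # Equivalent of the new XMLDictSerializer._to_xml() helper in wsgi.py.
    return etree.tostring(metadata, encoding='UTF-8', xml_declaration=True)


# Illustrative usage with a made-up metadata entry.
print(serialize_metadata({'Server Label': 'Web Head 1'}))
```

The flavor, image, IP, limits, server, and version serializers changed below all follow this shape: build the tree with `SubElement()`/`set()`, then hand the root to `_to_xml()`.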
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index eacfdc0df..fb1afa43a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -272,11 +272,23 @@ class CloudController(object): mappings = {} mappings['ami'] = block_device.strip_dev(root_device_name) mappings['root'] = root_device_name - - # 'ephemeralN' and 'swap' + default_local_device = instance_ref.get('default_local_device') + if default_local_device: + mappings['ephemeral0'] = default_local_device + default_swap_device = instance_ref.get('default_swap_device') + if default_swap_device: + mappings['swap'] = default_swap_device + ebs_devices = [] + + # 'ephemeralN', 'swap' and ebs for bdm in db.block_device_mapping_get_all_by_instance( ctxt, instance_ref['id']): - if (bdm['volume_id'] or bdm['snapshot_id'] or bdm['no_device']): + if bdm['no_device']: + continue + + # ebs volume case + if (bdm['volume_id'] or bdm['snapshot_id']): + ebs_devices.append(bdm['device_name']) continue virtual_name = bdm['virtual_name'] @@ -286,6 +298,16 @@ class CloudController(object): if block_device.is_swap_or_ephemeral(virtual_name): mappings[virtual_name] = bdm['device_name'] + # NOTE(yamahata): I'm not sure how ebs device should be numbered. + # Right now sort by device name for deterministic + # result. + if ebs_devices: + nebs = 0 + ebs_devices.sort() + for ebs in ebs_devices: + mappings['ebs%d' % nebs] = ebs + nebs += 1 + return mappings def get_metadata(self, address): @@ -304,11 +326,6 @@ class CloudController(object): instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) - if instance_ref['key_name']: - keys = {'0': {'_name': instance_ref['key_name'], - 'openssh-key': instance_ref['key_data']}} - else: - keys = '' hostname = instance_ref['hostname'] host = instance_ref['host'] availability_zone = self._get_availability_zone_by_host(ctxt, host) @@ -336,11 +353,16 @@ class CloudController(object): 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', - 'public-keys': keys, 'reservation-id': instance_ref['reservation_id'], 'security-groups': security_groups, 'mpi': mpi}} + # public-keys should be in meta-data only if user specified one + if instance_ref['key_name']: + data['meta-data']['public-keys'] = { + '0': {'_name': instance_ref['key_name'], + 'openssh-key': instance_ref['key_data']}} + for image_type in ['kernel', 'ramdisk']: if instance_ref.get('%s_id' % image_type): ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d743a66ef..ca7848678 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -16,6 +16,7 @@ # under the License. import functools +from lxml import etree import re import urlparse from xml.dom import minidom @@ -27,6 +28,7 @@ from nova import flags from nova import log as logging from nova import quota from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova.compute import vm_states from nova.compute import task_states @@ -185,30 +187,16 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): def get_id_from_href(href): - """Return the id portion of a url as an int. + """Return the id or uuid portion of a url. 
Given: 'http://www.foo.com/bar/123?q=4' - Returns: 123 + Returns: '123' - In order to support local hrefs, the href argument can be just an id: - Given: '123' - Returns: 123 + Given: 'http://www.foo.com/bar/abc123?q=4' + Returns: 'abc123' """ - LOG.debug(_("Attempting to treat %(href)s as an integer ID.") % locals()) - - try: - return int(href) - except ValueError: - pass - - LOG.debug(_("Attempting to treat %(href)s as a URL.") % locals()) - - try: - return int(urlparse.urlsplit(href).path.split('/')[-1]) - except ValueError as error: - LOG.debug(_("Failed to parse ID from %(href)s: %(error)s") % locals()) - raise + return urlparse.urlsplit("%s" % href).path.split('/')[-1] def remove_version_from_href(href): @@ -308,54 +296,48 @@ class MetadataHeadersSerializer(wsgi.ResponseHeadersSerializer): class MetadataXMLSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_V11} + def __init__(self, xmlns=wsgi.XMLNS_V11): super(MetadataXMLSerializer, self).__init__(xmlns=xmlns) - def _meta_item_to_xml(self, doc, key, value): - node = doc.createElement('meta') - doc.appendChild(node) - node.setAttribute('key', '%s' % key) - text = doc.createTextNode('%s' % value) - node.appendChild(text) - return node - - def meta_list_to_xml(self, xml_doc, meta_items): - container_node = xml_doc.createElement('metadata') - for (key, value) in meta_items: - item_node = self._meta_item_to_xml(xml_doc, key, value) - container_node.appendChild(item_node) - return container_node - - def _meta_list_to_xml_string(self, metadata_dict): - xml_doc = minidom.Document() - items = metadata_dict['metadata'].items() - container_node = self.meta_list_to_xml(xml_doc, items) - xml_doc.appendChild(container_node) - self._add_xmlns(container_node) - return xml_doc.toxml('UTF-8') + def populate_metadata(self, metadata_elem, meta_dict): + for (key, value) in meta_dict.items(): + elem = etree.SubElement(metadata_elem, 'meta') + elem.set('key', str(key)) + elem.text = value + + def _populate_meta_item(self, meta_elem, meta_item_dict): + """Populate a meta xml element from a dict.""" + (key, value) = meta_item_dict.items()[0] + meta_elem.set('key', str(key)) + meta_elem.text = value def index(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def create(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def update_all(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) - - def _meta_item_to_xml_string(self, meta_item_dict): - xml_doc = minidom.Document() - item_key, item_value = meta_item_dict.items()[0] - item_node = self._meta_item_to_xml(xml_doc, item_key, item_value) - xml_doc.appendChild(item_node) - self._add_xmlns(item_node) - return xml_doc.toxml('UTF-8') + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def show(self, meta_item_dict): - return self._meta_item_to_xml_string(meta_item_dict['meta']) + meta = etree.Element('meta', nsmap=self.NSMAP) + self._populate_meta_item(meta, meta_item_dict['meta']) + return self._to_xml(meta) def update(self, meta_item_dict): - return self._meta_item_to_xml_string(meta_item_dict['meta']) + 
meta = etree.Element('meta', nsmap=self.NSMAP) + self._populate_meta_item(meta, meta_item_dict['meta']) + return self._to_xml(meta) def default(self, *args, **kwargs): return '' diff --git a/nova/api/openstack/contrib/flavorextradata.py b/nova/api/openstack/contrib/flavorextradata.py new file mode 100644 index 000000000..d0554c7b4 --- /dev/null +++ b/nova/api/openstack/contrib/flavorextradata.py @@ -0,0 +1,46 @@ +# Copyright 2011 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Flavor extra data extension +Openstack API version 1.1 lists "name", "ram", "disk", "vcpus" as flavor +attributes. This extension adds to that list: + rxtx_cap + rxtx_quota + swap +""" + +from nova.api.openstack import extensions + + +class Flavorextradata(extensions.ExtensionDescriptor): + """The Flavor extra data extension for the OpenStack API.""" + + def get_name(self): + return "FlavorExtraData" + + def get_alias(self): + return "os-flavor-extra-data" + + def get_description(self): + return "Provide additional data for flavors" + + def get_namespace(self): + return "http://docs.openstack.org/ext/flavor_extra_data/api/v1.1" + + def get_updated(self): + return "2011-09-14T00:00:00+00:00" + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index 3de128895..2e5dbab73 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -18,11 +18,14 @@ import webob from webob import exc from nova import compute +from nova import flags from nova import log as logging +from nova import utils from nova.api.openstack import extensions as exts from nova.api.openstack import faults +FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.contrib.rescue") @@ -30,7 +33,7 @@ def wrap_errors(fn): """"Ensure errors are not passed along.""" def wrapped(*args): try: - fn(*args) + return fn(*args) except Exception, e: return faults.Fault(exc.HTTPInternalServerError()) return wrapped @@ -46,9 +49,13 @@ class Rescue(exts.ExtensionDescriptor): def _rescue(self, input_dict, req, instance_id): """Rescue an instance.""" context = req.environ["nova.context"] - self.compute_api.rescue(context, instance_id) + if input_dict['rescue'] and 'adminPass' in input_dict['rescue']: + password = input_dict['rescue']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) + self.compute_api.rescue(context, instance_id, rescue_password=password) - return webob.Response(status_int=202) + return {'adminPass': password} @wrap_errors def _unrescue(self, input_dict, req, instance_id): diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index d62225e58..9d4254f1f 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -372,8 +372,7 @@ class BootFromVolumeController(servers.ControllerV11): for key in ['instance_type', 'image_ref']: inst[key] = extra_values[key] - builder = self._get_view_builder(req) 
- server = builder.build(inst, is_detail=True) + server = self._build_view(req, inst, is_detail=True) server['server']['adminPass'] = extra_values['password'] return server diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index e27ddf78b..79f17e27f 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -317,14 +317,14 @@ class CreateInstanceHelper(object): def _get_server_admin_password_old_style(self, server): """ Determine the admin password for a server on creation """ - return utils.generate_password(16) + return utils.generate_password(FLAGS.password_length) def _get_server_admin_password_new_style(self, server): """ Determine the admin password for a server on creation """ password = server.get('adminPass') if password is None: - return utils.generate_password(16) + return utils.generate_password(FLAGS.password_length) if not isinstance(password, basestring) or password == '': msg = _("Invalid adminPass") raise exc.HTTPBadRequest(explanation=msg) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index fd36060da..8a310c900 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -16,12 +16,13 @@ # under the License. import webob -import xml.dom.minidom as minidom +from lxml import etree from nova import db from nova import exception from nova.api.openstack import views from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil class Controller(object): @@ -78,48 +79,48 @@ class ControllerV11(Controller): class FlavorXMLSerializer(wsgi.XMLDictSerializer): + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): super(FlavorXMLSerializer, self).__init__(xmlns=wsgi.XMLNS_V11) - def _flavor_to_xml(self, xml_doc, flavor, detailed): - flavor_node = xml_doc.createElement('flavor') - flavor_node.setAttribute('id', str(flavor['id'])) - flavor_node.setAttribute('name', flavor['name']) + def _populate_flavor(self, flavor_elem, flavor_dict, detailed=False): + """Populate a flavor xml element from a dict.""" + flavor_elem.set('name', flavor_dict['name']) + flavor_elem.set('id', str(flavor_dict['id'])) if detailed: - flavor_node.setAttribute('ram', str(flavor['ram'])) - flavor_node.setAttribute('disk', str(flavor['disk'])) - - link_nodes = self._create_link_nodes(xml_doc, flavor['links']) - for link_node in link_nodes: - flavor_node.appendChild(link_node) - return flavor_node + flavor_elem.set('ram', str(flavor_dict['ram'])) + flavor_elem.set('disk', str(flavor_dict['disk'])) - def _flavors_list_to_xml(self, xml_doc, flavors, detailed): - container_node = xml_doc.createElement('flavors') + for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): + flavor_elem.set(attr, str(flavor_dict.get(attr, ""))) - for flavor in flavors: - item_node = self._flavor_to_xml(xml_doc, flavor, detailed) - container_node.appendChild(item_node) - return container_node + for link in flavor_dict.get('links', []): + elem = etree.SubElement(flavor_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return flavor_elem def show(self, flavor_container): - xml_doc = minidom.Document() - flavor = flavor_container['flavor'] - node = self._flavor_to_xml(xml_doc, flavor, True) - return self.to_xml_string(node, True) - - def detail(self, flavors_container): - xml_doc = minidom.Document() - flavors = flavors_container['flavors'] - node = self._flavors_list_to_xml(xml_doc, 
flavors, True) - return self.to_xml_string(node, True) - - def index(self, flavors_container): - xml_doc = minidom.Document() - flavors = flavors_container['flavors'] - node = self._flavors_list_to_xml(xml_doc, flavors, False) - return self.to_xml_string(node, True) + flavor = etree.Element('flavor', nsmap=self.NSMAP) + self._populate_flavor(flavor, flavor_container['flavor'], True) + return self._to_xml(flavor) + + def detail(self, flavors_dict): + flavors = etree.Element('flavors', nsmap=self.NSMAP) + for flavor_dict in flavors_dict['flavors']: + flavor = etree.SubElement(flavors, 'flavor') + self._populate_flavor(flavor, flavor_dict, True) + return self._to_xml(flavors) + + def index(self, flavors_dict): + flavors = etree.Element('flavors', nsmap=self.NSMAP) + for flavor_dict in flavors_dict['flavors']: + flavor = etree.SubElement(flavors, 'flavor') + self._populate_flavor(flavor, flavor_dict, False) + return self._to_xml(flavors) def create_resource(version='1.0'): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index fcaa94651..4340cbe3e 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -16,8 +16,8 @@ import urlparse import os.path +from lxml import etree import webob.exc -from xml.dom import minidom from nova import compute from nova import exception @@ -29,6 +29,7 @@ from nova.api.openstack import image_metadata from nova.api.openstack import servers from nova.api.openstack.views import images as images_view from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil LOG = log.getLogger('nova.api.openstack.images') @@ -206,93 +207,71 @@ class ControllerV11(Controller): class ImageXMLSerializer(wsgi.XMLDictSerializer): - xmlns = wsgi.XMLNS_V11 + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} def __init__(self): self.metadata_serializer = common.MetadataXMLSerializer() - def _image_to_xml(self, xml_doc, image): - image_node = xml_doc.createElement('image') - image_node.setAttribute('id', str(image['id'])) - image_node.setAttribute('name', image['name']) - link_nodes = self._create_link_nodes(xml_doc, - image['links']) - for link_node in link_nodes: - image_node.appendChild(link_node) - return image_node - - def _image_to_xml_detailed(self, xml_doc, image): - image_node = xml_doc.createElement('image') - self._add_image_attributes(image_node, image) - - if 'server' in image: - server_node = self._create_server_node(xml_doc, image['server']) - image_node.appendChild(server_node) - - metadata = image.get('metadata', {}).items() - if len(metadata) > 0: - metadata_node = self._create_metadata_node(xml_doc, metadata) - image_node.appendChild(metadata_node) - - link_nodes = self._create_link_nodes(xml_doc, - image['links']) - for link_node in link_nodes: - image_node.appendChild(link_node) - - return image_node - - def _add_image_attributes(self, node, image): - node.setAttribute('id', str(image['id'])) - node.setAttribute('name', image['name']) - node.setAttribute('created', image['created']) - node.setAttribute('updated', image['updated']) - node.setAttribute('status', image['status']) - if 'progress' in image: - node.setAttribute('progress', str(image['progress'])) - - def _create_metadata_node(self, xml_doc, metadata): - return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata) - - def _create_server_node(self, xml_doc, server): - server_node = xml_doc.createElement('server') - server_node.setAttribute('id', str(server['id'])) - link_nodes = self._create_link_nodes(xml_doc, - server['links']) 
- for link_node in link_nodes: - server_node.appendChild(link_node) - return server_node - - def _image_list_to_xml(self, xml_doc, images, detailed): - container_node = xml_doc.createElement('images') + def _create_metadata_node(self, metadata_dict): + metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) + self.metadata_serializer.populate_metadata(metadata_elem, + metadata_dict) + return metadata_elem + + def _create_server_node(self, server_dict): + server_elem = etree.Element('server', nsmap=self.NSMAP) + server_elem.set('id', str(server_dict['id'])) + for link in server_dict.get('links', []): + elem = etree.SubElement(server_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return server_elem + + def _populate_image(self, image_elem, image_dict, detailed=False): + """Populate an image xml element from a dict.""" + + image_elem.set('name', image_dict['name']) + image_elem.set('id', str(image_dict['id'])) if detailed: - image_to_xml = self._image_to_xml_detailed - else: - image_to_xml = self._image_to_xml - - for image in images: - item_node = image_to_xml(xml_doc, image) - container_node.appendChild(item_node) - return container_node + image_elem.set('updated', str(image_dict['updated'])) + image_elem.set('created', str(image_dict['created'])) + image_elem.set('status', str(image_dict['status'])) + if 'progress' in image_dict: + image_elem.set('progress', str(image_dict['progress'])) + if 'server' in image_dict: + server_elem = self._create_server_node(image_dict['server']) + image_elem.append(server_elem) + + meta_elem = self._create_metadata_node( + image_dict.get('metadata', {})) + image_elem.append(meta_elem) + + for link in image_dict.get('links', []): + elem = etree.SubElement(image_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return image_elem def index(self, images_dict): - xml_doc = minidom.Document() - node = self._image_list_to_xml(xml_doc, - images_dict['images'], - detailed=False) - return self.to_xml_string(node, True) + images = etree.Element('images', nsmap=self.NSMAP) + for image_dict in images_dict['images']: + image = etree.SubElement(images, 'image') + self._populate_image(image, image_dict, False) + return self._to_xml(images) def detail(self, images_dict): - xml_doc = minidom.Document() - node = self._image_list_to_xml(xml_doc, - images_dict['images'], - detailed=True) - return self.to_xml_string(node, True) + images = etree.Element('images', nsmap=self.NSMAP) + for image_dict in images_dict['images']: + image = etree.SubElement(images, 'image') + self._populate_image(image, image_dict, True) + return self._to_xml(images) def show(self, image_dict): - xml_doc = minidom.Document() - node = self._image_to_xml_detailed(xml_doc, - image_dict['image']) - return self.to_xml_string(node, True) + image = etree.Element('image', nsmap=self.NSMAP) + self._populate_image(image, image_dict['image'], True) + return self._to_xml(image) def create_resource(version='1.0'): diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index a74fae487..7e644ba04 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -15,14 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. 
+from lxml import etree import time -from xml.dom import minidom from webob import exc import nova import nova.api.openstack.views.addresses from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova import db @@ -102,42 +103,36 @@ class ControllerV11(Controller): class IPXMLSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_V11} + def __init__(self, xmlns=wsgi.XMLNS_V11): super(IPXMLSerializer, self).__init__(xmlns=xmlns) - def _ip_to_xml(self, xml_doc, ip_dict): - ip_node = xml_doc.createElement('ip') - ip_node.setAttribute('addr', ip_dict['addr']) - ip_node.setAttribute('version', str(ip_dict['version'])) - return ip_node - - def _network_to_xml(self, xml_doc, network_id, ip_dicts): - network_node = xml_doc.createElement('network') - network_node.setAttribute('id', network_id) + def populate_addresses_node(self, addresses_elem, addresses_dict): + for (network_id, ip_dicts) in addresses_dict.items(): + network_elem = self._create_network_node(network_id, ip_dicts) + addresses_elem.append(network_elem) + def _create_network_node(self, network_id, ip_dicts): + network_elem = etree.Element('network', nsmap=self.NSMAP) + network_elem.set('id', str(network_id)) for ip_dict in ip_dicts: - ip_node = self._ip_to_xml(xml_doc, ip_dict) - network_node.appendChild(ip_node) - - return network_node - - def networks_to_xml(self, xml_doc, networks_container): - addresses_node = xml_doc.createElement('addresses') - for (network_id, ip_dicts) in networks_container.items(): - network_node = self._network_to_xml(xml_doc, network_id, ip_dicts) - addresses_node.appendChild(network_node) - return addresses_node - - def show(self, network_container): - (network_id, ip_dicts) = network_container.items()[0] - xml_doc = minidom.Document() - node = self._network_to_xml(xml_doc, network_id, ip_dicts) - return self.to_xml_string(node, False) - - def index(self, addresses_container): - xml_doc = minidom.Document() - node = self.networks_to_xml(xml_doc, addresses_container['addresses']) - return self.to_xml_string(node, False) + ip_elem = etree.SubElement(network_elem, 'ip') + ip_elem.set('version', str(ip_dict['version'])) + ip_elem.set('addr', ip_dict['addr']) + return network_elem + + def show(self, network_dict): + (network_id, ip_dicts) = network_dict.items()[0] + network = self._create_network_node(network_id, ip_dicts) + return self._to_xml(network) + + def index(self, addresses_dict): + addresses = etree.Element('addresses', nsmap=self.NSMAP) + self.populate_addresses_node(addresses, + addresses_dict.get('addresses', {})) + return self._to_xml(addresses) def create_resource(version): diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 86afa3b62..f6df94eea 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -20,12 +20,12 @@ Module dedicated functions/classes dealing with rate limiting requests. import copy import httplib import json +from lxml import etree import math import re import time import urllib import webob.exc -from xml.dom import minidom from collections import defaultdict @@ -38,6 +38,7 @@ from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil # Convenience constants for the limits dictionary passed to Limiter(). 
@@ -81,52 +82,49 @@ class LimitsXMLSerializer(wsgi.XMLDictSerializer): xmlns = wsgi.XMLNS_V11 + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): pass - def _create_rates_node(self, xml_doc, rates): - rates_node = xml_doc.createElement('rates') + def _create_rates_node(self, rates): + rates_elem = etree.Element('rates', nsmap=self.NSMAP) for rate in rates: - rate_node = xml_doc.createElement('rate') - rate_node.setAttribute('uri', rate['uri']) - rate_node.setAttribute('regex', rate['regex']) - + rate_node = etree.SubElement(rates_elem, 'rate') + rate_node.set('uri', rate['uri']) + rate_node.set('regex', rate['regex']) for limit in rate['limit']: - limit_node = xml_doc.createElement('limit') - limit_node.setAttribute('value', str(limit['value'])) - limit_node.setAttribute('verb', limit['verb']) - limit_node.setAttribute('remaining', str(limit['remaining'])) - limit_node.setAttribute('unit', limit['unit']) - limit_node.setAttribute('next-available', - str(limit['next-available'])) - rate_node.appendChild(limit_node) - - rates_node.appendChild(rate_node) - return rates_node - - def _create_absolute_node(self, xml_doc, absolutes): - absolute_node = xml_doc.createElement('absolute') - for key, value in absolutes.iteritems(): - limit_node = xml_doc.createElement('limit') - limit_node.setAttribute('name', key) - limit_node.setAttribute('value', str(value)) - absolute_node.appendChild(limit_node) - return absolute_node - - def _limits_to_xml(self, xml_doc, limits): - limits_node = xml_doc.createElement('limits') - rates_node = self._create_rates_node(xml_doc, limits['rate']) - limits_node.appendChild(rates_node) - - absolute_node = self._create_absolute_node(xml_doc, limits['absolute']) - limits_node.appendChild(absolute_node) - - return limits_node + limit_elem = etree.SubElement(rate_node, 'limit') + limit_elem.set('value', str(limit['value'])) + limit_elem.set('verb', str(limit['verb'])) + limit_elem.set('remaining', str(limit['remaining'])) + limit_elem.set('unit', str(limit['unit'])) + limit_elem.set('next-available', str(limit['next-available'])) + return rates_elem + + def _create_absolute_node(self, absolute_dict): + absolute_elem = etree.Element('absolute', nsmap=self.NSMAP) + for key, value in absolute_dict.items(): + limit_elem = etree.SubElement(absolute_elem, 'limit') + limit_elem.set('name', str(key)) + limit_elem.set('value', str(value)) + return absolute_elem + + def _populate_limits(self, limits_elem, limits_dict): + """Populate a limits xml element from a dict.""" + + rates_elem = self._create_rates_node( + limits_dict.get('rate', [])) + limits_elem.append(rates_elem) + + absolutes_elem = self._create_absolute_node( + limits_dict.get('absolute', {})) + limits_elem.append(absolutes_elem) def index(self, limits_dict): - xml_doc = minidom.Document() - node = self._limits_to_xml(xml_doc, limits_dict['limits']) - return self.to_xml_string(node, False) + limits = etree.Element('limits', nsmap=self.NSMAP) + self._populate_limits(limits, limits_dict['limits']) + return self._to_xml(limits) def create_resource(version='1.0'): diff --git a/nova/api/openstack/schemas/v1.1/addresses.rng b/nova/api/openstack/schemas/v1.1/addresses.rng new file mode 100644 index 000000000..b498e8a63 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/addresses.rng @@ -0,0 +1,14 @@ +<element name="addresses" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <zeroOrMore> + <element name="network"> + <attribute name="id"> <text/> 
</attribute> + <zeroOrMore> + <element name="ip"> + <attribute name="version"> <text/> </attribute> + <attribute name="addr"> <text/> </attribute> + </element> + </zeroOrMore> + </element> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/flavor.rng b/nova/api/openstack/schemas/v1.1/flavor.rng new file mode 100644 index 000000000..6d3adc8dc --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavor.rng @@ -0,0 +1,14 @@ +<element name="flavor" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <attribute name="name"> <text/> </attribute> + <attribute name="id"> <text/> </attribute> + <attribute name="ram"> <text/> </attribute> + <attribute name="disk"> <text/> </attribute> + <attribute name="rxtx_cap"> <text/> </attribute> + <attribute name="rxtx_quota"> <text/> </attribute> + <attribute name="swap"> <text/> </attribute> + <attribute name="vcpus"> <text/> </attribute> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/flavors.rng b/nova/api/openstack/schemas/v1.1/flavors.rng new file mode 100644 index 000000000..b7a3acc01 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavors.rng @@ -0,0 +1,6 @@ +<element name="flavors" xmlns="http://relaxng.org/ns/structure/1.0" + ns="http://docs.openstack.org/compute/api/v1.1"> + <zeroOrMore> + <externalRef href="flavor.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/flavors_index.rng b/nova/api/openstack/schemas/v1.1/flavors_index.rng new file mode 100644 index 000000000..d1a4fedb1 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavors_index.rng @@ -0,0 +1,12 @@ +<element name="flavors" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <zeroOrMore> + <element name="flavor"> + <attribute name="name"> <text/> </attribute> + <attribute name="id"> <text/> </attribute> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> + </element> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/image.rng b/nova/api/openstack/schemas/v1.1/image.rng new file mode 100644 index 000000000..887f76751 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/image.rng @@ -0,0 +1,30 @@ +<element name="image" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <attribute name="name"> <text/> </attribute> + <attribute name="id"> <text/> </attribute> + <attribute name="updated"> <text/> </attribute> + <attribute name="created"> <text/> </attribute> + <attribute name="status"> <text/> </attribute> + <optional> + <attribute name="progress"> <text/> </attribute> + </optional> + <optional> + <element name="server"> + <attribute name="id"> <text/> </attribute> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> + </element> + </optional> + <element name="metadata"> + <zeroOrMore> + <element name="meta"> + <attribute name="key"> <text/> </attribute> + <text/> + </element> + </zeroOrMore> + </element> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/images.rng b/nova/api/openstack/schemas/v1.1/images.rng new file mode 100644 index 000000000..064d4d9cc --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/images.rng @@ -0,0 +1,6 @@ +<element name="images" xmlns="http://relaxng.org/ns/structure/1.0" + ns="http://docs.openstack.org/compute/api/v1.1"> + <zeroOrMore> + <externalRef 
href="image.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/images_index.rng b/nova/api/openstack/schemas/v1.1/images_index.rng new file mode 100644 index 000000000..81af19cb5 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/images_index.rng @@ -0,0 +1,12 @@ +<element name="images" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <zeroOrMore> + <element name="image"> + <attribute name="name"> <text/> </attribute> + <attribute name="id"> <text/> </attribute> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> + </element> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/limits.rng b/nova/api/openstack/schemas/v1.1/limits.rng new file mode 100644 index 000000000..1af8108ec --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/limits.rng @@ -0,0 +1,28 @@ +<element name="limits" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <element name="rates"> + <zeroOrMore> + <element name="rate"> + <attribute name="uri"> <text/> </attribute> + <attribute name="regex"> <text/> </attribute> + <zeroOrMore> + <element name="limit"> + <attribute name="value"> <text/> </attribute> + <attribute name="verb"> <text/> </attribute> + <attribute name="remaining"> <text/> </attribute> + <attribute name="unit"> <text/> </attribute> + <attribute name="next-available"> <text/> </attribute> + </element> + </zeroOrMore> + </element> + </zeroOrMore> + </element> + <element name="absolute"> + <zeroOrMore> + <element name="limit"> + <attribute name="name"> <text/> </attribute> + <attribute name="value"> <text/> </attribute> + </element> + </zeroOrMore> + </element> +</element> diff --git a/nova/api/openstack/schemas/v1.1/metadata.rng b/nova/api/openstack/schemas/v1.1/metadata.rng new file mode 100644 index 000000000..b2f5d702a --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/metadata.rng @@ -0,0 +1,9 @@ + <element name="metadata" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <zeroOrMore> + <element name="meta"> + <attribute name="key"> <text/> </attribute> + <text/> + </element> + </zeroOrMore> + </element> diff --git a/nova/api/openstack/schemas/v1.1/server.rng b/nova/api/openstack/schemas/v1.1/server.rng index ef835e408..4eb1a0b85 100644 --- a/nova/api/openstack/schemas/v1.1/server.rng +++ b/nova/api/openstack/schemas/v1.1/server.rng @@ -17,9 +17,6 @@ <optional> <attribute name="adminPass"> <text/> </attribute> </optional> - <zeroOrMore> - <externalRef href="../atom-link.rng"/> - </zeroOrMore> <element name="image"> <attribute name="id"> <text/> </attribute> <externalRef href="../atom-link.rng"/> @@ -49,4 +46,7 @@ </element> </zeroOrMore> </element> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> </element> diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f5447edc5..0ef246852 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,8 +17,8 @@ import base64 import os import traceback +from lxml import etree from webob import exc -from xml.dom import minidom import webob from nova import compute @@ -38,6 +38,7 @@ import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers +from nova.api.openstack import xmlutil LOG = logging.getLogger('nova.api.openstack.servers') @@ -476,16 +477,22 @@ class Controller(object): return 
webob.Response(status_int=202) @scheduler_api.redirect_handler - def rescue(self, req, id): + def rescue(self, req, id, body={}): """Permit users to rescue the server.""" context = req.environ["nova.context"] try: - self.compute_api.rescue(context, id) + if 'rescue' in body and body['rescue'] and \ + 'adminPass' in body['rescue']: + password = body['rescue']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) + self.compute_api.rescue(context, id, rescue_password=password) except Exception: readable = traceback.format_exc() LOG.exception(_("compute.api::rescue %s"), readable) raise exc.HTTPUnprocessableEntity() - return webob.Response(status_int=202) + + return {'adminPass': password} @scheduler_api.redirect_handler def unrescue(self, req, id): @@ -617,7 +624,7 @@ class ControllerV10(Controller): LOG.debug(msg) raise exc.HTTPBadRequest(explanation=msg) - password = utils.generate_password(16) + password = utils.generate_password(FLAGS.password_length) try: self.compute_api.rebuild(context, instance_id, image_id, password) @@ -759,8 +766,10 @@ class ControllerV11(Controller): self._validate_metadata(metadata) self._decode_personalities(personalities) - password = info["rebuild"].get("adminPass", - utils.generate_password(16)) + if 'rebuild' in info and 'adminPass' in info['rebuild']: + password = info['rebuild']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) try: self.compute_api.rebuild(context, instance_id, image_href, @@ -850,130 +859,113 @@ class HeadersSerializer(wsgi.ResponseHeadersSerializer): class ServerXMLSerializer(wsgi.XMLDictSerializer): - xmlns = wsgi.XMLNS_V11 + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} def __init__(self): self.metadata_serializer = common.MetadataXMLSerializer() self.addresses_serializer = ips.IPXMLSerializer() - def _create_basic_entity_node(self, xml_doc, id, links, name): - basic_node = xml_doc.createElement(name) - basic_node.setAttribute('id', str(id)) - link_nodes = self._create_link_nodes(xml_doc, links) - for link_node in link_nodes: - basic_node.appendChild(link_node) - return basic_node - - def _create_metadata_node(self, xml_doc, metadata): - return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata) - - def _create_addresses_node(self, xml_doc, addresses): - return self.addresses_serializer.networks_to_xml(xml_doc, addresses) - - def _add_server_attributes(self, node, server): - node.setAttribute('id', str(server['id'])) - node.setAttribute('userId', str(server['user_id'])) - node.setAttribute('tenantId', str(server['tenant_id'])) - node.setAttribute('uuid', str(server['uuid'])) - node.setAttribute('hostId', str(server['hostId'])) - node.setAttribute('name', server['name']) - node.setAttribute('created', str(server['created'])) - node.setAttribute('updated', str(server['updated'])) - node.setAttribute('status', server['status']) - if 'accessIPv4' in server: - node.setAttribute('accessIPv4', str(server['accessIPv4'])) - if 'accessIPv6' in server: - node.setAttribute('accessIPv6', str(server['accessIPv6'])) - if 'progress' in server: - node.setAttribute('progress', str(server['progress'])) - - def _server_to_xml(self, xml_doc, server): - server_node = xml_doc.createElement('server') - server_node.setAttribute('id', str(server['id'])) - server_node.setAttribute('name', server['name']) - link_nodes = self._create_link_nodes(xml_doc, - server['links']) - for link_node in link_nodes: - server_node.appendChild(link_node) - return server_node - - def 
_server_to_xml_detailed(self, xml_doc, server): - server_node = xml_doc.createElement('server') - self._add_server_attributes(server_node, server) - - link_nodes = self._create_link_nodes(xml_doc, - server['links']) - for link_node in link_nodes: - server_node.appendChild(link_node) - - if 'image' in server: - image_node = self._create_basic_entity_node(xml_doc, - server['image']['id'], - server['image']['links'], - 'image') - server_node.appendChild(image_node) - - if 'flavor' in server: - flavor_node = self._create_basic_entity_node(xml_doc, - server['flavor']['id'], - server['flavor']['links'], - 'flavor') - server_node.appendChild(flavor_node) - - metadata = server.get('metadata', {}).items() - if len(metadata) > 0: - metadata_node = self._create_metadata_node(xml_doc, metadata) - server_node.appendChild(metadata_node) - - addresses_node = self._create_addresses_node(xml_doc, - server['addresses']) - server_node.appendChild(addresses_node) - - if 'security_groups' in server: - security_groups_node = self._create_security_groups_node(xml_doc, - server['security_groups']) - server_node.appendChild(security_groups_node) - - return server_node - - def _server_list_to_xml(self, xml_doc, servers, detailed): - container_node = xml_doc.createElement('servers') + def _create_metadata_node(self, metadata_dict): + metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) + self.metadata_serializer.populate_metadata(metadata_elem, + metadata_dict) + return metadata_elem + + def _create_image_node(self, image_dict): + image_elem = etree.Element('image', nsmap=self.NSMAP) + image_elem.set('id', str(image_dict['id'])) + for link in image_dict.get('links', []): + elem = etree.SubElement(image_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return image_elem + + def _create_flavor_node(self, flavor_dict): + flavor_elem = etree.Element('flavor', nsmap=self.NSMAP) + flavor_elem.set('id', str(flavor_dict['id'])) + for link in flavor_dict.get('links', []): + elem = etree.SubElement(flavor_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return flavor_elem + + def _create_addresses_node(self, addresses_dict): + addresses_elem = etree.Element('addresses', nsmap=self.NSMAP) + self.addresses_serializer.populate_addresses_node(addresses_elem, + addresses_dict) + return addresses_elem + + def _populate_server(self, server_elem, server_dict, detailed=False): + """Populate a server xml element from a dict.""" + + server_elem.set('name', server_dict['name']) + server_elem.set('id', str(server_dict['id'])) if detailed: - server_to_xml = self._server_to_xml_detailed - else: - server_to_xml = self._server_to_xml - - for server in servers: - item_node = server_to_xml(xml_doc, server) - container_node.appendChild(item_node) - return container_node + server_elem.set('uuid', str(server_dict['uuid'])) + server_elem.set('userId', str(server_dict['user_id'])) + server_elem.set('tenantId', str(server_dict['tenant_id'])) + server_elem.set('updated', str(server_dict['updated'])) + server_elem.set('created', str(server_dict['created'])) + server_elem.set('hostId', str(server_dict['hostId'])) + server_elem.set('accessIPv4', str(server_dict['accessIPv4'])) + server_elem.set('accessIPv6', str(server_dict['accessIPv6'])) + server_elem.set('status', str(server_dict['status'])) + if 'progress' in server_dict: + server_elem.set('progress', str(server_dict['progress'])) + image_elem = 
self._create_image_node(server_dict['image']) + server_elem.append(image_elem) + + flavor_elem = self._create_flavor_node(server_dict['flavor']) + server_elem.append(flavor_elem) + + meta_elem = self._create_metadata_node( + server_dict.get('metadata', {})) + server_elem.append(meta_elem) + + addresses_elem = self._create_addresses_node( + server_dict.get('addresses', {})) + server_elem.append(addresses_elem) + groups = server_dict.get('security_groups') + if groups: + groups_elem = etree.SubElement(server_elem, 'security_groups') + for group in groups: + group_elem = etree.SubElement(groups_elem, + 'security_group') + group_elem.set('name', group['name']) + + for link in server_dict.get('links', []): + elem = etree.SubElement(server_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return server_elem def index(self, servers_dict): - xml_doc = minidom.Document() - node = self._server_list_to_xml(xml_doc, - servers_dict['servers'], - detailed=False) - return self.to_xml_string(node, True) + servers = etree.Element('servers', nsmap=self.NSMAP) + for server_dict in servers_dict['servers']: + server = etree.SubElement(servers, 'server') + self._populate_server(server, server_dict, False) + return self._to_xml(servers) def detail(self, servers_dict): - xml_doc = minidom.Document() - node = self._server_list_to_xml(xml_doc, - servers_dict['servers'], - detailed=True) - return self.to_xml_string(node, True) + servers = etree.Element('servers', nsmap=self.NSMAP) + for server_dict in servers_dict['servers']: + server = etree.SubElement(servers, 'server') + self._populate_server(server, server_dict, True) + return self._to_xml(servers) def show(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - return self.to_xml_string(node, True) + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + return self._to_xml(server) def create(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - node.setAttribute('adminPass', server_dict['server']['adminPass']) - return self.to_xml_string(node, True) + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + server.set('adminPass', server_dict['server']['adminPass']) + return self._to_xml(server) def action(self, server_dict): #NOTE(bcwaldon): We need a way to serialize actions individually. 
This @@ -981,23 +973,9 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): return self.create(server_dict) def update(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - return self.to_xml_string(node, True) - - def _security_group_to_xml(self, doc, security_group): - node = doc.createElement('security_group') - node.setAttribute('name', str(security_group.get('name'))) - return node - - def _create_security_groups_node(self, xml_doc, security_groups): - security_groups_node = xml_doc.createElement('security_groups') - if security_groups: - for security_group in security_groups: - node = self._security_group_to_xml(xml_doc, security_group) - security_groups_node.appendChild(node) - return security_groups_node + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + return self._to_xml(server) def create_resource(version='1.0'): diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index e2f892fb6..75a1d0ba4 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -16,12 +16,13 @@ # under the License. from datetime import datetime +from lxml import etree import webob import webob.dec -from xml.dom import minidom import nova.api.openstack.views.versions from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil VERSIONS = { @@ -106,7 +107,9 @@ class Versions(wsgi.Resource): headers_serializer=headers_serializer) supported_content_types = ('application/json', + 'application/vnd.openstack.compute+json', 'application/xml', + 'application/vnd.openstack.compute+xml', 'application/atom+xml') deserializer = VersionsRequestDeserializer( supported_content_types=supported_content_types) @@ -159,83 +162,51 @@ class VersionsRequestDeserializer(wsgi.RequestDeserializer): class VersionsXMLSerializer(wsgi.XMLDictSerializer): - #TODO(wwolf): this is temporary until we get rid of toprettyxml - # in the base class (XMLDictSerializer), which I plan to do in - # another branch - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toxml(encoding='UTF-8') - - def _versions_to_xml(self, versions, name="versions", xmlns=None): - root = self._xml_doc.createElement(name) - root.setAttribute("xmlns", wsgi.XMLNS_V11) - root.setAttribute("xmlns:atom", wsgi.XMLNS_ATOM) - for version in versions: - root.appendChild(self._create_version_node(version)) - - return root - - def _create_media_types(self, media_types): - base = self._xml_doc.createElement('media-types') - for type in media_types: - node = self._xml_doc.createElement('media-type') - node.setAttribute('base', type['base']) - node.setAttribute('type', type['type']) - base.appendChild(node) - - return base - - def _create_version_node(self, version, create_ns=False): - version_node = self._xml_doc.createElement('version') - if create_ns: - xmlns = wsgi.XMLNS_V11 - xmlns_atom = wsgi.XMLNS_ATOM - version_node.setAttribute('xmlns', xmlns) - version_node.setAttribute('xmlns:atom', xmlns_atom) - - version_node.setAttribute('id', version['id']) - version_node.setAttribute('status', version['status']) + def _populate_version(self, version_node, version): + version_node.set('id', version['id']) + version_node.set('status', version['status']) if 'updated' in version: - version_node.setAttribute('updated', version['updated']) - + version_node.set('updated', version['updated']) if 'media-types' in version: - media_types = 
self._create_media_types(version['media-types']) - version_node.appendChild(media_types) - - link_nodes = self._create_link_nodes(self._xml_doc, version['links']) - for link in link_nodes: - version_node.appendChild(link) - - return version_node + media_types = etree.SubElement(version_node, 'media-types') + for mtype in version['media-types']: + elem = etree.SubElement(media_types, 'media-type') + elem.set('base', mtype['base']) + elem.set('type', mtype['type']) + for link in version.get('links', []): + elem = etree.SubElement(version_node, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + if 'type' in link: + elem.set('type', link['type']) + + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} def index(self, data): - self._xml_doc = minidom.Document() - node = self._versions_to_xml(data['versions']) - - return self.to_xml_string(node) + root = etree.Element('versions', nsmap=self.NSMAP) + for version in data['versions']: + version_elem = etree.SubElement(root, 'version') + self._populate_version(version_elem, version) + return self._to_xml(root) def show(self, data): - self._xml_doc = minidom.Document() - node = self._create_version_node(data['version'], True) - - return self.to_xml_string(node) + root = etree.Element('version', nsmap=self.NSMAP) + self._populate_version(root, data['version']) + return self._to_xml(root) def multi(self, data): - self._xml_doc = minidom.Document() - node = self._versions_to_xml(data['choices'], 'choices', - xmlns=wsgi.XMLNS_V11) - - return self.to_xml_string(node) + root = etree.Element('choices', nsmap=self.NSMAP) + for version in data['choices']: + version_elem = etree.SubElement(root, 'version') + self._populate_version(version_elem, version) + return self._to_xml(root) class VersionsAtomSerializer(wsgi.XMLDictSerializer): - #TODO(wwolf): this is temporary until we get rid of toprettyxml - # in the base class (XMLDictSerializer), which I plan to do in - # another branch - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toxml(encoding='UTF-8') + + NSMAP = {None: xmlutil.XMLNS_ATOM} def __init__(self, metadata=None, xmlns=None): self.metadata = metadata or {} @@ -244,14 +215,6 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): else: self.xmlns = xmlns - def _create_text_elem(self, name, text, type=None): - elem = self._xml_doc.createElement(name) - if type: - elem.setAttribute('type', type) - elem_text = self._xml_doc.createTextNode(text) - elem.appendChild(elem_text) - return elem - def _get_most_recent_update(self, versions): recent = None for version in versions: @@ -269,105 +232,64 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' - def _create_detail_meta(self, root, version): - title = self._create_text_elem('title', "About This Version", - type='text') - - updated = self._create_text_elem('updated', version['updated']) - - uri = version['links'][0]['href'] - id = self._create_text_elem('id', uri) - - link = self._xml_doc.createElement('link') - link.setAttribute('rel', 'self') - link.setAttribute('href', uri) + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title - author = self._xml_doc.createElement('author') - author_name = self._create_text_elem('name', 'Rackspace') - author_uri = 
self._create_text_elem('uri', 'http://www.rackspace.com/') - author.appendChild(author_name) - author.appendChild(author_uri) - - root.appendChild(title) - root.appendChild(updated) - root.appendChild(id) - root.appendChild(author) - root.appendChild(link) - - def _create_list_meta(self, root, versions): - title = self._create_text_elem('title', "Available API Versions", - type='text') # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) - updated = self._create_text_elem('updated', recent) - - base_url = self._get_base_url(versions[0]['links'][0]['href']) - id = self._create_text_elem('id', base_url) + etree.SubElement(feed, 'updated').text = recent - link = self._xml_doc.createElement('link') - link.setAttribute('rel', 'self') - link.setAttribute('href', base_url) + etree.SubElement(feed, 'id').text = feed_id - author = self._xml_doc.createElement('author') - author_name = self._create_text_elem('name', 'Rackspace') - author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') - author.appendChild(author_name) - author.appendChild(author_uri) + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) - root.appendChild(title) - root.appendChild(updated) - root.appendChild(id) - root.appendChild(author) - root.appendChild(link) + author = etree.SubElement(feed, 'author') + etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' - def _create_version_entries(self, root, versions): for version in versions: - entry = self._xml_doc.createElement('entry') - - id = self._create_text_elem('id', version['links'][0]['href']) - title = self._create_text_elem('title', - 'Version %s' % version['id'], - type='text') - updated = self._create_text_elem('updated', version['updated']) - - entry.appendChild(id) - entry.appendChild(title) - entry.appendChild(updated) - - for link in version['links']: - link_node = self._xml_doc.createElement('link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - if 'type' in link: - link_node.setAttribute('type', link['type']) - - entry.appendChild(link_node) - - content = self._create_text_elem('content', - 'Version %s %s (%s)' % - (version['id'], - version['status'], - version['updated']), - type='text') - - entry.appendChild(content) - root.appendChild(entry) + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry def index(self, data): - self._xml_doc = minidom.Document() - node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_list_meta(node, data['versions']) - self._create_version_entries(node, data['versions']) - - return self.to_xml_string(node) + versions = data['versions'] + feed_id = 
self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) def show(self, data): - self._xml_doc = minidom.Document() - node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_detail_meta(node, data['version']) - self._create_version_entries(node, [data['version']]) - - return self.to_xml_string(node) + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) class VersionsHeadersSerializer(wsgi.ResponseHeadersSerializer): @@ -388,7 +310,9 @@ def create_resource(version='1.0'): serializer = wsgi.ResponseSerializer(body_serializers) supported_content_types = ('application/json', + 'application/vnd.openstack.compute+json', 'application/xml', + 'application/vnd.openstack.compute+xml', 'application/atom+xml') deserializer = wsgi.RequestDeserializer( supported_content_types=supported_content_types) diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index aea34b424..def969a6c 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -50,6 +50,9 @@ class ViewBuilder(object): "disk": flavor_obj["local_gb"], } + for key in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): + detail[key] = flavor_obj.get(key, "") + detail.update(simple) return detail diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 8983b2957..86e8d7f3a 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -71,6 +71,7 @@ class ViewBuilder(object): } self._build_server(image, image_obj) + self._build_image_id(image, image_obj) if detail: image.update({ @@ -96,6 +97,12 @@ class ViewBuilderV10(ViewBuilder): except (KeyError, ValueError): pass + def _build_image_id(self, image, image_obj): + try: + image['id'] = int(image_obj['id']) + except ValueError: + pass + class ViewBuilderV11(ViewBuilder): """OpenStack API v1.1 Image Builder""" @@ -119,6 +126,9 @@ class ViewBuilderV11(ViewBuilder): except KeyError: return + def _build_image_id(self, image, image_obj): + image['id'] = "%s" % image_obj['id'] + def generate_href(self, image_id): """Return an href string pointing to this object.""" return os.path.join(self.base_url, self.project_id, diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index 03da80818..1ac398706 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -52,7 +52,7 @@ class ViewBuilder(object): def build_versions(self, versions): version_objs = [] - for version in versions: + for version in sorted(versions.keys()): version = versions[version] version_objs.append({ "id": version['id'], diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 8641e960a..180f328b9 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -1,5 +1,22 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. import json +from lxml import etree import webob from xml.dom import minidom from xml.parsers import expat @@ -18,6 +35,21 @@ XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') +# The vendor content types should serialize identically to the non-vendor +# content types. So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.compute+json': 'application/json', + 'application/vnd.openstack.compute+xml': 'application/xml', +} + +_SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml', +) + class Request(webob.Request): """Add some Openstack API-specific logic to the base webob.Request.""" @@ -29,7 +61,7 @@ class Request(webob.Request): """ supported_content_types = supported_content_types or \ - ('application/json', 'application/xml') + _SUPPORTED_CONTENT_TYPES parts = self.path.rsplit('.', 1) if len(parts) > 1: @@ -51,7 +83,7 @@ class Request(webob.Request): if not "Content-Type" in self.headers: return None - allowed_types = ("application/xml", "application/json") + allowed_types = _SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: @@ -191,7 +223,7 @@ class RequestDeserializer(object): supported_content_types=None): self.supported_content_types = supported_content_types or \ - ('application/json', 'application/xml') + _SUPPORTED_CONTENT_TYPES self.body_deserializers = { 'application/xml': XMLDeserializer(), @@ -249,7 +281,8 @@ class RequestDeserializer(object): def get_body_deserializer(self, content_type): try: - return self.body_deserializers[content_type] + ctype = _CONTENT_TYPE_MAP.get(content_type, content_type) + return self.body_deserializers[ctype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -315,7 +348,7 @@ class XMLDictSerializer(DictSerializer): def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) - return node.toprettyxml(indent=' ', encoding='UTF-8') + return node.toxml('UTF-8') #NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current @@ -392,6 +425,10 @@ class XMLDictSerializer(DictSerializer): link_nodes.append(link_node) return link_nodes + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + class ResponseHeadersSerializer(ActionDispatcher): """Default response headers serialization""" @@ -439,7 +476,8 @@ class ResponseSerializer(object): def get_body_serializer(self, content_type): try: - return self.body_serializers[content_type] + ctype = _CONTENT_TYPE_MAP.get(content_type, content_type) + return self.body_serializers[ctype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) diff --git a/nova/compute/api.py b/nova/compute/api.py index 8b3306409..1ecf1e2e3 100644 --- a/nova/compute/api.py +++ 
b/nova/compute/api.py @@ -1052,7 +1052,7 @@ class API(base.Base): vm_state=vm_states.ACTIVE, task_state=task_states.REBOOTING) self._cast_compute_message('reboot_instance', context, instance_id, - reboot_type) + params={'reboot_type': reboot_type}) @scheduler_api.reroute_compute("rebuild") def rebuild(self, context, instance_id, image_href, admin_password, @@ -1277,13 +1277,18 @@ class API(base.Base): self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") - def rescue(self, context, instance_id): + def rescue(self, context, instance_id, rescue_password=None): """Rescue the given instance.""" self.update(context, instance_id, vm_state=vm_states.ACTIVE, task_state=task_states.RESCUING) - self._cast_compute_message('rescue_instance', context, instance_id) + + rescue_params = { + "rescue_password": rescue_password + } + self._cast_compute_message('rescue_instance', context, instance_id, + params=rescue_params) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d80fa6e70..46c85889b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -70,8 +70,6 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') flags.DEFINE_string('stub_network', False, 'Stub network related code') -flags.DEFINE_integer('password_length', 12, - 'Length of generated admin passwords') flags.DEFINE_string('console_host', socket.gethostname(), 'Console proxy host to use to connect to instances on' 'this host.') @@ -797,12 +795,18 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock - def rescue_instance(self, context, instance_id): - """Rescue an instance on this host.""" + def rescue_instance(self, context, instance_id, **kwargs): + """ + Rescue an instance on this host. 
+ :param rescue_password: password to set on rescue instance + """ + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + instance_ref.admin_pass = kwargs.get('rescue_password', + utils.generate_password(FLAGS.password_length)) network_info = self._get_instance_nw_info(context, instance_ref) # NOTE(blamar): None of the virt drivers use the 'callback' param diff --git a/nova/db/api.py b/nova/db/api.py index a9d2dc065..05d81d8b2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -261,11 +261,13 @@ def floating_ip_disassociate(context, address): return IMPL.floating_ip_disassociate(context, address) -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, - fixed_address) + fixed_address, + host) def floating_ip_get_all(context): @@ -367,7 +369,7 @@ def fixed_ip_get_all(context): def fixed_ip_get_all_by_instance_host(context, host): """Get all allocated fixed ips filtered by instance host.""" - return IMPL.fixed_ip_get_all_instance_by_host(context, host) + return IMPL.fixed_ip_get_all_by_instance_host(context, host) def fixed_ip_get_by_address(context, address): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e5a661c7f..8ea154490 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -529,7 +529,8 @@ def floating_ip_count_by_project(context, project_id): @require_context -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): session = get_session() with session.begin(): # TODO(devcamcar): How to ensure floating_id belongs to user? @@ -540,6 +541,7 @@ def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): fixed_address, session=session) floating_ip_ref.fixed_ip = fixed_ip_ref + floating_ip_ref.host = host floating_ip_ref.save(session=session) @@ -583,6 +585,7 @@ def floating_ip_disassociate(context, address): else: fixed_ip_address = None floating_ip_ref.fixed_ip = None + floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address diff --git a/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py new file mode 100644 index 000000000..63e7bc4f9 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py @@ -0,0 +1,48 @@ +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
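The rescue path above now threads an optional rescue_password from the compute API through to the manager, falling back to a generated password of FLAGS.password_length characters. A simplified, self-contained sketch of that fallback; the generator below is a hypothetical stand-in for nova.utils.generate_password, and the dict-based instance is purely illustrative:

    import random
    import string

    PASSWORD_LENGTH = 12  # stands in for FLAGS.password_length


    def generate_password(length=PASSWORD_LENGTH):
        # Hypothetical stand-in for nova.utils.generate_password.
        chars = string.ascii_letters + string.digits
        return ''.join(random.choice(chars) for _ in range(length))


    def rescue_instance(instance, **kwargs):
        # Prefer a caller-supplied rescue password; otherwise fall back to
        # a freshly generated one of the configured length.
        instance['admin_pass'] = kwargs.get('rescue_password',
                                            generate_password(PASSWORD_LENGTH))
        return instance['admin_pass']


    # With no password supplied, the generated one has the configured length.
    assert len(rescue_instance({})) == PASSWORD_LENGTH
    assert rescue_instance({}, rescue_password='secret') == 'secret'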
+ +from sqlalchemy import Column, Integer, MetaData, Table, String + +meta = MetaData() + +default_local_device = Column( + 'default_local_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +default_swap_device = Column( + 'default_swap_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances.create_column(default_local_device) + instances.create_column(default_swap_device) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta.bind = migrate_engine + instances.drop_column('default_swap_device') + instances.drop_column('default_local_device') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 6ff7fbfda..7f0684588 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -232,6 +232,8 @@ class Instance(BASE, NovaBase): uuid = Column(String(36)) root_device_name = Column(String(255)) + default_local_device = Column(String(255), nullable=True) + default_swap_device = Column(String(255), nullable=True) config_drive = Column(String(255)) # User editable field meant to represent what ip should be used diff --git a/nova/exception.py b/nova/exception.py index a3cbb98cf..4f25d3721 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -506,7 +506,7 @@ class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") -class NoMoreFixedIps(Error): +class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") diff --git a/nova/flags.py b/nova/flags.py index e79b280c9..b90f274c5 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -421,6 +421,9 @@ DEFINE_string('root_helper', 'sudo', DEFINE_bool('use_ipv6', False, 'use ipv6') +DEFINE_integer('password_length', 12, + 'Length of generated instance admin passwords') + DEFINE_bool('monkey_patch', False, 'Whether to log monkey patching') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7d89b2bcc..e693e5939 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -68,6 +68,8 @@ flags.DEFINE_string('linuxnet_interface_driver', 'Driver used to create ethernet devices.') flags.DEFINE_string('linuxnet_ovs_integration_bridge', 'br-int', 'Name of Open vSwitch bridge used with linuxnet') +flags.DEFINE_bool('send_arp_for_ha', False, + 'send gratuitous ARPs for HA setup') flags.DEFINE_bool('use_single_default_gateway', False, 'Use single default gateway. 
Only first nic of vm' ' will get default gateway from dhcp server') @@ -407,6 +409,10 @@ def bind_floating_ip(floating_ip, check_exit_code=True): _execute('ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, run_as_root=True, check_exit_code=check_exit_code) + if FLAGS.send_arp_for_ha: + _execute('arping', '-U', floating_ip, + '-A', '-I', FLAGS.public_interface, + '-c', 1, run_as_root=True, check_exit_code=False) def unbind_floating_ip(floating_ip): @@ -478,6 +484,10 @@ def initialize_gateway_device(dev, network_ref): check_exit_code=False) if err and err != 'RTNETLINK answers: File exists\n': raise exception.Error('Failed to add ip: %s' % err) + if FLAGS.send_arp_for_ha: + _execute('arping', '-U', network_ref['gateway'], + '-A', '-I', dev, + '-c', 1, run_as_root=True, check_exit_code=False) if(FLAGS.use_ipv6): _execute('ip', '-f', 'inet6', 'addr', 'change', network_ref['cidr_v6'], diff --git a/nova/network/manager.py b/nova/network/manager.py index da360720b..70e51888f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -289,7 +289,8 @@ class FloatingIP(object): self.db.floating_ip_fixed_ip_associate(context, floating_address, - fixed_address) + fixed_address, + self.host) self.driver.bind_floating_ip(floating_address) self.driver.ensure_floating_forward(floating_address, fixed_address) diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 7fe353b3d..7bdae0552 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -1540,7 +1540,9 @@ class CloudTestCase(test.TestCase): 'ephemeral0': '/dev/sdb', 'swap': '/dev/sdc', 'ephemeral1': '/dev/sdd', - 'ephemeral2': '/dev/sd3'} + 'ephemeral2': '/dev/sd3', + 'ebs0': '/dev/sdh', + 'ebs1': '/dev/sdi'} self.assertEqual(self.cloud._format_instance_mapping(ctxt, instance_ref0), diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py index 74bb8729a..19515ca67 100644 --- a/nova/tests/api/openstack/common.py +++ b/nova/tests/api/openstack/common.py @@ -34,3 +34,25 @@ def webob_factory(url): req.body = json.dumps(body) return req return web_request + + +def compare_links(actual, expected): + """Compare xml atom links.""" + + return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) + + +def compare_media_types(actual, expected): + """Compare xml media types.""" + + return compare_tree_to_dict(actual, expected, ('base', 'type')) + + +def compare_tree_to_dict(actual, expected, keys): + """Compare parts of lxml.etree objects to dicts.""" + + for elem, data in zip(actual, expected): + for key in keys: + if elem.get(key) != data.get(key): + return False + return True diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index 24f756d5b..1ab2062e9 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -49,10 +49,14 @@ INSTANCE = { "id": 1, "display_name": "test_server", "uuid": FAKE_UUID, + "user_id": 'fake_user_id', + "tenant_id": 'fake_tenant_id', "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "security_groups": [{"id": 1, "name": "test"}], "progress": 0 + "image_ref": 'http://foo.com/123', + "instance_type": {"flavorid": '124'}, } diff --git a/nova/tests/api/openstack/contrib/test_rescue.py b/nova/tests/api/openstack/contrib/test_rescue.py index f8126d461..403bcfd4c 100644 --- 
a/nova/tests/api/openstack/contrib/test_rescue.py +++ b/nova/tests/api/openstack/contrib/test_rescue.py @@ -16,11 +16,14 @@ import json import webob from nova import compute +from nova import flags from nova import test from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS -def rescue(self, context, instance_id): + +def rescue(self, context, instance_id, rescue_password=None): pass @@ -34,7 +37,19 @@ class RescueTest(test.TestCase): self.stubs.Set(compute.api.API, "rescue", rescue) self.stubs.Set(compute.api.API, "unrescue", unrescue) - def test_rescue(self): + def test_rescue_with_preset_password(self): + body = {"rescue": {"adminPass": "AABBCC112233"}} + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + resp_json = json.loads(resp.body) + self.assertEqual("AABBCC112233", resp_json['adminPass']) + + def test_rescue_generates_password(self): body = dict(rescue=None) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') req.method = "POST" @@ -43,6 +58,8 @@ class RescueTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) + resp_json = json.loads(resp.body) + self.assertEqual(FLAGS.password_length, len(resp_json['adminPass'])) def test_unrescue(self): body = dict(unrescue=None) @@ -52,4 +69,4 @@ class RescueTest(test.TestCase): req.headers["content-type"] = "application/json" resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.status_int, 202) diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py new file mode 100644 index 000000000..52b65f5e1 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_volumes.py @@ -0,0 +1,77 @@ +# Copyright 2011 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
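The compare_links/compare_media_types helpers added to the test common module above both reduce to compare_tree_to_dict, which walks zipped (element, expected-dict) pairs and checks a fixed set of attributes. A small usage sketch with lxml; the XML snippet is made up for illustration:

    from lxml import etree

    def compare_tree_to_dict(actual, expected, keys):
        # Compare parts of lxml.etree objects to dicts.
        for elem, data in zip(actual, expected):
            for key in keys:
                if elem.get(key) != data.get(key):
                    return False
        return True

    xml = (b'<links>'
           b'<link rel="self" href="http://example/1"/>'
           b'<link rel="bookmark" href="http://example/1b"/>'
           b'</links>')
    root = etree.fromstring(xml)
    expected = [{'rel': 'self', 'href': 'http://example/1'},
                {'rel': 'bookmark', 'href': 'http://example/1b'}]
    # 'type' is absent on both sides, so it compares equal as None.
    assert compare_tree_to_dict(root.findall('link'), expected,
                                ('rel', 'href', 'type'))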
+ +import datetime +import json +import webob + +import nova +from nova import context +from nova import flags +from nova import test +from nova.api.openstack.contrib.volumes import BootFromVolumeController +from nova.compute import instance_types +from nova.tests.api.openstack import fakes +from nova.tests.api.openstack.test_servers import fake_gen_uuid + + +FLAGS = flags.FLAGS + + +def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs): + inst_type = instance_types.get_instance_type_by_flavor_id(2) + return [{'id': 1, + 'display_name': 'test_server', + 'uuid': fake_gen_uuid(), + 'instance_type': dict(inst_type), + 'access_ip_v4': '1.2.3.4', + 'access_ip_v6': 'fead::1234', + 'image_ref': 3, + 'user_id': 'fake', + 'project_id': 'fake', + 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0), + 'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0), + }] + + +class BootFromVolumeTest(test.TestCase): + + def setUp(self): + super(BootFromVolumeTest, self).setUp() + self.stubs.Set(nova.compute.API, 'create', fake_compute_api_create) + + def test_create_root_volume(self): + body = dict(server=dict( + name='test_server', imageRef=3, + flavorRef=2, min_count=1, max_count=1, + block_device_mapping=[dict( + volume_id=1, + device_name='/dev/vda', + virtual='root', + delete_on_termination=False, + )] + )) + req = webob.Request.blank('/v1.1/fake/os-volumes_boot') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + server = json.loads(res.body)['server'] + self.assertEqual(1, server['id']) + self.assertEqual(2, int(server['flavor']['id'])) + self.assertEqual(u'test_server', server['name']) + self.assertEqual(3, int(server['image']['id'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index 7321c329f..b7a0b01ef 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -20,6 +20,7 @@ import json import webob.exc import webob.dec +from lxml import etree from webob import Request from nova import test @@ -52,6 +53,30 @@ class APITest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_vendor_content_type_json(self): + ctype = 'application/vnd.openstack.compute+json' + + req = webob.Request.blank('/') + req.headers['Accept'] = ctype + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, ctype) + + body = json.loads(res.body) + + def test_vendor_content_type_xml(self): + ctype = 'application/vnd.openstack.compute+xml' + + req = webob.Request.blank('/') + req.headers['Accept'] = ctype + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, ctype) + + body = etree.XML(res.body) + def test_exceptions_are_converted_to_faults(self): @webob.dec.wsgify diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index b422bc4d1..1628ad1c8 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -19,6 +19,7 @@ Test suites for 'common' code used throughout the OpenStack HTTP API. 
""" +from lxml import etree import webob.exc import xml.dom.minidom as minidom @@ -26,6 +27,11 @@ from webob import Request from nova import test from nova.api.openstack import common +from nova.api.openstack import xmlutil + + +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" class LimiterTest(test.TestCase): @@ -237,21 +243,41 @@ class MiscFunctionsTest(test.TestCase): common.remove_version_from_href, fixture) - def test_get_id_from_href(self): + def test_get_id_from_href_with_int_url(self): fixture = 'http://www.testsite.com/dir/45' actual = common.get_id_from_href(fixture) - expected = 45 + expected = '45' self.assertEqual(actual, expected) - def test_get_id_from_href_bad_request(self): - fixture = 'http://45' - self.assertRaises(ValueError, - common.get_id_from_href, - fixture) + def test_get_id_from_href_with_int(self): + fixture = '45' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_int_url_query(self): + fixture = 'http://www.testsite.com/dir/45?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) - def test_get_id_from_href_int(self): - fixture = 1 - self.assertEqual(fixture, common.get_id_from_href(fixture)) + def test_get_id_from_href_with_uuid_url(self): + fixture = 'http://www.testsite.com/dir/abc123' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid_url_query(self): + fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid(self): + fixture = 'abc123' + actual = common.get_id_from_href(fixture) + expected = 'abc123' + self.assertEqual(actual, expected) def test_get_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' @@ -314,7 +340,7 @@ class MetadataXMLDeserializationTest(test.TestCase): class MetadataXMLSerializationTest(test.TestCase): - def test_index(self): + def test_xml_declaration(self): serializer = common.MetadataXMLSerializer() fixture = { 'metadata': { @@ -322,17 +348,31 @@ class MetadataXMLSerializationTest(test.TestCase): 'three': 'four', }, } - output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three">four</meta> - <meta key="one">two</meta> - </metadata> - """.replace(" ", "").replace("\n", "")) + output = serializer.serialize(fixture, 'index') + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) - self.assertEqual(expected.toxml(), actual.toxml()) + def test_index(self): + serializer = common.MetadataXMLSerializer() + fixture = { + 'metadata': { + 'one': 'two', + 'three': 'four', + }, + } + output = serializer.serialize(fixture, 'index') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def 
test_index_null(self): serializer = common.MetadataXMLSerializer() @@ -342,15 +382,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="None">None</meta> - </metadata> - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def test_index_unicode(self): serializer = common.MetadataXMLSerializer() @@ -360,15 +401,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(u""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three">Jos\xe9</meta> - </metadata> - """.encode("UTF-8").replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(metadata_elem.text.strip(), meta_value) def test_show(self): serializer = common.MetadataXMLSerializer() @@ -378,14 +420,12 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" - key="one">two</meta> - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + meta_dict = fixture['meta'] + (meta_key, meta_value) = meta_dict.items()[0] + self.assertEqual(str(root.get('key')), str(meta_key)) + self.assertEqual(root.text.strip(), meta_value) def test_update_all(self): serializer = common.MetadataXMLSerializer() @@ -396,16 +436,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'update_all') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="key6">value6</meta> - <meta key="key4">value4</meta> - </metadata> - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + 
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def test_update_item(self): serializer = common.MetadataXMLSerializer() @@ -415,14 +455,12 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'update') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" - key="one">two</meta> - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + meta_dict = fixture['meta'] + (meta_key, meta_value) = meta_dict.items()[0] + self.assertEqual(str(root.get('key')), str(meta_key)) + self.assertEqual(root.text.strip(), meta_value) def test_create(self): serializer = common.MetadataXMLSerializer() @@ -434,6 +472,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'create') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 3) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) actual = minidom.parseString(output.replace(" ", "")) expected = minidom.parseString(""" diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 31443242b..44f4eb055 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -87,6 +87,7 @@ class ExtensionControllerTest(test.TestCase): self.ext_list = [ "Createserverext", "FlavorExtraSpecs", + "FlavorExtraData", "Floating_ips", "Fox In Socks", "Hosts", diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 812bece42..348042bfe 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -17,16 +17,21 @@ import json import webob -import xml.dom.minidom as minidom +from lxml import etree from nova.api.openstack import flavors import nova.db.api from nova import exception from nova import test +from nova.api.openstack import xmlutil from nova.tests.api.openstack import fakes from nova import wsgi +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" + + def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"): return { "flavorid": str(flavorid), @@ -107,12 +112,20 @@ class FlavorsTest(test.TestCase): "name": "flavor 1", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", }, { "id": "2", "name": "flavor 2", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", }, ] self.assertEqual(flavors, expected) @@ -127,6 +140,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 12", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", } self.assertEqual(flavor, expected) @@ -149,6 +166,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 12", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -216,6 +237,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 1", "ram": "256", "disk": "10", + "rxtx_cap": "", + 
"rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -232,6 +257,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 2", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -262,15 +291,50 @@ class FlavorsTest(test.TestCase): class FlavorsXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = flavors.FlavorXMLSerializer() + + fixture = { + "flavor": { + "id": "12", + "name": "asdf", + "ram": "256", + "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/fake/flavors/12", + }, + { + "rel": "bookmark", + "href": "http://localhost/fake/flavors/12", + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) + def test_show(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavor": { "id": "12", "name": "asdf", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -284,34 +348,34 @@ class FlavorsXMLSerializationTest(test.TestCase): }, } - output = serializer.serialize(input, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - id="12" - name="asdf" - ram="256" - disk="10"> - <atom:link href="http://localhost/v1.1/fake/flavors/12" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/12" - rel="bookmark"/> - </flavor> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavor') + flavor_dict = fixture['flavor'] + + for key in ['name', 'id', 'ram', 'disk']: + self.assertEqual(root.get(key), str(flavor_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_handles_integers(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavor": { "id": 12, "name": "asdf", "ram": 256, "disk": 10, + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -325,35 +389,35 @@ class FlavorsXMLSerializationTest(test.TestCase): }, } - output = serializer.serialize(input, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - id="12" - name="asdf" - ram="256" - disk="10"> - <atom:link href="http://localhost/v1.1/fake/flavors/12" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/12" - rel="bookmark"/> - </flavor> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavor') + flavor_dict = fixture['flavor'] + + for key in ['name', 'id', 'ram', 'disk']: + self.assertEqual(root.get(key), str(flavor_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + 
self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_detail(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [ { "id": "23", "name": "flavor 23", "ram": "512", "disk": "20", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -369,6 +433,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 13", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -383,45 +451,38 @@ class FlavorsXMLSerializationTest(test.TestCase): ], } - output = serializer.serialize(input, 'detail') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom"> - <flavor id="23" - name="flavor 23" - ram="512" - disk="20"> - <atom:link href="http://localhost/v1.1/fake/flavors/23" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/23" - rel="bookmark"/> - </flavor> - <flavor id="13" - name="flavor 13" - ram="256" - disk="10"> - <atom:link href="http://localhost/v1.1/fake/flavors/13" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/13" - rel="bookmark"/> - </flavor> - </flavors> - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'detail') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavors') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 2) + for i, flavor_elem in enumerate(flavor_elems): + flavor_dict = fixture['flavors'][i] + + for key in ['name', 'id', 'ram', 'disk']: + self.assertEqual(flavor_elem.get(key), str(flavor_dict[key])) + + link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [ { "id": "23", "name": "flavor 23", "ram": "512", "disk": "20", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -437,6 +498,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 13", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -451,42 +516,34 @@ class FlavorsXMLSerializationTest(test.TestCase): ], } - output = serializer.serialize(input, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom"> - <flavor id="23" name="flavor 23"> - <atom:link href="http://localhost/v1.1/fake/flavors/23" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/23" - rel="bookmark"/> - </flavor> - <flavor id="13" name="flavor 13"> - <atom:link href="http://localhost/v1.1/fake/flavors/13" - rel="self"/> - <atom:link href="http://localhost/fake/flavors/13" - rel="bookmark"/> - </flavor> - </flavors> - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'index') + print output + root = 
etree.XML(output) + xmlutil.validate_schema(root, 'flavors_index') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 2) + for i, flavor_elem in enumerate(flavor_elems): + flavor_dict = fixture['flavors'][i] + + for key in ['name', 'id']: + self.assertEqual(flavor_elem.get(key), str(flavor_dict[key])) + + link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index_empty(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [], } - output = serializer.serialize(input, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" /> - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'index') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavors_index') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 0) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 7759b52ef..e5fd4764a 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -24,6 +24,7 @@ import copy import json import xml.dom.minidom as minidom +from lxml import etree import mox import stubout import webob @@ -31,10 +32,13 @@ import webob from nova import context import nova.api.openstack from nova.api.openstack import images +from nova.api.openstack import xmlutil from nova import test from nova.tests.api.openstack import fakes +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" NOW_API_FORMAT = "2010-10-11T10:30:22Z" @@ -73,14 +77,14 @@ class ImagesTest(test.TestCase): response_dict = json.loads(response.body) response_list = response_dict["images"] - expected = [{'id': '123', 'name': 'public image'}, - {'id': '124', 'name': 'queued snapshot'}, - {'id': '125', 'name': 'saving snapshot'}, - {'id': '126', 'name': 'active snapshot'}, - {'id': '127', 'name': 'killed snapshot'}, - {'id': '128', 'name': 'deleted snapshot'}, - {'id': '129', 'name': 'pending_delete snapshot'}, - {'id': '130', 'name': None}] + expected = [{'id': 123, 'name': 'public image'}, + {'id': 124, 'name': 'queued snapshot'}, + {'id': 125, 'name': 'saving snapshot'}, + {'id': 126, 'name': 'active snapshot'}, + {'id': 127, 'name': 'killed snapshot'}, + {'id': 128, 'name': 'deleted snapshot'}, + {'id': 129, 'name': 'pending_delete snapshot'}, + {'id': 130, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -95,7 +99,7 @@ class ImagesTest(test.TestCase): expected_image = { "image": { - "id": "123", + "id": 123, "name": "public image", "updated": NOW_API_FORMAT, "created": NOW_API_FORMAT, @@ -127,7 +131,7 @@ class ImagesTest(test.TestCase): "status": "SAVING", "progress": 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -220,12 +224,10 @@ class ImagesTest(test.TestCase): expected = minidom.parseString(""" <itemNotFound code="404" - xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> - <message> - Image not found. 
- </message> + xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + <message>Image not found.</message> </itemNotFound> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) actual = minidom.parseString(response.body.replace(" ", "")) @@ -257,12 +259,10 @@ class ImagesTest(test.TestCase): # because the element hasn't changed definition expected = minidom.parseString(""" <itemNotFound code="404" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <message> - Image not found. - </message> + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <message>Image not found.</message> </itemNotFound> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) actual = minidom.parseString(response.body.replace(" ", "")) @@ -402,7 +402,7 @@ class ImagesTest(test.TestCase): response_list = response_dict["images"] expected = [{ - 'id': '123', + 'id': 123, 'name': 'public image', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -410,7 +410,7 @@ class ImagesTest(test.TestCase): 'progress': 100, }, { - 'id': '124', + 'id': 124, 'name': 'queued snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -418,7 +418,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '125', + 'id': 125, 'name': 'saving snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -426,7 +426,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '126', + 'id': 126, 'name': 'active snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -434,7 +434,7 @@ class ImagesTest(test.TestCase): 'progress': 100, }, { - 'id': '127', + 'id': 127, 'name': 'killed snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -442,7 +442,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '128', + 'id': 128, 'name': 'deleted snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -450,7 +450,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '129', + 'id': 129, 'name': 'pending_delete snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -458,7 +458,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '130', + 'id': 130, 'name': None, 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -507,7 +507,7 @@ class ImagesTest(test.TestCase): 'status': 'SAVING', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -538,7 +538,7 @@ class ImagesTest(test.TestCase): 'status': 'SAVING', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -569,7 +569,7 @@ class ImagesTest(test.TestCase): 'status': 'ACTIVE', 'progress': 100, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -600,7 +600,7 @@ class ImagesTest(test.TestCase): 'status': 'ERROR', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -631,7 +631,7 @@ class ImagesTest(test.TestCase): 'status': 'DELETED', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -662,7 +662,7 @@ class ImagesTest(test.TestCase): 'status': 'DELETED', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -910,7 +910,7 @@ class ImagesTest(test.TestCase): app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) res = req.get_response(app) image_meta = json.loads(res.body)['image'] - expected = {'id': '123', 'name': 'public image', + expected = {'id': 123, 
'name': 'public image', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, 'status': 'ACTIVE', 'progress': 100} @@ -972,7 +972,7 @@ class ImageXMLSerializationTest(test.TestCase): IMAGE_HREF = 'http://localhost/v1.1/fake/images/%s' IMAGE_BOOKMARK = 'http://localhost/fake/images/%s' - def test_show(self): + def test_xml_declaration(self): serializer = images.ImageXMLSerializer() fixture = { @@ -984,7 +984,7 @@ class ImageXMLSerializationTest(test.TestCase): 'status': 'ACTIVE', 'progress': 80, 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1013,37 +1013,80 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE" - progress="80"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <metadata> - <meta key="key1"> - value1 - </meta> - </metadata> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - """.replace(" ", "") % (locals())) + def test_show(self): + serializer = images.ImageXMLSerializer() - self.assertEqual(expected.toxml(), actual.toxml()) + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'progress': 80, + 'server': { + 'id': '1', + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status', 'progress']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = image_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in 
enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_zero_metadata(self): serializer = images.ImageXMLSerializer() @@ -1056,7 +1099,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1083,31 +1126,31 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + meta_nodes = root.findall('{0}meta'.format(ATOMNS)) + self.assertEqual(len(meta_nodes), 0) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_image_no_metadata_key(self): serializer = images.ImageXMLSerializer() @@ -1120,7 +1163,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1146,31 +1189,31 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - 
""".replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + meta_nodes = root.findall('{0}meta'.format(ATOMNS)) + self.assertEqual(len(meta_nodes), 0) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_no_server(self): serializer = images.ImageXMLSerializer() @@ -1199,30 +1242,30 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE"> - <metadata> - <meta key="key1"> - value1 - </meta> - </metadata> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = image_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root, None) def test_index(self): serializer = images.ImageXMLSerializer() @@ -1237,6 +1280,10 @@ class ImageXMLSerializationTest(test.TestCase): 'href': self.IMAGE_HREF % 1, 'rel': 'self', }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, ], }, { @@ -1247,35 +1294,32 @@ class ImageXMLSerializationTest(test.TestCase): 'href': self.IMAGE_HREF % 2, 'rel': 'self', }, + { + 'href': self.IMAGE_BOOKMARK % 2, + 'rel': 'bookmark', + }, ], }, ] } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = 
self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_href_two = self.IMAGE_HREF % 2 - expected_bookmark_two = self.IMAGE_BOOKMARK % 2 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <images - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom"> - <image id="1" name="Image1"> - <atom:link href="%(expected_href)s" rel="self"/> - </image> - <image id="2" name="Image2"> - <atom:link href="%(expected_href_two)s" rel="self"/> - </image> - </images> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'images_index') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 2) + for i, image_elem in enumerate(image_elems): + image_dict = fixture['images'][i] + + for key in ['name', 'id']: + self.assertEqual(image_elem.get(key), str(image_dict[key])) + + link_nodes = image_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index_zero_images(self): serializer = images.ImageXMLSerializer() @@ -1285,15 +1329,11 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixtures, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <images - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" /> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'images_index') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 0) def test_detail(self): serializer = images.ImageXMLSerializer() @@ -1307,7 +1347,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1331,7 +1371,7 @@ class ImageXMLSerializationTest(test.TestCase): ], }, { - 'id': 2, + 'id': '2', 'name': 'Image2', 'created': self.TIMESTAMP, 'updated': self.TIMESTAMP, @@ -1355,46 +1395,22 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'detail') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_href_two = self.IMAGE_HREF % 2 - expected_bookmark_two = self.IMAGE_BOOKMARK % 2 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <images - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom"> - <image id="1" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - <image id="2" - name="Image2" - updated="%(expected_now)s" - created="%(expected_now)s" - status="SAVING" - progress="80"> - <metadata> - <meta key="key1"> - value1 - </meta> - 
</metadata> - <atom:link href="%(expected_href_two)s" rel="self"/> - <atom:link href="%(expected_bookmark_two)s" rel="bookmark"/> - </image> - </images> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'images') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 2) + for i, image_elem in enumerate(image_elems): + image_dict = fixture['images'][i] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(image_elem.get(key), str(image_dict[key])) + + link_nodes = image_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = image_elem.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 801b06230..6f0210c27 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -19,6 +19,7 @@ Tests dealing with HTTP rate-limiting. import httplib import json +from lxml import etree import StringIO import stubout import time @@ -29,6 +30,7 @@ from xml.dom import minidom import nova.context from nova.api.openstack import limits from nova.api.openstack import views +from nova.api.openstack import xmlutil from nova import test @@ -39,6 +41,10 @@ TEST_LIMITS = [ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE), ] +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/compute/api/v1.1' +} class BaseLimitTestSuite(unittest.TestCase): @@ -168,12 +174,11 @@ class LimitsControllerV10Test(BaseLimitTestSuite): response = request.get_response(self.controller) expected = minidom.parseString(""" - <limits - xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate/> <absolute/> </limits> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) body = minidom.parseString(response.body.replace(" ", "")) @@ -186,17 +191,16 @@ class LimitsControllerV10Test(BaseLimitTestSuite): response = request.get_response(self.controller) expected = minidom.parseString(""" - <limits - xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate> <limit URI="*" regex=".*" remaining="10" resetTime="0" - unit="MINUTE" value="10" verb="GET"/> + unit="MINUTE" value="10" verb="GET"/> <limit URI="*" regex=".*" remaining="5" resetTime="0" - unit="HOUR" value="5" verb="POST"/> + unit="HOUR" value="5" verb="POST"/> </rate> <absolute/> </limits> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) @@ -980,9 +984,22 @@ class LimitsXMLSerializationTest(test.TestCase): def tearDown(self): pass - def test_index(self): + def test_xml_declaration(self): serializer = limits.LimitsXMLSerializer() + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture, 'index') + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) + + def test_index(self): + serializer = 
limits.LimitsXMLSerializer() + fixture = { + "limits": { "rate": [{ "uri": "*", "regex": ".*", @@ -1006,32 +1023,32 @@ class LimitsXMLSerializationTest(test.TestCase): "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> - <rates> - <rate uri="*" regex=".*"> - <limit value="10" verb="POST" remaining="2" - unit="MINUTE" - next-available="2011-12-15T22:42:45Z"/> - </rate> - <rate uri="*/servers" regex="^/servers"> - <limit value="50" verb="POST" remaining="10" - unit="DAY" - next-available="2011-12-15T22:42:45Z"/> - </rate> - </rates> - <absolute> - <limit name="maxServerMeta" value="1"/> - <limit name="maxPersonality" value="5"/> - <limit name="maxImageMeta" value="1"/> - <limit name="maxPersonalitySize" value="10240"/> - </absolute> - </limits> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 4) + for limit in absolutes: + name = limit.get('name') + value = limit.get('value') + self.assertEqual(value, str(fixture['limits']['absolute'][name])) + + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 2) + for i, rate in enumerate(rates): + for key in ['uri', 'regex']: + self.assertEqual(rate.get(key), + str(fixture['limits']['rate'][i][key])) + rate_limits = rate.xpath('ns:limit', namespaces=NS) + self.assertEqual(len(rate_limits), 1) + for j, limit in enumerate(rate_limits): + for key in ['verb', 'value', 'remaining', 'unit', + 'next-available']: + self.assertEqual(limit.get(key), + str(fixture['limits']['rate'][i]['limit'][j][key])) def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() @@ -1041,13 +1058,14 @@ class LimitsXMLSerializationTest(test.TestCase): "absolute": {}}} output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') - expected = minidom.parseString(""" - <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> - <rates /> - <absolute /> - </limits> - """.replace(" ", "")) + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 0) - self.assertEqual(expected.toxml(), actual.toxml()) + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 0) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index f6e45e9c7..251b5d126 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -623,7 +623,8 @@ class ServerActionsTestV11(test.TestCase): self.assertEqual(res.status_int, 202) body = json.loads(res.body) self.assertEqual(body['server']['image']['id'], '2') - self.assertEqual(len(body['server']['adminPass']), 16) + self.assertEqual(len(body['server']['adminPass']), + FLAGS.password_length) def test_server_rebuild_rejected_when_building(self): body = { diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index f654bf209..f7e08118f 100644 --- a/nova/tests/api/openstack/test_servers.py 
+++ b/nova/tests/api/openstack/test_servers.py @@ -28,6 +28,7 @@ import webob from nova import context from nova import db from nova import exception +from nova import flags from nova import test from nova import utils import nova.api.openstack @@ -49,9 +50,14 @@ from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" +XPATH_NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/compute/api/v1.1' +} def fake_gen_uuid(): @@ -413,12 +419,7 @@ class ServersTest(test.TestCase): def test_get_server_by_id_v1_1_xml(self): image_bookmark = "http://localhost/fake/images/10" - flavor_ref = "http://localhost/v1.1/fake/flavors/1" - flavor_id = "1" flavor_bookmark = "http://localhost/fake/flavors/1" - server_href = "http://localhost/v1.1/fake/servers/1" - server_bookmark = "http://localhost/fake/servers/1" - public_ip = '192.168.0.3' private_ip = '172.19.0.1' interfaces = [ @@ -442,50 +443,88 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.1/fake/servers/1') req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) - actual = minidom.parseString(res.body.replace(' ', '')) - expected_uuid = FAKE_UUID - expected_updated = "2010-11-11T11:00:00Z" - expected_created = "2010-10-10T12:00:00Z" - expected = minidom.parseString(""" - <server id="1" - uuid="%(expected_uuid)s" - userId="fake" - tenantId="fake" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="server1" - updated="%(expected_updated)s" - created="%(expected_created)s" - hostId="" - status="BUILD" - accessIPv4="" - accessIPv6="" - progress="0"> - <atom:link href="%(server_href)s" rel="self"/> - <atom:link href="%(server_bookmark)s" rel="bookmark"/> - <image id="10"> - <atom:link rel="bookmark" href="%(image_bookmark)s"/> - </image> - <flavor id="1"> - <atom:link rel="bookmark" href="%(flavor_bookmark)s"/> - </flavor> - <metadata> - <meta key="seq"> - 1 - </meta> - </metadata> - <addresses> - <network id="public"> - <ip version="4" addr="%(public_ip)s"/> - </network> - <network id="private"> - <ip version="4" addr="%(private_ip)s"/> - </network> - </addresses> - </server> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = res.body + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') + + expected = { + 'id': 1, + 'uuid': FAKE_UUID, + 'user_id': 'fake', + 'tenant_id': 'fake', + 'updated': '2010-11-11T11:00:00Z', + 'created': '2010-10-10T12:00:00Z', + 'progress': 0, + 'name': 'server1', + 'status': 'BUILD', + 'accessIPv4': '', + 'accessIPv6': '', + 'hostId': '', + 'key_name': '', + 'image': { + 'id': '10', + 'links': [{'rel': 'bookmark', 'href': image_bookmark}], + }, + 'flavor': { + 'id': '1', + 'links': [{'rel': 'bookmark', 'href': flavor_bookmark}], + }, + 'addresses': { + 'public': [{'version': 4, 'addr': public_ip}], + 'private': [{'version': 4, 'addr': private_ip}], + }, + 'metadata': {'seq': '1'}, + 'config_drive': None, + 'links': [ + { + 'rel': 'self', + 'href': 'http://localhost/v1.1/fake/servers/1', + }, + { + 'rel': 'bookmark', + 'href': 'http://localhost/fake/servers/1', + }, + ], + } + + self.assertTrue(root.xpath('/ns:server', namespaces=XPATH_NS)) + for key in ['id', 'uuid', 'created', 'progress', 'name', 'status', + 'accessIPv4', 
'accessIPv6', 'hostId']: + self.assertEqual(root.get(key), str(expected[key])) + self.assertEqual(root.get('userId'), str(expected['user_id'])) + self.assertEqual(root.get('tenantId'), str(expected['tenant_id'])) + + (image,) = root.xpath('ns:image', namespaces=XPATH_NS) + self.assertEqual(image.get('id'), str(expected['image']['id'])) + + links = root.xpath('ns:image/atom:link', namespaces=XPATH_NS) + self.assertTrue(common.compare_links(links, + expected['image']['links'])) + + (flavor,) = root.xpath('ns:flavor', namespaces=XPATH_NS) + self.assertEqual(flavor.get('id'), str(expected['flavor']['id'])) + + (meta,) = root.xpath('ns:metadata/ns:meta', namespaces=XPATH_NS) + self.assertEqual(meta.get('key'), 'seq') + self.assertEqual(meta.text, '1') + + (pub_network, priv_network) = root.xpath('ns:addresses/ns:network', + namespaces=XPATH_NS) + self.assertEqual(pub_network.get('id'), 'public') + (pub_ip,) = pub_network.xpath('ns:ip', namespaces=XPATH_NS) + (priv_ip,) = priv_network.xpath('ns:ip', namespaces=XPATH_NS) + self.assertEqual(pub_ip.get('version'), + str(expected['addresses']['public'][0]['version'])) + self.assertEqual(pub_ip.get('addr'), + str(expected['addresses']['public'][0]['addr'])) + self.assertEqual(priv_ip.get('version'), + str(expected['addresses']['private'][0]['version'])) + self.assertEqual(priv_ip.get('addr'), + str(expected['addresses']['private'][0]['addr'])) + + links = root.xpath('atom:link', namespaces=XPATH_NS) + self.assertTrue(common.compare_links(links, expected['links'])) def test_get_server_with_active_status_by_id_v1_1(self): image_bookmark = "http://localhost/fake/images/10" @@ -1541,7 +1580,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(2, server['flavorId']) @@ -1742,7 +1781,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual(1, server['id']) self.assertEqual(0, server['progress']) self.assertEqual('server_test', server['name']) @@ -1802,7 +1841,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual(1, server['id']) self.assertEqual("BUILD", server["status"]) self.assertEqual(0, server['progress']) @@ -2513,9 +2552,8 @@ class ServersTest(test.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) - def test_rescue_accepted(self): + def test_rescue_generates_password(self): self.flags(allow_admin_api=True) - body = {} self.called = False @@ -2530,7 +2568,33 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(self.called, True) - self.assertEqual(res.status_int, 202) + self.assertEqual(res.status_int, 200) + res_body = json.loads(res.body) + self.assertTrue('adminPass' in res_body) + self.assertEqual(FLAGS.password_length, len(res_body['adminPass'])) + + def test_rescue_with_preset_password(self): + self.flags(allow_admin_api=True) + + self.called = False + + def rescue_mock(*args, **kwargs): + 
self.called = True + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + body = {"rescue": {"adminPass": "AABBCC112233"}} + req.body = json.dumps(body) + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(self.called, True) + self.assertEqual(res.status_int, 200) + res_body = json.loads(res.body) + self.assertTrue('adminPass' in res_body) + self.assertEqual('AABBCC112233', res_body['adminPass']) def test_rescue_raises_handled(self): self.flags(allow_admin_api=True) @@ -3288,7 +3352,7 @@ class TestAddressesXMLSerialization(test.TestCase): serializer = nova.api.openstack.ips.IPXMLSerializer() - def test_show(self): + def test_xml_declaration(self): fixture = { 'network_2': [ {'addr': '192.168.0.1', 'version': 4}, @@ -3296,17 +3360,29 @@ class TestAddressesXMLSerialization(test.TestCase): ], } output = self.serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <network xmlns="http://docs.openstack.org/compute/api/v1.1" - id="network_2"> - <ip version="4" addr="192.168.0.1"/> - <ip version="6" addr="fe80::beef"/> - </network> - """.replace(" ", "")) + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) - self.assertEqual(expected.toxml(), actual.toxml()) + def test_show(self): + fixture = { + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + } + output = self.serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + network = fixture['network_2'] + self.assertEqual(str(root.get('id')), 'network_2') + ip_elems = root.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) def test_index(self): fixture = { @@ -3322,22 +3398,22 @@ class TestAddressesXMLSerialization(test.TestCase): }, } output = self.serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <addresses xmlns="http://docs.openstack.org/compute/api/v1.1"> - <network id="network_2"> - <ip version="4" addr="192.168.0.1"/> - <ip version="6" addr="fe80::beef"/> - </network> - <network id="network_1"> - <ip version="4" addr="192.168.0.3"/> - <ip version="4" addr="192.168.0.5"/> - </network> - </addresses> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'addresses') + addresses_dict = fixture['addresses'] + network_elems = root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) class TestServerInstanceCreation(test.TestCase): @@ -3575,7 +3651,8 @@ class TestServerInstanceCreation(test.TestCase): self.assertEquals(response.status_int, 202) response = json.loads(response.body) self.assertTrue('adminPass' in response['server']) 
- self.assertEqual(16, len(response['server']['adminPass'])) + self.assertEqual(FLAGS.password_length, + len(response['server']['adminPass'])) def test_create_instance_admin_pass_xml(self): request, response, dummy = \ @@ -3584,7 +3661,8 @@ class TestServerInstanceCreation(test.TestCase): dom = minidom.parseString(response.body) server = dom.childNodes[0] self.assertEquals(server.nodeName, 'server') - self.assertEqual(16, len(server.getAttribute('adminPass'))) + self.assertEqual(FLAGS.password_length, + len(server.getAttribute('adminPass'))) class TestGetKernelRamdiskFromImage(test.TestCase): @@ -4064,6 +4142,85 @@ class ServerXMLSerializationTest(test.TestCase): self.maxDiff = None test.TestCase.setUp(self) + def test_xml_declaration(self): + serializer = servers.ServerXMLSerializer() + + fixture = { + "server": { + 'id': 1, + 'uuid': FAKE_UUID, + 'user_id': 'fake_user_id', + 'tenant_id': 'fake_tenant_id', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": self.IMAGE_BOOKMARK, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": self.FLAVOR_BOOKMARK, + }, + ], + }, + "addresses": { + "network_one": [ + { + "version": 4, + "addr": "67.23.10.138", + }, + { + "version": 6, + "addr": "::babe:67.23.10.138", + }, + ], + "network_two": [ + { + "version": 4, + "addr": "67.23.10.139", + }, + { + "version": 6, + "addr": "::babe:67.23.10.139", + }, + ], + }, + "metadata": { + "Open": "Stack", + "Number": "1", + }, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + } + } + + output = serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + self.assertTrue(has_dec) + def test_show(self): serializer = servers.ServerXMLSerializer() diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 1269f13c9..f69dbd316 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -15,19 +15,24 @@ # License for the specific language governing permissions and limitations # under the License. 
+import feedparser import json import stubout import webob -import xml.etree.ElementTree - +from lxml import etree from nova import context from nova import test -from nova.tests.api.openstack import fakes from nova.api.openstack import versions from nova.api.openstack import views from nova.api.openstack import wsgi +from nova.tests.api.openstack import common +from nova.tests.api.openstack import fakes +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/compute/api/v1.1' +} VERSIONS = { "v1.0": { "id": "v1.0", @@ -113,23 +118,23 @@ class VersionsTest(test.TestCase): versions = json.loads(res.body)["versions"] expected = [ { - "id": "v1.1", - "status": "CURRENT", + "id": "v1.0", + "status": "DEPRECATED", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/", + "href": "http://localhost/v1.0/", }], }, { - "id": "v1.0", - "status": "DEPRECATED", + "id": "v1.1", + "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.0/", + "href": "http://localhost/v1.1/", }], }, ] @@ -233,48 +238,20 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - root = xml.etree.ElementTree.XML(res.body) - self.assertEqual(root.tag.split('}')[1], "version") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - children = list(root) - media_types = children[0] - media_type_nodes = list(media_types) - links = (children[1], children[2], children[3]) - - self.assertEqual(media_types.tag.split('}')[1], 'media-types') - for media_node in media_type_nodes: - self.assertEqual(media_node.tag.split('}')[1], 'media-type') - - expected = """ - <version id="v1.0" status="DEPRECATED" - updated="2011-01-21T11:33:21Z" - xmlns="%s" - xmlns:atom="http://www.w3.org/2005/Atom"> - - <media-types> - <media-type base="application/xml" - type="application/vnd.openstack.compute-v1.0+xml"/> - <media-type base="application/json" - type="application/vnd.openstack.compute-v1.0+json"/> - </media-types> - - <atom:link href="http://localhost/v1.0/" - rel="self"/> - - <atom:link href="http://docs.rackspacecloud.com/servers/ - api/v1.0/cs-devguide-20110125.pdf" - rel="describedby" - type="application/pdf"/> - - <atom:link href="http://docs.rackspacecloud.com/servers/ - api/v1.0/application.wadl" - rel="describedby" - type="application/vnd.sun.wadl+xml"/> - </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + version = etree.XML(res.body) + expected = VERSIONS['v1.0'] + self.assertTrue(version.xpath('/ns:version', namespaces=NS)) + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(common.compare_media_types(media_types, + expected['media-types'])) + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.0/'}] + + expected['links'])) def test_get_version_1_1_detail_xml(self): req = webob.Request.blank('/v1.1/') @@ -282,35 +259,20 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """ - <version id="v1.1" status="CURRENT" 
- updated="2011-01-21T11:33:21Z" - xmlns="%s" - xmlns:atom="http://www.w3.org/2005/Atom"> - - <media-types> - <media-type base="application/xml" - type="application/vnd.openstack.compute-v1.1+xml"/> - <media-type base="application/json" - type="application/vnd.openstack.compute-v1.1+json"/> - </media-types> - - <atom:link href="http://localhost/v1.1/" - rel="self"/> - - <atom:link href="http://docs.rackspacecloud.com/servers/ - api/v1.1/cs-devguide-20110125.pdf" - rel="describedby" - type="application/pdf"/> - - <atom:link href="http://docs.rackspacecloud.com/servers/ - api/v1.1/application.wadl" - rel="describedby" - type="application/vnd.sun.wadl+xml"/> - </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + + version = etree.XML(res.body) + expected = VERSIONS['v1.1'] + self.assertTrue(version.xpath('/ns:version', namespaces=NS)) + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(common.compare_media_types(media_types, + expected['media-types'])) + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.1/'}] + + expected['links'])) def test_get_version_list_xml(self): req = webob.Request.blank('/') @@ -319,21 +281,19 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """ - <versions xmlns="%s" xmlns:atom="%s"> - <version id="v1.1" status="CURRENT" updated="2011-01-21T11:33:21Z"> - <atom:link href="http://localhost/v1.1/" rel="self"/> - </version> - <version id="v1.0" status="DEPRECATED" - updated="2011-01-21T11:33:21Z"> - <atom:link href="http://localhost/v1.0/" rel="self"/> - </version> - </versions>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, - wsgi.XMLNS_ATOM) + root = etree.XML(res.body) + self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) + versions = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(versions), 2) - actual = res.body.replace(" ", "").replace("\n", "") - - self.assertEqual(expected, actual) + for i, v in enumerate(['v1.0', 'v1.1']): + version = versions[i] + expected = VERSIONS[v] + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + (link,) = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(link, + [{'rel': 'self', 'href': 'http://localhost/%s/' % v}])) def test_get_version_1_0_detail_atom(self): req = webob.Request.blank('/v1.0/') @@ -341,36 +301,38 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual("application/atom+xml", res.content_type) - expected = """ - <feed xmlns="http://www.w3.org/2005/Atom"> - <title type="text">About This Version</title> - <updated>2011-01-21T11:33:21Z</updated> - <id>http://localhost/v1.0/</id> - <author> - <name>Rackspace</name> - <uri>http://www.rackspace.com/</uri> - </author> - <link href="http://localhost/v1.0/" rel="self"/> - <entry> - <id>http://localhost/v1.0/</id> - <title type="text">Version v1.0</title> - <updated>2011-01-21T11:33:21Z</updated> - <link href="http://localhost/v1.0/" - rel="self"/> - <link href="http://docs.rackspacecloud.com/servers/ - api/v1.0/cs-devguide-20110125.pdf" - rel="describedby" 
type="application/pdf"/> - <link href="http://docs.rackspacecloud.com/servers/ - api/v1.0/application.wadl" - rel="describedby" type="application/vnd.sun.wadl+xml"/> - <content type="text"> - Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) - </content> - </entry> - </feed>""".replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.0/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.0/') + self.assertEqual(entry.title, 'Version v1.0') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby'}) + self.assertEqual(entry.links[2], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby'}) def test_get_version_1_1_detail_atom(self): req = webob.Request.blank('/v1.1/') @@ -378,36 +340,38 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual("application/atom+xml", res.content_type) - expected = """ - <feed xmlns="http://www.w3.org/2005/Atom"> - <title type="text">About This Version</title> - <updated>2011-01-21T11:33:21Z</updated> - <id>http://localhost/v1.1/</id> - <author> - <name>Rackspace</name> - <uri>http://www.rackspace.com/</uri> - </author> - <link href="http://localhost/v1.1/" rel="self"/> - <entry> - <id>http://localhost/v1.1/</id> - <title type="text">Version v1.1</title> - <updated>2011-01-21T11:33:21Z</updated> - <link href="http://localhost/v1.1/" - rel="self"/> - <link href="http://docs.rackspacecloud.com/servers/ - api/v1.1/cs-devguide-20110125.pdf" - rel="describedby" type="application/pdf"/> - <link href="http://docs.rackspacecloud.com/servers/ - api/v1.1/application.wadl" - rel="describedby" type="application/vnd.sun.wadl+xml"/> - <content type="text"> - Version v1.1 CURRENT (2011-01-21T11:33:21Z) - </content> - </entry> - </feed>""".replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.1/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + 
self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby'}) + self.assertEqual(entry.links[2], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby'}) def test_get_version_list_atom(self): req = webob.Request.blank('/') @@ -416,40 +380,37 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/atom+xml") - expected = """ - <feed xmlns="http://www.w3.org/2005/Atom"> - <title type="text">Available API Versions</title> - <updated>2011-01-21T11:33:21Z</updated> - <id>http://localhost/</id> - <author> - <name>Rackspace</name> - <uri>http://www.rackspace.com/</uri> - </author> - <link href="http://localhost/" rel="self"/> - <entry> - <id>http://localhost/v1.1/</id> - <title type="text">Version v1.1</title> - <updated>2011-01-21T11:33:21Z</updated> - <link href="http://localhost/v1.1/" rel="self"/> - <content type="text"> - Version v1.1 CURRENT (2011-01-21T11:33:21Z) - </content> - </entry> - <entry> - <id>http://localhost/v1.0/</id> - <title type="text">Version v1.0</title> - <updated>2011-01-21T11:33:21Z</updated> - <link href="http://localhost/v1.0/" rel="self"/> - <content type="text"> - Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) - </content> - </entry> - </feed> - """.replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - - self.assertEqual(expected, actual) + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'Available API Versions') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + + self.assertEqual(len(f.entries), 2) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.0/') + self.assertEqual(entry.title, 'Version v1.0') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(entry.links[0]['rel'], 'self') + entry = f.entries[1] + self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') def test_multi_choice_image(self): req = 
webob.Request.blank('/images/1') @@ -511,28 +472,32 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/xml") - expected = """ - <choices xmlns="%s" xmlns:atom="%s"> - <version id="v1.1" status="CURRENT"> - <media-types> - <media-type base="application/xml" - type="application/vnd.openstack.compute-v1.1+xml"/> - <media-type base="application/json" - type="application/vnd.openstack.compute-v1.1+json"/> - </media-types> - <atom:link href="http://localhost/v1.1/images/1" rel="self"/> - </version> - <version id="v1.0" status="DEPRECATED"> - <media-types> - <media-type base="application/xml" - type="application/vnd.openstack.compute-v1.0+xml"/> - <media-type base="application/json" - type="application/vnd.openstack.compute-v1.0+json"/> - </media-types> - <atom:link href="http://localhost/v1.0/images/1" rel="self"/> - </version> - </choices>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, - wsgi.XMLNS_ATOM) + root = etree.XML(res.body) + self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) + versions = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(versions), 2) + + version = versions[0] + self.assertEqual(version.get('id'), 'v1.1') + self.assertEqual(version.get('status'), 'CURRENT') + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(common.compare_media_types(media_types, + VERSIONS['v1.1']['media-types'])) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.1/images/1'}])) + + version = versions[1] + self.assertEqual(version.get('id'), 'v1.0') + self.assertEqual(version.get('status'), 'DEPRECATED') + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(common.compare_media_types(media_types, + VERSIONS['v1.0']['media-types'])) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.0/images/1'}])) def test_multi_choice_server_atom(self): """ @@ -665,22 +630,20 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.index(versions_data) - root = xml.etree.ElementTree.XML(response) - self.assertEqual(root.tag.split('}')[1], "versions") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - version = list(root)[0] - self.assertEqual(version.tag.split('}')[1], "version") - self.assertEqual(version.get('id'), - versions_data['versions'][0]['id']) + root = etree.XML(response) + self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) + version_elems = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(version_elems), 1) + version = version_elems[0] + self.assertEqual(version.get('id'), versions_data['versions'][0]['id']) self.assertEqual(version.get('status'), versions_data['versions'][0]['status']) - link = list(version)[0] - - self.assertEqual(link.tag.split('}')[1], "link") - self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) - for key, val in versions_data['versions'][0]['links'][0].items(): - self.assertEqual(link.get(key), val) + (link,) = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(link, [{ + 'rel': 'self', + 'href': 'http://test/2.7.1', + 'type': 'application/atom+xml'}])) def test_versions_multi_xml_serializer(self): versions_data = { @@ -703,11 +666,9 @@ class 
VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.multi(versions_data) - root = xml.etree.ElementTree.XML(response) - self.assertEqual(root.tag.split('}')[1], "choices") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - version = list(root)[0] - self.assertEqual(version.tag.split('}')[1], "version") + root = etree.XML(response) + self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) + (version,) = root.xpath('ns:version', namespaces=NS) self.assertEqual(version.get('id'), versions_data['choices'][0]['id']) self.assertEqual(version.get('status'), versions_data['choices'][0]['status']) @@ -716,19 +677,14 @@ class VersionsSerializerTests(test.TestCase): media_type_nodes = list(media_types) self.assertEqual(media_types.tag.split('}')[1], "media-types") - set_types = versions_data['choices'][0]['media-types'] - for i, type in enumerate(set_types): - node = media_type_nodes[i] - self.assertEqual(node.tag.split('}')[1], "media-type") - for key, val in set_types[i].items(): - self.assertEqual(node.get(key), val) - - link = list(version)[1] + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(common.compare_media_types(media_types, + versions_data['choices'][0]['media-types'])) - self.assertEqual(link.tag.split('}')[1], "link") - self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) - for key, val in versions_data['choices'][0]['links'][0].items(): - self.assertEqual(link.get(key), val) + (link,) = version.xpath('atom:link', namespaces=NS) + self.assertTrue(common.compare_links(link, + versions_data['choices'][0]['links'])) def test_version_detail_xml_serializer(self): version_data = { @@ -770,7 +726,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.show(version_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "version") self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) @@ -811,59 +767,28 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.index(versions_data) - - root = xml.etree.ElementTree.XML(response) - self.assertEqual(root.tag.split('}')[1], "feed") - self.assertEqual(root.tag.split('}')[0].strip('{'), - "http://www.w3.org/2005/Atom") - - children = list(root) - title = children[0] - updated = children[1] - id = children[2] - author = children[3] - link = children[4] - entry = children[5] - - self.assertEqual(title.tag.split('}')[1], 'title') - self.assertEqual(title.text, 'Available API Versions') - self.assertEqual(updated.tag.split('}')[1], 'updated') - self.assertEqual(updated.text, '2011-07-20T11:40:00Z') - self.assertEqual(id.tag.split('}')[1], 'id') - self.assertEqual(id.text, 'http://test/') - - self.assertEqual(author.tag.split('}')[1], 'author') - author_name = list(author)[0] - author_uri = list(author)[1] - self.assertEqual(author_name.tag.split('}')[1], 'name') - self.assertEqual(author_name.text, 'Rackspace') - self.assertEqual(author_uri.tag.split('}')[1], 'uri') - self.assertEqual(author_uri.text, 'http://www.rackspace.com/') - - self.assertEqual(link.get('href'), 'http://test/') - self.assertEqual(link.get('rel'), 'self') - - self.assertEqual(entry.tag.split('}')[1], 'entry') - entry_children = list(entry) - entry_id = entry_children[0] - entry_title = entry_children[1] - entry_updated = entry_children[2] - 
entry_link = entry_children[3] - entry_content = entry_children[4] - self.assertEqual(entry_id.tag.split('}')[1], "id") - self.assertEqual(entry_id.text, "http://test/2.9.8") - self.assertEqual(entry_title.tag.split('}')[1], "title") - self.assertEqual(entry_title.get('type'), "text") - self.assertEqual(entry_title.text, "Version 2.9.8") - self.assertEqual(entry_updated.tag.split('}')[1], "updated") - self.assertEqual(entry_updated.text, "2011-07-20T11:40:00Z") - self.assertEqual(entry_link.tag.split('}')[1], "link") - self.assertEqual(entry_link.get('href'), "http://test/2.9.8") - self.assertEqual(entry_link.get('rel'), "self") - self.assertEqual(entry_content.tag.split('}')[1], "content") - self.assertEqual(entry_content.get('type'), "text") - self.assertEqual(entry_content.text, - "Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)") + f = feedparser.parse(response) + + self.assertEqual(f.feed.title, 'Available API Versions') + self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z') + self.assertEqual(f.feed.id, 'http://test/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') + self.assertEqual(f.feed.links[0]['href'], 'http://test/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://test/2.9.8') + self.assertEqual(entry.title, 'Version 2.9.8') + self.assertEqual(entry.updated, '2011-07-20T11:40:00Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)') + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8') + self.assertEqual(entry.links[0]['rel'], 'self') def test_version_detail_atom_serializer(self): versions_data = { @@ -904,63 +829,36 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.show(versions_data) - - root = xml.etree.ElementTree.XML(response) - self.assertEqual(root.tag.split('}')[1], "feed") - self.assertEqual(root.tag.split('}')[0].strip('{'), - "http://www.w3.org/2005/Atom") - - children = list(root) - title = children[0] - updated = children[1] - id = children[2] - author = children[3] - link = children[4] - entry = children[5] - - self.assertEqual(root.tag.split('}')[1], 'feed') - self.assertEqual(title.tag.split('}')[1], 'title') - self.assertEqual(title.text, 'About This Version') - self.assertEqual(updated.tag.split('}')[1], 'updated') - self.assertEqual(updated.text, '2011-01-21T11:33:21Z') - self.assertEqual(id.tag.split('}')[1], 'id') - self.assertEqual(id.text, 'http://localhost/v1.1/') - - self.assertEqual(author.tag.split('}')[1], 'author') - author_name = list(author)[0] - author_uri = list(author)[1] - self.assertEqual(author_name.tag.split('}')[1], 'name') - self.assertEqual(author_name.text, 'Rackspace') - self.assertEqual(author_uri.tag.split('}')[1], 'uri') - self.assertEqual(author_uri.text, 'http://www.rackspace.com/') - - self.assertEqual(link.get('href'), - 'http://localhost/v1.1/') - self.assertEqual(link.get('rel'), 'self') - - self.assertEqual(entry.tag.split('}')[1], 'entry') - entry_children = list(entry) - entry_id = entry_children[0] - entry_title = entry_children[1] - entry_updated = entry_children[2] - entry_links = (entry_children[3], entry_children[4], entry_children[5]) - entry_content = entry_children[6] - - self.assertEqual(entry_id.tag.split('}')[1], "id") - 
self.assertEqual(entry_id.text, - "http://localhost/v1.1/") - self.assertEqual(entry_title.tag.split('}')[1], "title") - self.assertEqual(entry_title.get('type'), "text") - self.assertEqual(entry_title.text, "Version v1.1") - self.assertEqual(entry_updated.tag.split('}')[1], "updated") - self.assertEqual(entry_updated.text, "2011-01-21T11:33:21Z") - - for i, link in enumerate(versions_data["version"]["links"]): - self.assertEqual(entry_links[i].tag.split('}')[1], "link") - for key, val in versions_data["version"]["links"][i].items(): - self.assertEqual(entry_links[i].get(key), val) - - self.assertEqual(entry_content.tag.split('}')[1], "content") - self.assertEqual(entry_content.get('type'), "text") - self.assertEqual(entry_content.text, - "Version v1.1 CURRENT (2011-01-21T11:33:21Z)") + f = feedparser.parse(response) + + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.1/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/cs-devguide-20110125.pdf'}) + self.assertEqual(entry.links[2], { + 'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/application.wadl', + }) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 6dea78d17..74b9ce853 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -27,17 +27,17 @@ class RequestTest(test.TestCase): result = request.get_content_type() self.assertEqual(result, "application/json") - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - + def test_content_type_from_accept(self): + for content_type in ('application/xml', + 'application/vnd.openstack.compute+xml', + 'application/json', + 'application/vnd.openstack.compute+json'): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = content_type + result = request.best_match_content_type() + self.assertEqual(result, content_type) + + def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() @@ -231,7 +231,7 @@ class ResponseSerializerTest(test.TestCase): 
self.body_serializers = { 'application/json': JSONSerializer(), - 'application/XML': XMLSerializer(), + 'application/xml': XMLSerializer(), } self.serializer = wsgi.ResponseSerializer(self.body_serializers, @@ -250,15 +250,24 @@ class ResponseSerializerTest(test.TestCase): self.serializer.get_body_serializer, 'application/unknown') - def test_serialize_response(self): - response = self.serializer.serialize({}, 'application/json') - self.assertEqual(response.headers['Content-Type'], 'application/json') - self.assertEqual(response.body, 'pew_json') - self.assertEqual(response.status_int, 404) + def test_serialize_response_json(self): + for content_type in ('application/json', + 'application/vnd.openstack.compute+json'): + response = self.serializer.serialize({}, content_type) + self.assertEqual(response.headers['Content-Type'], content_type) + self.assertEqual(response.body, 'pew_json') + self.assertEqual(response.status_int, 404) + + def test_serialize_response_xml(self): + for content_type in ('application/xml', + 'application/vnd.openstack.compute+xml'): + response = self.serializer.serialize({}, content_type) + self.assertEqual(response.headers['Content-Type'], content_type) + self.assertEqual(response.body, 'pew_xml') + self.assertEqual(response.status_int, 404) def test_serialize_response_None(self): response = self.serializer.serialize(None, 'application/json') - print response self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, '') self.assertEqual(response.status_int, 404) @@ -281,7 +290,7 @@ class RequestDeserializerTest(test.TestCase): self.body_deserializers = { 'application/json': JSONDeserializer(), - 'application/XML': XMLDeserializer(), + 'application/xml': XMLDeserializer(), } self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) @@ -290,8 +299,9 @@ class RequestDeserializerTest(test.TestCase): pass def test_get_deserializer(self): - expected = self.deserializer.get_body_deserializer('application/json') - self.assertEqual(expected, self.body_deserializers['application/json']) + ctype = 'application/json' + expected = self.deserializer.get_body_deserializer(ctype) + self.assertEqual(expected, self.body_deserializers[ctype]) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, @@ -299,10 +309,11 @@ class RequestDeserializerTest(test.TestCase): 'application/unknown') def test_get_expected_content_type(self): + ctype = 'application/json' request = wsgi.Request.blank('/') - request.headers['Accept'] = 'application/json' + request.headers['Accept'] = ctype self.assertEqual(self.deserializer.get_expected_content_type(request), - 'application/json') + ctype) def test_get_action_args(self): env = { diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 19028a451..cdbfba63a 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -125,10 +125,11 @@ def stub_out_db_network_api(stubs): if ips[0]['fixed_ip']: fixed_ip_address = ips[0]['fixed_ip']['address'] ips[0]['fixed_ip'] = None + ips[0]['host'] = None return fixed_ip_address def fake_floating_ip_fixed_ip_associate(context, floating_address, - fixed_address): + fixed_address, host): float = filter(lambda i: i['address'] == floating_address, floating_ips) fixed = filter(lambda i: i['address'] == fixed_address, @@ -136,6 +137,7 @@ def stub_out_db_network_api(stubs): if float and fixed: float[0]['fixed_ip'] = fixed[0] float[0]['fixed_ip_id'] = fixed[0]['id'] + float[0]['host'] = host def 
fake_floating_ip_get_all_by_host(context, host): # TODO(jkoelker): Once we get the patches that remove host from diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 1ecb99b31..142206755 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -25,6 +25,36 @@ HOST = "testhost" FLAGS = flags.FLAGS +class FakeIptablesFirewallDriver(object): + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + + def apply_instance_filter(self, instance, network_info): + pass + + +class FakeVIFDriver(object): + + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + + def plug(self, instance, network, mapping): + return { + 'id': 'fake', + 'bridge_name': 'fake', + 'mac_address': 'fake', + 'ip_address': 'fake', + 'dhcp_server': 'fake', + 'extra_params': 'fake', + } + + class FakeModel(dict): """Represent a model from the db""" def __init__(self, *args, **kwargs): diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 74baaacc2..cf013da1d 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +from lxml import etree + from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.api.openstack import common @@ -34,9 +36,8 @@ class XmlTests(integrated_helpers._IntegratedTestBase): response = self.api.api_request('/limits', headers=headers) data = response.read() LOG.debug("data: %s" % data) - - prefix = '<limits xmlns="%s"' % common.XML_NS_V11 - self.assertTrue(data.startswith(prefix)) + root = etree.XML(data) + self.assertEqual(root.nsmap.get(None), common.XML_NS_V11) def test_namespace_servers(self): """/servers should have v1.1 namespace (has changed in 1.1).""" @@ -46,6 +47,5 @@ class XmlTests(integrated_helpers._IntegratedTestBase): response = self.api.api_request('/servers', headers=headers) data = response.read() LOG.debug("data: %s" % data) - - prefix = '<servers xmlns="%s"' % common.XML_NS_V11 - self.assertTrue(data.startswith(prefix)) + root = etree.XML(data) + self.assertEqual(root.nsmap.get(None), common.XML_NS_V11) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 233ee14de..1924ae050 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -35,6 +35,7 @@ from nova import utils from nova.api.ec2 import cloud from nova.compute import power_state from nova.compute import vm_states +from nova.virt import driver from nova.virt.libvirt import connection from nova.virt.libvirt import firewall from nova.tests import fake_network @@ -51,6 +52,41 @@ def _concurrency(wait, done, target): done.send() +class FakeVirDomainSnapshot(object): + + def __init__(self, dom=None): + self.dom = dom + + def delete(self, flags): + pass + + +class FakeVirtDomain(object): + + def __init__(self, fake_xml=None): + if fake_xml: + self._fake_dom_xml = fake_xml + else: + self._fake_dom_xml = """ + <domain type='kvm'> + <devices> + <disk type='file'> + <source file='filename'/> + </disk> + </devices> + </domain> + """ + + def snapshotCreateXML(self, *args): + return FakeVirDomainSnapshot(self) + + def createWithFlags(self, launch_flags): + pass + + def XMLDesc(self, *args): + return self._fake_dom_xml + + class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() @@ -152,70 +188,24 @@ class 
LibvirtConnTestCase(test.TestCase): # A fake libvirt.virConnect class FakeLibvirtConnection(object): - pass - - # A fake connection.IptablesFirewallDriver - class FakeIptablesFirewallDriver(object): - - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - # A fake VIF driver - class FakeVIFDriver(object): - - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - def plug(self, instance, network, mapping): - return { - 'id': 'fake', - 'bridge_name': 'fake', - 'mac_address': 'fake', - 'ip_address': 'fake', - 'dhcp_server': 'fake', - 'extra_params': 'fake', - } + def defineXML(self, xml): + return FakeVirtDomain() # Creating mocks fake = FakeLibvirtConnection() - fakeip = FakeIptablesFirewallDriver - fakevif = FakeVIFDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) - # Inevitable mocks for connection.LibvirtConnection - self.mox.StubOutWithMock(connection.utils, 'import_class') - connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(connection.utils, 'import_object') - connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif) + self.flags(image_service='nova.image.fake.FakeImageService') + fw_driver = "nova.tests.fake_network.FakeIptablesFirewallDriver" + self.flags(firewall_driver=fw_driver) + self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver") + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') connection.LibvirtConnection._conn = fake def fake_lookup(self, instance_name): - - class FakeVirtDomain(object): - - def snapshotCreateXML(self, *args): - return None - - def XMLDesc(self, *args): - return """ - <domain type='kvm'> - <devices> - <disk type='file'> - <source file='filename'/> - </disk> - </devices> - </domain> - """ - return FakeVirtDomain() def fake_execute(self, *args): @@ -797,8 +787,6 @@ class LibvirtConnTestCase(test.TestCase): shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) shutil.rmtree(os.path.join(FLAGS.instances_path, '_base')) - self.assertTrue(count) - def test_get_host_ip_addr(self): conn = connection.LibvirtConnection(False) ip = conn.get_host_ip_addr() @@ -840,6 +828,50 @@ class LibvirtConnTestCase(test.TestCase): _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) + def test_reboot_signature(self): + """Test that libvirt driver method sig matches interface""" + def fake_reboot_with_correct_sig(ignore, instance, + network_info, reboot_type): + pass + + def fake_destroy(instance, network_info, cleanup=False): + pass + + def fake_plug_vifs(instance, network_info): + pass + + def fake_create_new_domain(xml): + return + + def fake_none(self, instance): + return + + instance = db.instance_create(self.context, self.test_instance) + network_info = _fake_network_info(self.stubs, 1) + + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = self.fake_lookup + + conn = connection.LibvirtConnection(False) + self.stubs.Set(conn, 'destroy', fake_destroy) + self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs) + self.stubs.Set(conn.firewall_driver, + 'setup_basic_filtering', + fake_none) + self.stubs.Set(conn.firewall_driver, + 'prepare_instance_filter', + fake_none) + self.stubs.Set(conn, '_create_new_domain', fake_create_new_domain) + self.stubs.Set(conn.firewall_driver, + 'apply_instance_filter', + fake_none) + + args = [instance, network_info, 
'SOFT'] + conn.reboot(*args) + + compute_driver = driver.ComputeDriver() + self.assertRaises(NotImplementedError, compute_driver.reboot, *args) + class NWFilterFakes: def __init__(self): diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index a8a03b56b..2cacd2364 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -364,7 +364,7 @@ class XenAPIVMTestCase(test.TestCase): def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", - architecture="x86-64", instance_id=1, + hostname="test", architecture="x86-64", instance_id=1, check_injection=False, create_record=True, empty_dns=False): stubs.stubout_loopingcall_start(self.stubs) @@ -377,6 +377,7 @@ class XenAPIVMTestCase(test.TestCase): 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, 'os_type': os_type, + 'hostname': hostname, 'architecture': architecture} instance = db.instance_create(self.context, instance_values) else: diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py index 0ed5e9b68..7de10e612 100644 --- a/nova/tests/vmwareapi/stubs.py +++ b/nova/tests/vmwareapi/stubs.py @@ -47,7 +47,5 @@ def set_stubs(stubs): stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
fake_get_vim_object)
- stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
- fake_get_vim_object)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
fake_is_vim_object)
diff --git a/nova/virt/disk.py b/nova/virt/disk.py index cd3422829..9fe164cfb 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -52,6 +52,47 @@ flags.DEFINE_integer('timeout_nbd', 10, flags.DEFINE_integer('max_nbd_devices', 16, 'maximum number of possible nbd devices') +# NOTE(yamahata): DEFINE_list() doesn't work because the command may +# include ','. For example, +# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 +# --label %(fs_label)s %(target)s +# +# DEFINE_list() parses its argument by +# [s.strip() for s in argument.split(self._token)] +# where self._token = ',' +# No escape nor exceptional handling for ','. +# DEFINE_list() doesn't give us what we need. +flags.DEFINE_multistring('virt_mkfs', + ['windows=mkfs.ntfs --fast --label %(fs_label)s ' + '%(target)s', + # NOTE(yamahata): vfat case + #'windows=mkfs.vfat -n %(fs_label)s %(target)s', + 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', + 'default=mkfs.ext3 -L %(fs_label)s -F %(target)s'], + 'mkfs commands for ephemeral device. The format is' + '<os_type>=<mkfs command>') + + +_MKFS_COMMAND = {} +_DEFAULT_MKFS_COMMAND = None + + +for s in FLAGS.virt_mkfs: + # NOTE(yamahata): mkfs command may includes '=' for its options. + # So item.partition('=') doesn't work here + os_type, mkfs_command = s.split('=', 1) + if os_type: + _MKFS_COMMAND[os_type] = mkfs_command + if os_type == 'default': + _DEFAULT_MKFS_COMMAND = mkfs_command + + +def mkfs(os_type, fs_label, target): + mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or + '') % locals() + if mkfs_command: + utils.execute(*mkfs_command.split()) + def extend(image, size): """Increase image to size""" diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index f591ce02c..adbbdfae0 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -29,15 +29,16 @@ Supports KVM, LXC, QEMU, UML, and XEN. (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). :libvirt_xml_template: Libvirt XML Template. -:rescue_image_id: Rescue ami image (default: ami-rescue). -:rescue_kernel_id: Rescue aki image (default: aki-rescue). -:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). +:rescue_image_id: Rescue ami image (None = original image). +:rescue_kernel_id: Rescue aki image (None = original image). +:rescue_ramdisk_id: Rescue ari image (None = original image). 
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import hashlib
+import functools
import multiprocessing
import netaddr
import os
@@ -83,9 +84,9 @@
LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
-flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
-flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
-flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
+flags.DEFINE_string('rescue_image_id', None, 'Rescue ami image')
+flags.DEFINE_string('rescue_kernel_id', None, 'Rescue aki image')
+flags.DEFINE_string('rescue_ramdisk_id', None, 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -464,9 +465,10 @@ class LibvirtConnection(driver.ComputeDriver):
# Clean up
shutil.rmtree(temp_dir)
+ snapshot_ptr.delete(0)
@exception.wrap_exception()
- def reboot(self, instance, network_info):
+ def reboot(self, instance, network_info, reboot_type=None, xml=None):
"""Reboot a virtual machine, given an instance reference.
This method actually destroys and re-creates the domain to ensure the
@@ -477,7 +479,9 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(itoumsn): Use XML derived from the running instance
# instead of using to_xml(instance, network_info). This is almost
# the ultimate stupid workaround.
- xml = virt_dom.XMLDesc(0)
+ if not xml:
+ xml = virt_dom.XMLDesc(0)
+
# NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
# better because we cannot ensure flushing dirty buffers
# in the guest OS. But, in case of KVM, shutdown() does not work...
@@ -541,43 +545,42 @@ class LibvirtConnection(driver.ComputeDriver):
data recovery.
""" - self.destroy(instance, network_info, cleanup=False) - - xml = self.to_xml(instance, network_info, rescue=True) - rescue_images = {'image_id': FLAGS.rescue_image_id, - 'kernel_id': FLAGS.rescue_kernel_id, - 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(context, instance, xml, '.rescue', rescue_images) - self._create_new_domain(xml) - - def _wait_for_rescue(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - if state == power_state.RUNNING: - msg = _("Instance %s rescued successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone + virt_dom = self._conn.lookupByName(instance['name']) + unrescue_xml = virt_dom.XMLDesc(0) + unrescue_xml_path = os.path.join(FLAGS.instances_path, + instance['name'], + 'unrescue.xml') + f = open(unrescue_xml_path, 'w') + f.write(unrescue_xml) + f.close() - timer = utils.LoopingCall(_wait_for_rescue) - return timer.start(interval=0.5, now=True) + xml = self.to_xml(instance, network_info, rescue=True) + rescue_images = { + 'image_id': FLAGS.rescue_image_id or instance['image_ref'], + 'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'], + 'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'], + } + self._create_image(context, instance, xml, '.rescue', rescue_images, + network_info=network_info) + self.reboot(instance, network_info, xml=xml) @exception.wrap_exception() - def unrescue(self, instance, network_info): + def unrescue(self, instance, callback, network_info): """Reboot the VM which is being rescued back into primary images. Because reboot destroys and re-creates instances, unresue should simply call reboot. 
""" - self.reboot(instance, network_info) + unrescue_xml_path = os.path.join(FLAGS.instances_path, + instance['name'], + 'unrescue.xml') + f = open(unrescue_xml_path) + unrescue_xml = f.read() + f.close() + os.remove(unrescue_xml_path) + self.reboot(instance, network_info, xml=unrescue_xml) @exception.wrap_exception() def poll_rescued_instances(self, timeout): @@ -778,6 +781,10 @@ class LibvirtConnection(driver.ComputeDriver): if fs_format: utils.execute('mkfs', '-t', fs_format, target) + def _create_ephemeral(self, target, local_size, fs_label, os_type): + self._create_local(target, local_size) + disk.mkfs(os_type, fs_label, target) + def _create_swap(self, target, swap_gb): """Create a swap file of specified size""" self._create_local(target, swap_gb) @@ -808,8 +815,10 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('mkdir', '-p', container_dir) # NOTE(vish): No need add the suffix to console.log - os.close(os.open(basepath('console.log', ''), - os.O_CREAT | os.O_WRONLY, 0660)) + console_log = basepath('console.log', '') + if os.path.exists(console_log): + utils.execute('chown', os.getuid(), console_log, run_as_root=True) + os.close(os.open(console_log, os.O_CREAT | os.O_WRONLY, 0660)) if not disk_images: disk_images = {'image_id': inst['image_ref'], @@ -866,9 +875,13 @@ class LibvirtConnection(driver.ComputeDriver): local_size=local_gb) for eph in driver.block_device_info_get_ephemerals(block_device_info): - self._cache_image(fn=self._create_local, + fn = functools.partial(self._create_ephemeral, + fs_label='ephemeral%d' % eph['num'], + os_type=inst.os_type) + self._cache_image(fn=fn, target=basepath(_get_eph_disk(eph)), - fname="local_%s" % eph['size'], + fname="ephemeral_%s_%s_%s" % + (eph['num'], eph['size'], inst.os_type), cow=FLAGS.use_cow_images, local_size=eph['size']) @@ -981,15 +994,16 @@ class LibvirtConnection(driver.ComputeDriver): nbd=FLAGS.use_cow_images, tune2fs=tune2fs) - if FLAGS.libvirt_type == 'lxc': - disk.setup_container(basepath('disk'), - container_dir=container_dir, - nbd=FLAGS.use_cow_images) except Exception as e: # This could be a windows image, or a vmdk format disk LOG.warn(_('instance %(inst_name)s: ignoring error injecting' ' data into image %(img_id)s (%(e)s)') % locals()) + if FLAGS.libvirt_type == 'lxc': + disk.setup_container(basepath('disk'), + container_dir=container_dir, + nbd=FLAGS.use_cow_images) + if FLAGS.libvirt_type == 'uml': utils.execute('chown', 'root', basepath('disk'), run_as_root=True) @@ -1102,6 +1116,11 @@ class LibvirtConnection(driver.ComputeDriver): nova_context.get_admin_context(), instance['id'], {'root_device_name': '/dev/' + self.default_root_device}) + if local_device: + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'default_local_device': '/dev/' + self.default_local_device}) + swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): xml_info['swap_device'] = block_device.strip_dev( @@ -1110,6 +1129,9 @@ class LibvirtConnection(driver.ComputeDriver): not self._volume_in_mapping(self.default_swap_device, block_device_info)): xml_info['swap_device'] = self.default_swap_device + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'default_swap_device': '/dev/' + self.default_swap_device}) config_drive = False if instance.get('config_drive') or instance.get('config_drive_id'): diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 0dea13aba..ae00bca0f 100644 --- a/nova/virt/vmwareapi/fake.py +++ 
b/nova/virt/vmwareapi/fake.py
@@ -409,7 +409,7 @@ def fake_plug_vifs(*args, **kwargs):
def fake_get_network(*args, **kwargs):
"""Fake get network."""
- return [{'type': 'fake'}]
+ return {'type': 'fake'}
def fake_fetch_image(context, image, instance, **kwargs):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index fb6548b34..9906b89e1 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -17,42 +17,35 @@ """VIF drivers for VMWare.""" -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import utils from nova.virt.vif import VIFDriver -from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils LOG = logging.getLogger("nova.virt.vmwareapi.vif") FLAGS = flags.FLAGS +FLAGS['vmwareapi_vlan_interface'].SetDefault('vmnic0') class VMWareVlanBridgeDriver(VIFDriver): """VIF Driver to setup bridge/VLAN networking using VMWare API.""" def plug(self, instance, network, mapping): + """Plug the VIF to specified instance using information passed. + Currently we are plugging the VIF(s) during instance creation itself. + We can use this method when we add support to add additional NIC to + an existing instance.""" + pass + + def ensure_vlan_bridge(self, session, network): """Create a vlan and bridge unless they already exist.""" vlan_num = network['vlan'] bridge = network['bridge'] - bridge_interface = network['bridge_interface'] + vlan_interface = FLAGS.vmwareapi_vlan_interface - # Open vmwareapi session - host_ip = FLAGS.vmwareapi_host_ip - host_username = FLAGS.vmwareapi_host_username - host_password = FLAGS.vmwareapi_host_password - if not host_ip or host_username is None or host_password is None: - raise Exception(_('Must specify vmwareapi_host_ip, ' - 'vmwareapi_host_username ' - 'and vmwareapi_host_password to use ' - 'connection_type=vmwareapi')) - session = VMWareAPISession(host_ip, host_username, host_password, - FLAGS.vmwareapi_api_retry_count) - vlan_interface = bridge_interface # Check if the vlan_interface physical network adapter exists on the # host. if not network_utils.check_if_vlan_interface_exists(session, @@ -92,4 +85,6 @@ class VMWareVlanBridgeDriver(VIFDriver): pgroup=pg_vlanid) def unplug(self, instance, network, mapping): + """Cleanup operations like deleting port group if no instance + is associated with it.""" pass diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 82b5f7214..dd1c81196 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -39,8 +39,7 @@ def split_datastore_path(datastore_path): def get_vm_create_spec(client_factory, instance, data_store_name,
- network_name="vmnet0",
- os_type="otherGuest", network_ref=None):
+ vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.name
@@ -61,14 +60,12 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
config_spec.numCPUs = int(instance.vcpus)
config_spec.memoryMB = int(instance.memory_mb)
- mac_address = None
- if instance['mac_addresses']:
- mac_address = instance['mac_addresses'][0]['address']
+ vif_spec_list = []
+ for vif_info in vif_infos:
+ vif_spec = create_network_spec(client_factory, vif_info)
+ vif_spec_list.append(vif_spec)
- nic_spec = create_network_spec(client_factory,
- network_name, mac_address)
-
- device_config_spec = [nic_spec]
+ device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
return config_spec
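
For illustration (not part of the patch): after this refactor, get_vm_create_spec() expects a list of per-VIF dicts rather than a single network name and MAC address. A minimal sketch of the structure a caller would assemble, using invented values; the keys are the ones create_network_spec() reads in the hunk below, and the network_ref shape mirrors fake_get_network() in nova/virt/vmwareapi/fake.py:

# --- illustrative sketch, not part of the patch ---
vif_infos = [
    {'network_name': 'br100',               # bridge / port group name (invented)
     'mac_address': '02:16:3e:00:00:01',    # invented MAC
     'network_ref': {'type': 'fake'}},      # shape mirrors fake_get_network()
]
# config_spec = vm_util.get_vm_create_spec(client_factory, instance,
#                                          data_store_name, vif_infos,
#                                          os_type='otherGuest')
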
@@ -93,8 +90,7 @@ def create_controller_spec(client_factory, key):
return virtual_device_config
-def create_network_spec(client_factory, network_name, mac_address,
- network_ref=None):
+def create_network_spec(client_factory, vif_info):
"""
Builds a config spec for the addition of a new network
adapter to the VM.
@@ -109,6 +105,9 @@ def create_network_spec(client_factory, network_name, mac_address,
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
+ network_ref = vif_info['network_ref']
+ network_name = vif_info['network_name']
+ mac_address = vif_info['mac_address']
backing = None
if (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
@@ -295,11 +294,8 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name):
return config_spec
-def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask,
- gateway, broadcast, dns):
+def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
- machine_id_str = "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask,
- gateway, broadcast, dns)
virtual_machine_config_spec = \
client_factory.create('ns0:VirtualMachineConfigSpec')
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 6bdc2f23a..063b84a62 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,7 +27,6 @@ import urllib2
import uuid
from nova import context as nova_context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -111,22 +110,6 @@ class VMWareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- network = db.network_get_by_instance(nova_context.get_admin_context(),
- instance['id'])
-
- net_name = network['bridge']
-
- def _check_if_network_bridge_exists():
- network_ref = \
- network_utils.get_network_with_the_name(self._session,
- net_name)
- if network_ref is None:
- raise exception.NetworkNotFoundForBridge(bridge=net_name)
- return network_ref
-
- self.plug_vifs(instance, network_info)
- network_obj = _check_if_network_bridge_exists()
-
def _get_datastore_ref():
"""Get the datastore list and choose the first local storage."""
data_stores = self._session._call_method(vim_util, "get_objects",
@@ -182,11 +165,36 @@ class VMWareVMOps(object):
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+ def _check_if_network_bridge_exists(network_name):
+ network_ref = \
+ network_utils.get_network_with_the_name(self._session,
+ network_name)
+ if network_ref is None:
+ raise exception.NetworkNotFoundForBridge(bridge=network_name)
+ return network_ref
+
+ def _get_vif_infos():
+ vif_infos = []
+ for (network, mapping) in network_info:
+ mac_address = mapping['mac']
+ network_name = network['bridge']
+ if mapping.get('should_create_vlan'):
+ network_ref = self._vif_driver.ensure_vlan_bridge(
+ self._session, network)
+ else:
+ network_ref = _check_if_network_bridge_exists(network_name)
+ vif_infos.append({'network_name': network_name,
+ 'mac_address': mac_address,
+ 'network_ref': network_ref,
+ })
+ return vif_infos
+
+ vif_infos = _get_vif_infos()
+
# Get the create vm config spec
config_spec = vm_util.get_vm_create_spec(
client_factory, instance,
- data_store_name, net_name, os_type,
- network_obj)
+ data_store_name, vif_infos, os_type)
def _execute_create_vm():
"""Create VM on ESX host."""
@@ -204,8 +212,10 @@ class VMWareVMOps(object):
_execute_create_vm()
- # Set the machine id for the VM for setting the IP
- self._set_machine_id(client_factory, instance)
+ # Set the machine.id parameter of the instance to inject
+ # the NIC configuration inside the VM
+ if FLAGS.flat_injected:
+ self._set_machine_id(client_factory, instance, network_info)
# Naming the VM files in correspondence with the VM instance name
# The flat vmdk file name
@@ -718,39 +728,45 @@ class VMWareVMOps(object):
"""Return link to instance's ajax console."""
return 'http://fakeajaxconsole/fake_url'
- def _set_machine_id(self, client_factory, instance):
+ def _set_machine_id(self, client_factory, instance, network_info):
"""
- Set the machine id of the VM for guest tools to pick up and change
- the IP.
+ Set the machine id of the VM for guest tools to pick up and reconfigure
+ the network interfaces.
"""
- admin_context = nova_context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
- network = db.network_get_by_instance(nova_context.get_admin_context(),
- instance['id'])
- mac_address = None
- if instance['mac_addresses']:
- mac_address = instance['mac_addresses'][0]['address']
-
- net_mask = network["netmask"]
- gateway = network["gateway"]
- broadcast = network["broadcast"]
- # TODO(vish): add support for dns2
- dns = network["dns1"]
-
- addresses = db.instance_get_fixed_addresses(admin_context,
- instance['id'])
- ip_addr = addresses[0] if addresses else None
+
+ machine_id_str = ''
+ for (network, info) in network_info:
+ # TODO(vish): add support for dns2
+ # TODO(sateesh): add support for injection of ipv6 configuration
+ ip_v4 = ip_v6 = None
+ if 'ips' in info and len(info['ips']) > 0:
+ ip_v4 = info['ips'][0]
+ if 'ip6s' in info and len(info['ip6s']) > 0:
+ ip_v6 = info['ip6s'][0]
+ if len(info['dns']) > 0:
+ dns = info['dns'][0]
+ else:
+ dns = ''
+
+ interface_str = "%s;%s;%s;%s;%s;%s" % \
+ (info['mac'],
+ ip_v4 and ip_v4['ip'] or '',
+ ip_v4 and ip_v4['netmask'] or '',
+ info['gateway'],
+ info['broadcast'],
+ dns)
+ machine_id_str = machine_id_str + interface_str + '#'
machine_id_change_spec = \
- vm_util.get_machine_id_change_spec(client_factory, mac_address,
- ip_addr, net_mask, gateway,
- broadcast, dns)
+ vm_util.get_machine_id_change_spec(client_factory, machine_id_str)
+
LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
({'name': instance.name,
- 'ip_addr': ip_addr}))
+ 'ip_addr': ip_v4['ip']}))
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=machine_id_change_spec)
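
For illustration (not part of the patch): the loop above serializes one 'MAC;IP;Netmask;Gateway;Broadcast;DNS' record per NIC and joins the records with '#'. A rough, self-contained sketch of that assembly, using invented values:

# --- illustrative sketch, not part of the patch ---
def build_machine_id_str(network_info):
    # One ';'-separated record per NIC, records joined by '#'
    machine_id_str = ''
    for network, info in network_info:
        ip_v4 = info['ips'][0] if info.get('ips') else None
        dns = info['dns'][0] if info.get('dns') else ''
        machine_id_str += ';'.join([info['mac'],
                                    ip_v4['ip'] if ip_v4 else '',
                                    ip_v4['netmask'] if ip_v4 else '',
                                    info['gateway'],
                                    info['broadcast'],
                                    dns]) + '#'
    return machine_id_str
# Hypothetical single-NIC input:
#   {'mac': '02:16:3e:00:00:01', 'gateway': '10.0.0.1', 'broadcast': '10.0.0.255',
#    'dns': ['8.8.8.8'], 'ips': [{'ip': '10.0.0.2', 'netmask': '255.255.255.0'}]}
# yields '02:16:3e:00:00:01;10.0.0.2;255.255.255.0;10.0.0.1;10.0.0.255;8.8.8.8#'
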
@@ -758,7 +774,7 @@ class VMWareVMOps(object):
LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
({'name': instance.name,
- 'ip_addr': ip_addr}))
+ 'ip_addr': ip_v4['ip']}))
def _get_datacenter_name_and_ref(self):
"""Get the datacenter name and the reference."""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 74c1197ae..88b7fb297 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -302,6 +302,8 @@ class VMOps(object): self.create_vifs(vm_ref, instance, network_info) self.inject_network_info(instance, network_info, vm_ref) + self.inject_hostname(instance, vm_ref, instance['hostname']) + return vm_ref def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref, @@ -1254,6 +1256,16 @@ class VMOps(object): resp = self._make_plugin_call('agent', 'resetnetwork', instance, '', args, vm_ref) + def inject_hostname(self, instance, vm_ref, hostname): + """Inject the hostname of the instance into the xenstore.""" + if instance.os_type == "windows": + # NOTE(jk0): Windows hostnames can only be <= 15 chars. + hostname = hostname[:15] + + logging.debug(_("injecting hostname to xs for vm: |%s|"), vm_ref) + self._session.call_xenapi_request("VM.add_to_xenstore_data", + (vm_ref, "vm-data/hostname", hostname)) + def list_from_xenstore(self, vm, path): """ Runs the xenstore-ls command to get a listing of all records diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 35e3ea8d0..e5bb498ed 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -221,7 +221,14 @@ class VolumeDriver(object): class AOEDriver(VolumeDriver): - """Implements AOE specific volume commands.""" + """WARNING! Deprecated. This driver will be removed in Essex. Its use + is not recommended. + + Implements AOE specific volume commands.""" + + def __init__(self, *args, **kwargs): + LOG.warn(_("AOEDriver is deprecated and will be removed in Essex")) + super(AOEDriver, self).__init__(*args, **kwargs) def ensure_export(self, context, volume): # NOTE(vish): we depend on vblade-persist for recreating exports diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py index 97b5302ba..5158d883a 100644 --- a/tools/esx/guest_tool.py +++ b/tools/esx/guest_tool.py @@ -81,28 +81,34 @@ def _bytes2int(bytes): def _parse_network_details(machine_id):
"""
- Parse the machine.id field to get MAC, IP, Netmask and Gateway fields
- machine.id is of the form MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2
- where ';' is the separator.
+ Parse the machine_id to get MAC, IP, Netmask and Gateway fields per NIC.
+ machine_id is of the form ('NIC_record#NIC_record#', '')
+ Each of the NIC will have record NIC_record in the form
+ 'MAC;IP;Netmask;Gateway;Broadcast;DNS' where ';' is field separator.
+ Each record is separated by '#' from next record.
"""
+ logging.debug(_("Received machine_id from vmtools : %s") % machine_id[0])
network_details = []
if machine_id[1].strip() == "1":
pass
else:
- network_info_list = machine_id[0].split(';')
- assert len(network_info_list) % 6 == 0
- no_grps = len(network_info_list) / 6
- i = 0
- while i < no_grps:
- k = i * 6
- network_details.append((
- network_info_list[k].strip().lower(),
- network_info_list[k + 1].strip(),
- network_info_list[k + 2].strip(),
- network_info_list[k + 3].strip(),
- network_info_list[k + 4].strip(),
- network_info_list[k + 5].strip().split(',')))
- i += 1
+ for machine_id_str in machine_id[0].split('#'):
+ network_info_list = machine_id_str.split(';')
+ if len(network_info_list) % 6 != 0:
+ break
+ no_grps = len(network_info_list) / 6
+ i = 0
+ while i < no_grps:
+ k = i * 6
+ network_details.append((
+ network_info_list[k].strip().lower(),
+ network_info_list[k + 1].strip(),
+ network_info_list[k + 2].strip(),
+ network_info_list[k + 3].strip(),
+ network_info_list[k + 4].strip(),
+ network_info_list[k + 5].strip().split(',')))
+ i += 1
+ logging.debug(_("NIC information from vmtools : %s") % network_details)
return network_details
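
For illustration (not part of the patch), a worked example of the parsing above with invented addresses; the second tuple element is the status flag checked at the top of the function:

# --- illustrative sketch, not part of the patch ---
machine_id = ('02:16:3e:00:00:01;10.0.0.2;255.255.255.0;'
              '10.0.0.1;10.0.0.255;8.8.8.8,8.8.4.4#', '0')
# _parse_network_details(machine_id) would return roughly:
# [('02:16:3e:00:00:01', '10.0.0.2', '255.255.255.0',
#   '10.0.0.1', '10.0.0.255', ['8.8.8.8', '8.8.4.4'])]
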
@@ -279,6 +285,7 @@ def _filter_duplicates(all_entries):
def _set_rhel_networking(network_details=None):
+ """Set IPv4 network settings for RHEL distros."""
network_details = network_details or []
all_dns_servers = []
for network_detail in network_details:
@@ -320,31 +327,33 @@ def _set_rhel_networking(network_details=None):
def _set_ubuntu_networking(network_details=None):
+ """Set IPv4 network settings for Ubuntu."""
network_details = network_details or []
- """ Set IPv4 network settings for Ubuntu """
all_dns_servers = []
- for network_detail in network_details:
+ interface_file_name = '/etc/network/interfaces'
+ # Remove file
+ os.remove(interface_file_name)
+ # Touch file
+ _execute(['touch', interface_file_name])
+ interface_file = open(interface_file_name, 'w')
+ for device, network_detail in enumerate(network_details):
mac_address, ip_address, subnet_mask, gateway, broadcast,\
dns_servers = network_detail
all_dns_servers.extend(dns_servers)
adapter_name, current_ip_address = \
_get_linux_adapter_name_and_ip_address(mac_address)
- if adapter_name and not ip_address == current_ip_address:
- interface_file_name = \
- '/etc/network/interfaces'
- # Remove file
- os.remove(interface_file_name)
- # Touch file
- _execute(['touch', interface_file_name])
- interface_file = open(interface_file_name, 'w')
+ if adapter_name:
interface_file.write('\nauto %s' % adapter_name)
interface_file.write('\niface %s inet static' % adapter_name)
interface_file.write('\nbroadcast %s' % broadcast)
interface_file.write('\ngateway %s' % gateway)
interface_file.write('\nnetmask %s' % subnet_mask)
- interface_file.write('\naddress %s' % ip_address)
- interface_file.close()
+ interface_file.write('\naddress %s\n' % ip_address)
+ logging.debug(_("Successfully configured NIC %d with "
+ "NIC info %s") % (device, network_detail))
+ interface_file.close()
+
if all_dns_servers:
dns_file_name = "/etc/resolv.conf"
os.remove(dns_file_name)
@@ -355,7 +364,8 @@ def _set_ubuntu_networking(network_details=None):
for dns_server in unique_entries:
dns_file.write("\nnameserver %s" % dns_server)
dns_file.close()
- print "\nRestarting networking....\n"
+
+ logging.debug(_("Restarting networking....\n"))
_execute(['/etc/init.d/networking', 'restart'])
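
For illustration (not part of the patch), the stanza the rewritten loop appends to /etc/network/interfaces for a single adapter, assuming an invented adapter name and addresses:

# --- illustrative sketch, not part of the patch ---
stanza = ('\nauto eth0'
          '\niface eth0 inet static'
          '\nbroadcast 10.0.0.255'
          '\ngateway 10.0.0.1'
          '\nnetmask 255.255.255.0'
          '\naddress 10.0.0.2\n')
# /etc/resolv.conf is then rewritten with one 'nameserver <ip>' line per
# unique DNS server collected across all NICs.
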
diff --git a/tools/pip-requires b/tools/pip-requires
index 66d6a48d9..a4af326dc 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -35,3 +35,4 @@ coverage
nosexcover
GitPython
paramiko
+feedparser
