| author | Tushar Patil <tushar.vitthal.patil@gmail.com> | 2011-08-09 16:26:12 -0700 |
|---|---|---|
| committer | Tushar Patil <tushar.vitthal.patil@gmail.com> | 2011-08-09 16:26:12 -0700 |
| commit | 8a8b71b2eaf72b03c0c2bc847b449d2d640fc6c0 | |
| tree | b5be7eacff26e098b93eff60b90e57a25160cb6c | |
| parent | 96631a9e1188d1781381cafc409c2ec3ead895fb | |
| parent | 4b3165429797d40da17f5c59aaeadb00673b71b2 | |
Merged with trunk
66 files changed, 3655 insertions, 626 deletions
@@ -18,6 +18,8 @@
 <devin.carlen@gmail.com> <devcamcar@illian.local>
 <ewan.mellor@citrix.com> <emellor@silver>
 <itoumsn@nttdata.co.jp> <itoumsn@shayol>
+<jake@ansolabs.com> <jake@markupisart.com>
+<jake@ansolabs.com> <admin@jakedahn.com>
 <jaypipes@gmail.com> <jpipes@serialcoder>
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
 <jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
@@ -37,6 +37,7 @@ Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ilyaalekseyev@acm.org>
 Isaku Yamahata <yamahata@valinux.co.jp>
+Jake Dahn <jake@ansolabs.com>
 Jason Cannavale <jason.cannavale@rackspace.com>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
diff --git a/MANIFEST.in b/MANIFEST.in
index 421cd806a..883aba8a1 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -10,6 +10,7 @@ graft bzrplugins
 graft contrib
 graft po
 graft plugins
+graft nova/api/openstack/schemas
 include nova/api/openstack/notes.txt
 include nova/auth/*.schema
 include nova/auth/novarc.template
diff --git a/bin/nova-manage b/bin/nova-manage
index 563b2d11d..1178841ed 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -726,8 +726,7 @@ class NetworkCommands(object):
             network_size = FLAGS.network_size
             subnet = 32 - int(math.log(network_size, 2))
             oversize_msg = _('Subnet(s) too large, defaulting to /%s.'
-                             ' To override, specify network_size flag.'
-                             ) % subnet
+                             ' To override, specify network_size flag.') % subnet
             print oversize_msg
         else:
             network_size = fixnet.size
@@ -1122,10 +1121,12 @@ class InstanceTypeCommands(object):

     @args('--name', dest='name', metavar='<name>',
             help='Name of instance type/flavor')
-    def delete(self, name, purge=None):
+    @args('--purge', action="store_true", dest='purge', default=False,
+            help='purge record from database')
+    def delete(self, name, purge):
         """Marks instance types / flavors as deleted"""
         try:
-            if purge == "--purge":
+            if purge:
                 instance_types.purge(name)
                 verb = "purged"
             else:
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 804e54ef9..8b6e47cfb 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -354,6 +354,10 @@ class Executor(wsgi.Application):
             LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
                      context=context)
             return self._error(req, context, type(ex).__name__, unicode(ex))
+        except exception.InvalidParameterValue as ex:
+            LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
+                     context=context)
+            return self._error(req, context, type(ex).__name__, unicode(ex))
         except Exception as ex:
             extra = {'environment': req.environ}
             LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 0294c09c5..87bba58c3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -25,11 +25,13 @@ datastore.
import base64 import netaddr import os -import urllib +import re +import shutil import tempfile import time -import shutil +import urllib +from nova import block_device from nova import compute from nova import context @@ -78,6 +80,10 @@ def _gen_key(context, user_id, key_name): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' +_DEFAULT_MAPPINGS = {'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3'} def _parse_block_device_mapping(bdm): @@ -105,7 +111,7 @@ def _parse_block_device_mapping(bdm): def _properties_get_mappings(properties): - return ec2utils.mappings_prepend_dev(properties.get('mappings', [])) + return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): @@ -144,8 +150,7 @@ def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) - if (m['virtual'] == 'swap' or - m['virtual'].startswith('ephemeral'))] + if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] @@ -208,8 +213,9 @@ class CloudController(object): def _get_mpi_data(self, context, project_id): result = {} + search_opts = {'project_id': project_id} for instance in self.compute_api.get_all(context, - project_id=project_id): + search_opts=search_opts): if instance['fixed_ips']: line = '%s slots=%d' % (instance['fixed_ips'][0]['address'], instance['vcpus']) @@ -233,10 +239,39 @@ class CloudController(object): state = 'available' return image['properties'].get('image_state', state) + def _format_instance_mapping(self, ctxt, instance_ref): + root_device_name = instance_ref['root_device_name'] + if root_device_name is None: + return _DEFAULT_MAPPINGS + + mappings = {} + mappings['ami'] = block_device.strip_dev(root_device_name) + mappings['root'] = root_device_name + + # 'ephemeralN' and 'swap' + for bdm in db.block_device_mapping_get_all_by_instance( + ctxt, instance_ref['id']): + if (bdm['volume_id'] or bdm['snapshot_id'] or bdm['no_device']): + continue + + virtual_name = bdm['virtual_name'] + if not virtual_name: + continue + + if block_device.is_swap_or_ephemeral(virtual_name): + mappings[virtual_name] = bdm['device_name'] + + return mappings + def get_metadata(self, address): ctxt = context.get_admin_context() - instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) - if instance_ref is None: + search_opts = {'fixed_ip': address} + try: + instance_ref = self.compute_api.get_all(ctxt, + search_opts=search_opts) + except exception.NotFound: + instance_ref = None + if not instance_ref: return None # This ensures that all attributes of the instance @@ -259,18 +294,14 @@ class CloudController(object): security_groups = db.security_group_get_by_instance(ctxt, instance_ref['id']) security_groups = [x['name'] for x in security_groups] + mappings = self._format_instance_mapping(ctxt, instance_ref) data = { - 'user-data': base64.b64decode(instance_ref['user_data']), + 'user-data': self._format_user_data(instance_ref), 'meta-data': { 'ami-id': image_ec2_id, 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { - # TODO(vish): replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3'}, + 'block-device-mapping': 
mappings, 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, @@ -765,6 +796,22 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): + if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): + # Some validation to ensure that values match API spec. + # - Alphanumeric characters, spaces, dashes, and underscores. + # TODO(Daviey): LP: #813685 extend beyond group_name checking, and + # probably create a param validator that can be used elsewhere. + err = _("Value (%s) for parameter GroupName is invalid." + " Content limited to Alphanumeric characters, " + "spaces, dashes, and underscores.") % group_name + # err not that of master ec2 implementation, as they fail to raise. + raise exception.InvalidParameterValue(err=err) + + if len(str(group_name)) > 255: + err = _("Value (%s) for parameter GroupName is invalid." + " Length exceeds maximum of 255.") % group_name + raise exception.InvalidParameterValue(err=err) + LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): @@ -948,19 +995,113 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - def _convert_to_set(self, lst, label): + @staticmethod + def _convert_to_set(lst, label): if lst is None or lst == []: return None if not isinstance(lst, list): lst = [lst] return [{label: x} for x in lst] + def _format_kernel_id(self, instance_ref, result, key): + kernel_id = instance_ref['kernel_id'] + if kernel_id is None: + return + result[key] = self.image_ec2_id(instance_ref['kernel_id'], 'aki') + + def _format_ramdisk_id(self, instance_ref, result, key): + ramdisk_id = instance_ref['ramdisk_id'] + if ramdisk_id is None: + return + result[key] = self.image_ec2_id(instance_ref['ramdisk_id'], 'ari') + + @staticmethod + def _format_user_data(instance_ref): + return base64.b64decode(instance_ref['user_data']) + + def describe_instance_attribute(self, context, instance_id, attribute, + **kwargs): + def _unsupported_attribute(instance, result): + raise exception.ApiError(_('attribute not supported: %s') % + attribute) + + def _format_attr_block_device_mapping(instance, result): + tmp = {} + self._format_instance_root_device_name(instance, tmp) + self._format_instance_bdm(context, instance_id, + tmp['rootDeviceName'], result) + + def _format_attr_disable_api_termination(instance, result): + _unsupported_attribute(instance, result) + + def _format_attr_group_set(instance, result): + CloudController._format_group_set(instance, result) + + def _format_attr_instance_initiated_shutdown_behavior(instance, + result): + state_description = instance['state_description'] + state_to_value = {'stopping': 'stop', + 'stopped': 'stop', + 'terminating': 'terminate'} + value = state_to_value.get(state_description) + if value: + result['instanceInitiatedShutdownBehavior'] = value + + def _format_attr_instance_type(instance, result): + self._format_instance_type(instance, result) + + def _format_attr_kernel(instance, result): + self._format_kernel_id(instance, result, 'kernel') + + def _format_attr_ramdisk(instance, result): + self._format_ramdisk_id(instance, result, 'ramdisk') + + def _format_attr_root_device_name(instance, result): + self._format_instance_root_device_name(instance, result) + + def _format_attr_source_dest_check(instance, result): + 
_unsupported_attribute(instance, result) + + def _format_attr_user_data(instance, result): + result['userData'] = self._format_user_data(instance) + + attribute_formatter = { + 'blockDeviceMapping': _format_attr_block_device_mapping, + 'disableApiTermination': _format_attr_disable_api_termination, + 'groupSet': _format_attr_group_set, + 'instanceInitiatedShutdownBehavior': + _format_attr_instance_initiated_shutdown_behavior, + 'instanceType': _format_attr_instance_type, + 'kernel': _format_attr_kernel, + 'ramdisk': _format_attr_ramdisk, + 'rootDeviceName': _format_attr_root_device_name, + 'sourceDestCheck': _format_attr_source_dest_check, + 'userData': _format_attr_user_data, + } + + fn = attribute_formatter.get(attribute) + if fn is None: + raise exception.ApiError( + _('attribute not supported: %s') % attribute) + + ec2_instance_id = instance_id + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + instance = self.compute_api.get(context, instance_id) + result = {'instance_id': ec2_instance_id} + fn(instance, result) + return result + def describe_instances(self, context, **kwargs): - return self._format_describe_instances(context, **kwargs) + # Optional DescribeInstances argument + instance_id = kwargs.get('instance_id', None) + return self._format_describe_instances(context, + instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): - kwargs['use_v6'] = True - return self._format_describe_instances(context, **kwargs) + # Optional DescribeInstancesV6 argument + instance_id = kwargs.get('instance_id', None) + return self._format_describe_instances(context, + instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} @@ -1001,7 +1142,29 @@ class CloudController(object): result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type - def _format_instances(self, context, instance_id=None, **kwargs): + @staticmethod + def _format_instance_root_device_name(instance, result): + result['rootDeviceName'] = (instance.get('root_device_name') or + _DEFAULT_ROOT_DEVICE_NAME) + + @staticmethod + def _format_instance_type(instance, result): + if instance['instance_type']: + result['instanceType'] = instance['instance_type'].get('name') + else: + result['instanceType'] = None + + @staticmethod + def _format_group_set(instance, result): + security_group_names = [] + if instance.get('security_groups'): + for security_group in instance['security_groups']: + security_group_names.append(security_group['name']) + result['groupSet'] = CloudController._convert_to_set( + security_group_names, 'groupId') + + def _format_instances(self, context, instance_id=None, use_v6=False, + **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that @@ -1012,11 +1175,17 @@ class CloudController(object): instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - instance = self.compute_api.get(context, - instance_id=internal_id) + try: + instance = self.compute_api.get(context, internal_id) + except exception.NotFound: + continue instances.append(instance) else: - instances = self.compute_api.get_all(context, **kwargs) + try: + instances = self.compute_api.get_all(context, + search_opts=search_opts) + except exception.NotFound: + instances = [] for instance in instances: if not context.is_admin: if 
instance['image_ref'] == str(FLAGS.vpn_image_id): @@ -1026,6 +1195,8 @@ class CloudController(object): ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = self.image_ec2_id(instance['image_ref']) + self._format_kernel_id(instance, i, 'kernelId') + self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -1036,7 +1207,7 @@ class CloudController(object): fixed_addr = fixed['address'] if fixed['floating_ips']: floating_addr = fixed['floating_ips'][0]['address'] - if fixed['network'] and 'use_v6' in kwargs: + if fixed['network'] and use_v6: i['dnsNameV6'] = ipv6.to_global( fixed['network']['cidr_v6'], fixed['virtual_interface']['address'], @@ -1054,16 +1225,12 @@ class CloudController(object): instance['project_id'], instance['host']) i['productCodesSet'] = self._convert_to_set([], 'product_codes') - if instance['instance_type']: - i['instanceType'] = instance['instance_type'].get('name') - else: - i['instanceType'] = None + self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] - i['rootDeviceName'] = (instance.get('root_device_name') or - _DEFAULT_ROOT_DEVICE_NAME) + self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] @@ -1073,12 +1240,7 @@ class CloudController(object): r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] - security_group_names = [] - if instance.get('security_groups'): - for security_group in instance['security_groups']: - security_group_names.append(security_group['name']) - r['groupSet'] = self._convert_to_set(security_group_names, - 'groupId') + self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) @@ -1182,7 +1344,7 @@ class CloudController(object): 'AvailabilityZone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, - instances[0]['reservation_id']) + reservation_id=instances[0]['reservation_id']) def _do_instance(self, action, context, ec2_id): instance_id = ec2utils.ec2_id_to_id(ec2_id) @@ -1314,7 +1476,7 @@ class CloudController(object): i['architecture'] = image['properties'].get('architecture') properties = image['properties'] - root_device_name = ec2utils.properties_root_device_name(properties) + root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and @@ -1387,7 +1549,7 @@ class CloudController(object): def _root_device_name_attribute(image, result): result['rootDeviceName'] = \ - ec2utils.properties_root_device_name(image['properties']) + block_device.properties_root_device_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME @@ -1520,8 +1682,7 @@ class CloudController(object): if virtual_name in ('ami', 'root'): continue - assert (virtual_name == 'swap' or - virtual_name.startswith('ephemeral')) + assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', 
False)]: diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index bae1e0ee5..bcdf2ba78 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -135,32 +135,3 @@ def dict_from_dotted_str(items): args[key] = value return args - - -def properties_root_device_name(properties): - """get root device name from image meta data. - If it isn't specified, return None. - """ - root_device_name = None - - # NOTE(yamahata): see image_service.s3.s3create() - for bdm in properties.get('mappings', []): - if bdm['virtual'] == 'root': - root_device_name = bdm['device'] - - # NOTE(yamahata): register_image's command line can override - # <machine>.manifest.xml - if 'root_device_name' in properties: - root_device_name = properties['root_device_name'] - - return root_device_name - - -def mappings_prepend_dev(mappings): - """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type""" - for m in mappings: - virtual = m['virtual'] - if ((virtual == 'swap' or virtual.startswith('ephemeral')) and - (not m['device'].startswith('/'))): - m['device'] = '/dev/' + m['device'] - return mappings diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 5226cdf9a..dfdd62201 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -25,7 +25,9 @@ import webob from nova import exception from nova import flags from nova import log as logging +from nova import quota from nova.api.openstack import wsgi +from nova.compute import power_state as compute_power_state LOG = logging.getLogger('nova.api.openstack.common') @@ -36,6 +38,38 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +_STATUS_MAP = { + None: 'BUILD', + compute_power_state.NOSTATE: 'BUILD', + compute_power_state.RUNNING: 'ACTIVE', + compute_power_state.BLOCKED: 'ACTIVE', + compute_power_state.SUSPENDED: 'SUSPENDED', + compute_power_state.PAUSED: 'PAUSED', + compute_power_state.SHUTDOWN: 'SHUTDOWN', + compute_power_state.SHUTOFF: 'SHUTOFF', + compute_power_state.CRASHED: 'ERROR', + compute_power_state.FAILED: 'ERROR', + compute_power_state.BUILDING: 'BUILD', +} + + +def status_from_power_state(power_state): + """Map the power state to the server status string""" + return _STATUS_MAP[power_state] + + +def power_states_from_status(status): + """Map the server status string to a list of power states""" + power_states = [] + for power_state, status_map in _STATUS_MAP.iteritems(): + # Skip the 'None' state + if power_state is None: + continue + if status.lower() == status_map.lower(): + power_states.append(power_state) + return power_states + + def get_pagination_params(request): """Return marker, limit tuple from request. 
@@ -156,7 +190,7 @@ def remove_version_from_href(href): """ parsed_url = urlparse.urlsplit(href) new_path = re.sub(r'^/v[0-9]+\.[0-9]+(/|$)', r'\1', parsed_url.path, - count=1) + count=1) if new_path == parsed_url.path: msg = _('href %s does not contain version') % href @@ -193,6 +227,16 @@ def get_version_from_href(href): return version +def check_img_metadata_quota_limit(context, metadata): + if metadata is None: + return + num_metadata = len(metadata) + quota_metadata = quota.allowed_metadata_items(context, num_metadata) + if quota_metadata < num_metadata: + expl = _("Image metadata limit exceeded") + raise webob.exc.HTTPBadRequest(explanation=expl) + + class MetadataXMLDeserializer(wsgi.XMLDeserializer): def extract_metadata(self, metadata_node): diff --git a/nova/api/openstack/contrib/admin_only.py b/nova/api/openstack/contrib/admin_only.py new file mode 100644 index 000000000..e821c9e1f --- /dev/null +++ b/nova/api/openstack/contrib/admin_only.py @@ -0,0 +1,30 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Decorator for limiting extensions that should be admin-only.""" + +from functools import wraps +from nova import flags +FLAGS = flags.FLAGS + + +def admin_only(fnc): + @wraps(fnc) + def _wrapped(self, *args, **kwargs): + if FLAGS.allow_admin_api: + return fnc(self, *args, **kwargs) + return [] + _wrapped.func_name = fnc.func_name + return _wrapped diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 3d8049324..52c9c6cf9 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -18,12 +18,16 @@ from webob import exc from nova import exception +from nova import log as logging from nova import network from nova import rpc from nova.api.openstack import faults from nova.api.openstack import extensions +LOG = logging.getLogger('nova.api.openstack.contrib.floating_ips') + + def _translate_floating_ip_view(floating_ip): result = {'id': floating_ip['id'], 'ip': floating_ip['address']} @@ -97,8 +101,14 @@ class FloatingIPController(object): def delete(self, req, id): context = req.environ['nova.context'] - ip = self.network_api.get_floating_ip(context, id) + + if 'fixed_ip' in ip: + try: + self.disassociate(req, id, '') + except Exception as e: + LOG.exception(_("Error disassociating fixed_ip %s"), e) + self.network_api.release_floating_ip(context, address=ip) return {'released': { diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py index 55e57e1a4..ecaa365b7 100644 --- a/nova/api/openstack/contrib/hosts.py +++ b/nova/api/openstack/contrib/hosts.py @@ -24,6 +24,7 @@ from nova import log as logging from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults +from nova.api.openstack.contrib import admin_only from nova.scheduler import api as scheduler_api @@ -70,7 +71,7 @@ class HostController(object): key = raw_key.lower().strip() 
val = raw_val.lower().strip() # NOTE: (dabo) Right now only 'status' can be set, but other - # actions may follow. + # settings may follow. if key == "status": if val[:6] in ("enable", "disabl"): return self._set_enabled_status(req, id, @@ -89,8 +90,30 @@ class HostController(object): LOG.audit(_("Setting host %(host)s to %(state)s.") % locals()) result = self.compute_api.set_host_enabled(context, host=host, enabled=enabled) + if result not in ("enabled", "disabled"): + # An error message was returned + raise webob.exc.HTTPBadRequest(explanation=result) return {"host": host, "status": result} + def _host_power_action(self, req, host, action): + """Reboots, shuts down or powers up the host.""" + context = req.environ['nova.context'] + try: + result = self.compute_api.host_power_action(context, host=host, + action=action) + except NotImplementedError as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + return {"host": host, "power_action": result} + + def startup(self, req, id): + return self._host_power_action(req, host=id, action="startup") + + def shutdown(self, req, id): + return self._host_power_action(req, host=id, action="shutdown") + + def reboot(self, req, id): + return self._host_power_action(req, host=id, action="reboot") + class Hosts(extensions.ExtensionDescriptor): def get_name(self): @@ -108,7 +131,10 @@ class Hosts(extensions.ExtensionDescriptor): def get_updated(self): return "2011-06-29T00:00:00+00:00" + @admin_only.admin_only def get_resources(self): - resources = [extensions.ResourceExtension('os-hosts', HostController(), - collection_actions={'update': 'PUT'}, member_actions={})] + resources = [extensions.ResourceExtension('os-hosts', + HostController(), collection_actions={'update': 'PUT'}, + member_actions={"startup": "GET", "shutdown": "GET", + "reboot": "GET"})] return resources diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index dbf922dbb..8daf12343 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -23,7 +23,7 @@ import sys import routes import webob.dec import webob.exc -from xml.etree import ElementTree +from lxml import etree from nova import exception from nova import flags @@ -32,6 +32,7 @@ from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil LOG = logging.getLogger('extensions') @@ -478,36 +479,38 @@ class ResourceExtension(object): class ExtensionsXMLSerializer(wsgi.XMLDictSerializer): + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def show(self, ext_dict): - ext = self._create_ext_elem(ext_dict['extension']) + ext = etree.Element('extension', nsmap=self.NSMAP) + self._populate_ext(ext, ext_dict['extension']) return self._to_xml(ext) def index(self, exts_dict): - exts = ElementTree.Element('extensions') + exts = etree.Element('extensions', nsmap=self.NSMAP) for ext_dict in exts_dict['extensions']: - exts.append(self._create_ext_elem(ext_dict)) + ext = etree.SubElement(exts, 'extension') + self._populate_ext(ext, ext_dict) return self._to_xml(exts) - def _create_ext_elem(self, ext_dict): - """Create an extension xml element from a dict.""" - ext_elem = ElementTree.Element('extension') + def _populate_ext(self, ext_elem, ext_dict): + """Populate an extension xml element from a dict.""" + ext_elem.set('name', ext_dict['name']) ext_elem.set('namespace', ext_dict['namespace']) ext_elem.set('alias', ext_dict['alias']) 
ext_elem.set('updated', ext_dict['updated']) - desc = ElementTree.Element('description') + desc = etree.Element('description') desc.text = ext_dict['description'] ext_elem.append(desc) for link in ext_dict.get('links', []): - elem = ElementTree.Element('atom:link') + elem = etree.SubElement(ext_elem, '{%s}link' % xmlutil.XMLNS_ATOM) elem.set('rel', link['rel']) elem.set('href', link['href']) elem.set('type', link['type']) - ext_elem.append(elem) return ext_elem def _to_xml(self, root): - """Convert the xml tree object to an xml string.""" - root.set('xmlns', wsgi.XMLNS_V11) - root.set('xmlns:atom', wsgi.XMLNS_ATOM) - return ElementTree.tostring(root, encoding='UTF-8') + """Convert the xml object to an xml string.""" + + return etree.tostring(root, encoding='UTF-8') diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index aaf64a123..4d615ea96 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -19,7 +19,6 @@ from webob import exc from nova import flags from nova import image -from nova import quota from nova import utils from nova.api.openstack import common from nova.api.openstack import wsgi @@ -40,15 +39,6 @@ class Controller(object): metadata = image.get('properties', {}) return metadata - def _check_quota_limit(self, context, metadata): - if metadata is None: - return - num_metadata = len(metadata) - quota_metadata = quota.allowed_metadata_items(context, num_metadata) - if quota_metadata < num_metadata: - expl = _("Image metadata limit exceeded") - raise exc.HTTPBadRequest(explanation=expl) - def index(self, req, image_id): """Returns the list of metadata for a given instance""" context = req.environ['nova.context'] @@ -70,7 +60,7 @@ class Controller(object): if 'metadata' in body: for key, value in body['metadata'].iteritems(): metadata[key] = value - self._check_quota_limit(context, metadata) + common.check_img_metadata_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) @@ -93,7 +83,7 @@ class Controller(object): img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) metadata[id] = meta[id] - self._check_quota_limit(context, metadata) + common.check_img_metadata_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) return dict(meta=meta) @@ -102,7 +92,7 @@ class Controller(object): context = req.environ['nova.context'] img = self.image_service.show(context, image_id) metadata = body.get('metadata', {}) - self._check_quota_limit(context, metadata) + common.check_img_metadata_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) diff --git a/nova/api/openstack/schemas/atom-link.rng b/nova/api/openstack/schemas/atom-link.rng new file mode 100644 index 000000000..edba5eee6 --- /dev/null +++ b/nova/api/openstack/schemas/atom-link.rng @@ -0,0 +1,141 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + -*- rnc -*- + RELAX NG Compact Syntax Grammar for the + Atom Format Specification Version 11 +--> +<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"> + <start> + <choice> + <ref name="atomLink"/> + </choice> + </start> + <!-- 
Common attributes --> + <define name="atomCommonAttributes"> + <optional> + <attribute name="xml:base"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="xml:lang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <zeroOrMore> + <ref name="undefinedAttribute"/> + </zeroOrMore> + </define> + <!-- atom:link --> + <define name="atomLink"> + <element name="atom:link"> + <ref name="atomCommonAttributes"/> + <attribute name="href"> + <ref name="atomUri"/> + </attribute> + <optional> + <attribute name="rel"> + <choice> + <ref name="atomNCName"/> + <ref name="atomUri"/> + </choice> + </attribute> + </optional> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <optional> + <attribute name="hreflang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <optional> + <attribute name="title"/> + </optional> + <optional> + <attribute name="length"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- Low-level simple types --> + <define name="atomNCName"> + <data type="string"> + <param name="minLength">1</param> + <param name="pattern">[^:]*</param> + </data> + </define> + <!-- Whatever a media type is, it contains at least one slash --> + <define name="atomMediaType"> + <data type="string"> + <param name="pattern">.+/.+</param> + </data> + </define> + <!-- As defined in RFC 3066 --> + <define name="atomLanguageTag"> + <data type="string"> + <param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param> + </data> + </define> + <!-- + Unconstrained; it's not entirely clear how IRI fit into + xsd:anyURI so let's not try to constrain it here + --> + <define name="atomUri"> + <text/> + </define> + <!-- Other Extensibility --> + <define name="undefinedAttribute"> + <attribute> + <anyName> + <except> + <name>xml:base</name> + <name>xml:lang</name> + <nsName ns=""/> + </except> + </anyName> + </attribute> + </define> + <define name="undefinedContent"> + <zeroOrMore> + <choice> + <text/> + <ref name="anyForeignElement"/> + </choice> + </zeroOrMore> + </define> + <define name="anyElement"> + <element> + <anyName/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="anyForeignElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> +</grammar> diff --git a/nova/api/openstack/schemas/atom.rng b/nova/api/openstack/schemas/atom.rng new file mode 100644 index 000000000..c2df4e410 --- /dev/null +++ b/nova/api/openstack/schemas/atom.rng @@ -0,0 +1,597 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + -*- rnc -*- + RELAX NG Compact Syntax Grammar for the + Atom Format Specification Version 11 +--> +<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"> + <start> + <choice> + <ref name="atomFeed"/> + <ref name="atomEntry"/> + </choice> + </start> + <!-- Common attributes --> + <define name="atomCommonAttributes"> + <optional> + <attribute name="xml:base"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="xml:lang"> + <ref 
name="atomLanguageTag"/> + </attribute> + </optional> + <zeroOrMore> + <ref name="undefinedAttribute"/> + </zeroOrMore> + </define> + <!-- Text Constructs --> + <define name="atomPlainTextConstruct"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <choice> + <value>text</value> + <value>html</value> + </choice> + </attribute> + </optional> + <text/> + </define> + <define name="atomXHTMLTextConstruct"> + <ref name="atomCommonAttributes"/> + <attribute name="type"> + <value>xhtml</value> + </attribute> + <ref name="xhtmlDiv"/> + </define> + <define name="atomTextConstruct"> + <choice> + <ref name="atomPlainTextConstruct"/> + <ref name="atomXHTMLTextConstruct"/> + </choice> + </define> + <!-- Person Construct --> + <define name="atomPersonConstruct"> + <ref name="atomCommonAttributes"/> + <interleave> + <element name="atom:name"> + <text/> + </element> + <optional> + <element name="atom:uri"> + <ref name="atomUri"/> + </element> + </optional> + <optional> + <element name="atom:email"> + <ref name="atomEmailAddress"/> + </element> + </optional> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </define> + <!-- Date Construct --> + <define name="atomDateConstruct"> + <ref name="atomCommonAttributes"/> + <data type="dateTime"/> + </define> + <!-- atom:feed --> + <define name="atomFeed"> + <element name="atom:feed"> + <s:rule context="atom:feed"> + <s:assert test="atom:author or not(atom:entry[not(atom:author)])">An atom:feed must have an atom:author unless all of its atom:entry children have an atom:author.</s:assert> + </s:rule> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <optional> + <ref name="atomGenerator"/> + </optional> + <optional> + <ref name="atomIcon"/> + </optional> + <ref name="atomId"/> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomLogo"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSubtitle"/> + </optional> + <ref name="atomTitle"/> + <ref name="atomUpdated"/> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + <zeroOrMore> + <ref name="atomEntry"/> + </zeroOrMore> + </element> + </define> + <!-- atom:entry --> + <define name="atomEntry"> + <element name="atom:entry"> + <s:rule context="atom:entry"> + <s:assert test="atom:link[@rel='alternate'] or atom:link[not(@rel)] or atom:content">An atom:entry must have at least one atom:link element with a rel attribute of 'alternate' or an atom:content.</s:assert> + </s:rule> + <s:rule context="atom:entry"> + <s:assert test="atom:author or ../atom:author or atom:source/atom:author">An atom:entry must have an atom:author if its feed does not.</s:assert> + </s:rule> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <optional> + <ref name="atomContent"/> + </optional> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <ref name="atomId"/> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomPublished"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSource"/> + </optional> + <optional> + <ref name="atomSummary"/> + </optional> + <ref name="atomTitle"/> + 
<ref name="atomUpdated"/> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </element> + </define> + <!-- atom:content --> + <define name="atomInlineTextContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <choice> + <value>text</value> + <value>html</value> + </choice> + </attribute> + </optional> + <zeroOrMore> + <text/> + </zeroOrMore> + </element> + </define> + <define name="atomInlineXHTMLContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <attribute name="type"> + <value>xhtml</value> + </attribute> + <ref name="xhtmlDiv"/> + </element> + </define> + <define name="atomInlineOtherContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="atomOutOfLineContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <attribute name="src"> + <ref name="atomUri"/> + </attribute> + <empty/> + </element> + </define> + <define name="atomContent"> + <choice> + <ref name="atomInlineTextContent"/> + <ref name="atomInlineXHTMLContent"/> + <ref name="atomInlineOtherContent"/> + <ref name="atomOutOfLineContent"/> + </choice> + </define> + <!-- atom:author --> + <define name="atomAuthor"> + <element name="atom:author"> + <ref name="atomPersonConstruct"/> + </element> + </define> + <!-- atom:category --> + <define name="atomCategory"> + <element name="atom:category"> + <ref name="atomCommonAttributes"/> + <attribute name="term"/> + <optional> + <attribute name="scheme"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="label"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- atom:contributor --> + <define name="atomContributor"> + <element name="atom:contributor"> + <ref name="atomPersonConstruct"/> + </element> + </define> + <!-- atom:generator --> + <define name="atomGenerator"> + <element name="atom:generator"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="uri"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="version"/> + </optional> + <text/> + </element> + </define> + <!-- atom:icon --> + <define name="atomIcon"> + <element name="atom:icon"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:id --> + <define name="atomId"> + <element name="atom:id"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:logo --> + <define name="atomLogo"> + <element name="atom:logo"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:link --> + <define name="atomLink"> + <element name="atom:link"> + <ref name="atomCommonAttributes"/> + <attribute name="href"> + <ref name="atomUri"/> + </attribute> + <optional> + <attribute name="rel"> + <choice> + <ref name="atomNCName"/> + <ref name="atomUri"/> + </choice> + </attribute> + </optional> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <optional> + <attribute name="hreflang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <optional> + <attribute 
name="title"/> + </optional> + <optional> + <attribute name="length"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- atom:published --> + <define name="atomPublished"> + <element name="atom:published"> + <ref name="atomDateConstruct"/> + </element> + </define> + <!-- atom:rights --> + <define name="atomRights"> + <element name="atom:rights"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:source --> + <define name="atomSource"> + <element name="atom:source"> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <optional> + <ref name="atomGenerator"/> + </optional> + <optional> + <ref name="atomIcon"/> + </optional> + <optional> + <ref name="atomId"/> + </optional> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomLogo"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSubtitle"/> + </optional> + <optional> + <ref name="atomTitle"/> + </optional> + <optional> + <ref name="atomUpdated"/> + </optional> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </element> + </define> + <!-- atom:subtitle --> + <define name="atomSubtitle"> + <element name="atom:subtitle"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:summary --> + <define name="atomSummary"> + <element name="atom:summary"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:title --> + <define name="atomTitle"> + <element name="atom:title"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:updated --> + <define name="atomUpdated"> + <element name="atom:updated"> + <ref name="atomDateConstruct"/> + </element> + </define> + <!-- Low-level simple types --> + <define name="atomNCName"> + <data type="string"> + <param name="minLength">1</param> + <param name="pattern">[^:]*</param> + </data> + </define> + <!-- Whatever a media type is, it contains at least one slash --> + <define name="atomMediaType"> + <data type="string"> + <param name="pattern">.+/.+</param> + </data> + </define> + <!-- As defined in RFC 3066 --> + <define name="atomLanguageTag"> + <data type="string"> + <param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param> + </data> + </define> + <!-- + Unconstrained; it's not entirely clear how IRI fit into + xsd:anyURI so let's not try to constrain it here + --> + <define name="atomUri"> + <text/> + </define> + <!-- Whatever an email address is, it contains at least one @ --> + <define name="atomEmailAddress"> + <data type="string"> + <param name="pattern">.+@.+</param> + </data> + </define> + <!-- Simple Extension --> + <define name="simpleExtensionElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <text/> + </element> + </define> + <!-- Structured Extension --> + <define name="structuredExtensionElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <choice> + <group> + <oneOrMore> + <attribute> + <anyName/> + </attribute> + </oneOrMore> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </group> + <group> + <zeroOrMore> + <attribute> + <anyName/> + </attribute> + </zeroOrMore> + <group> + <optional> + <text/> + </optional> + <oneOrMore> + <ref 
name="anyElement"/> + </oneOrMore> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </group> + </group> + </choice> + </element> + </define> + <!-- Other Extensibility --> + <define name="extensionElement"> + <choice> + <ref name="simpleExtensionElement"/> + <ref name="structuredExtensionElement"/> + </choice> + </define> + <define name="undefinedAttribute"> + <attribute> + <anyName> + <except> + <name>xml:base</name> + <name>xml:lang</name> + <nsName ns=""/> + </except> + </anyName> + </attribute> + </define> + <define name="undefinedContent"> + <zeroOrMore> + <choice> + <text/> + <ref name="anyForeignElement"/> + </choice> + </zeroOrMore> + </define> + <define name="anyElement"> + <element> + <anyName/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="anyForeignElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <!-- XHTML --> + <define name="anyXHTML"> + <element> + <nsName ns="http://www.w3.org/1999/xhtml"/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyXHTML"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="xhtmlDiv"> + <element name="xhtml:div"> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyXHTML"/> + </choice> + </zeroOrMore> + </element> + </define> +</grammar> diff --git a/nova/api/openstack/schemas/v1.1/extension.rng b/nova/api/openstack/schemas/v1.1/extension.rng new file mode 100644 index 000000000..336659755 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/extension.rng @@ -0,0 +1,11 @@ +<element name="extension" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <attribute name="alias"> <text/> </attribute> + <attribute name="name"> <text/> </attribute> + <attribute name="namespace"> <text/> </attribute> + <attribute name="updated"> <text/> </attribute> + <element name="description"> <text/> </element> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/extensions.rng b/nova/api/openstack/schemas/v1.1/extensions.rng new file mode 100644 index 000000000..4d8bff646 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/extensions.rng @@ -0,0 +1,6 @@ +<element name="extensions" xmlns="http://relaxng.org/ns/structure/1.0" + ns="http://docs.openstack.org/compute/api/v1.1"> + <zeroOrMore> + <externalRef href="extension.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index b0b014f86..2b235f79a 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -57,18 +57,12 @@ class Controller(object): context = req.environ['nova.context'] - try: - self.compute_api.update_or_create_instance_metadata(context, - server_id, - metadata) - except exception.InstanceNotFound: - msg = _('Server does not exist') - raise exc.HTTPNotFound(explanation=msg) + new_metadata = self._update_instance_metadata(context, + server_id, + metadata, + delete=False) - except quota.QuotaError as error: - self._handle_quota_error(error) - - return body + return {'metadata': 
new_metadata} def update(self, req, server_id, id, body): try: @@ -78,19 +72,22 @@ class Controller(object): raise exc.HTTPBadRequest(explanation=expl) try: - meta_value = meta_item.pop(id) + meta_value = meta_item[id] except (AttributeError, KeyError): expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) - if len(meta_item) > 0: + if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['nova.context'] - self._set_instance_metadata(context, server_id, meta_item) + self._update_instance_metadata(context, + server_id, + meta_item, + delete=False) - return {'meta': {id: meta_value}} + return {'meta': meta_item} def update_all(self, req, server_id, body): try: @@ -100,20 +97,26 @@ class Controller(object): raise exc.HTTPBadRequest(explanation=expl) context = req.environ['nova.context'] - self._set_instance_metadata(context, server_id, metadata) + new_metadata = self._update_instance_metadata(context, + server_id, + metadata, + delete=True) - return {'metadata': metadata} + return {'metadata': new_metadata} - def _set_instance_metadata(self, context, server_id, metadata): + def _update_instance_metadata(self, context, server_id, metadata, + delete=False): try: - self.compute_api.update_or_create_instance_metadata(context, - server_id, - metadata) + return self.compute_api.update_instance_metadata(context, + server_id, + metadata, + delete) + except exception.InstanceNotFound: msg = _('Server does not exist') raise exc.HTTPNotFound(explanation=msg) - except ValueError: + except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) @@ -138,12 +141,12 @@ class Controller(object): metadata = self._get_metadata(context, server_id) try: - meta_key = metadata[id] + meta_value = metadata[id] except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) - self.compute_api.delete_instance_metadata(context, server_id, meta_key) + self.compute_api.delete_instance_metadata(context, server_id, id) def _handle_quota_error(self, error): """Reraise quota errors as api-specific http exceptions.""" diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index d7c4e3018..77a304941 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -44,7 +44,7 @@ FLAGS = flags.FLAGS class Controller(object): - """ The Server API controller for the OpenStack API """ + """ The Server API base controller class for the OpenStack API """ def __init__(self): self.compute_api = compute.API() @@ -53,17 +53,21 @@ class Controller(object): def index(self, req): """ Returns a list of server names and ids for a given user """ try: - servers = self._items(req, is_detail=False) + servers = self._get_servers(req, is_detail=False) except exception.Invalid as err: return exc.HTTPBadRequest(explanation=str(err)) + except exception.NotFound: + return exc.HTTPNotFound() return servers def detail(self, req): """ Returns a list of server details for a given user """ try: - servers = self._items(req, is_detail=True) + servers = self._get_servers(req, is_detail=True) except exception.Invalid as err: return exc.HTTPBadRequest(explanation=str(err)) + except exception.NotFound as err: + return exc.HTTPNotFound() return servers def _build_view(self, req, instance, is_detail=False): @@ -75,22 +79,55 @@ class Controller(object): def _action_rebuild(self, info, request, instance_id): raise NotImplementedError() - def 
_items(self, req, is_detail): - """Returns a list of servers for a given user. - - builder - the response model builder + def _get_servers(self, req, is_detail): + """Returns a list of servers, taking into account any search + options specified. """ - query_str = req.str_GET - reservation_id = query_str.get('reservation_id') - project_id = query_str.get('project_id') - fixed_ip = query_str.get('fixed_ip') - recurse_zones = utils.bool_from_str(query_str.get('recurse_zones')) + + search_opts = {} + search_opts.update(req.str_GET) + + context = req.environ['nova.context'] + remove_invalid_options(context, search_opts, + self._get_server_search_options()) + + # Convert recurse_zones into a boolean + search_opts['recurse_zones'] = utils.bool_from_str( + search_opts.get('recurse_zones', False)) + + # If search by 'status', we need to convert it to 'state' + # If the status is unknown, bail. + # Leave 'state' in search_opts so compute can pass it on to + # child zones.. + if 'status' in search_opts: + status = search_opts['status'] + search_opts['state'] = common.power_states_from_status(status) + if len(search_opts['state']) == 0: + reason = _('Invalid server status: %(status)s') % locals() + LOG.error(reason) + raise exception.InvalidInput(reason=reason) + + # By default, compute's get_all() will return deleted instances. + # If an admin hasn't specified a 'deleted' search option, we need + # to filter out deleted instances by setting the filter ourselves. + # ... Unless 'changes-since' is specified, because 'changes-since' + # should return recently deleted images according to the API spec. + + if 'deleted' not in search_opts: + # Admin hasn't specified deleted filter + if 'changes-since' not in search_opts: + # No 'changes-since', so we need to find non-deleted servers + search_opts['deleted'] = False + else: + # This is the default, but just in case.. + search_opts['deleted'] = True + instance_list = self.compute_api.get_all( - req.environ['nova.context'], - reservation_id=reservation_id, - project_id=project_id, - fixed_ip=fixed_ip, - recurse_zones=recurse_zones) + context, search_opts=search_opts) + + # FIXME(comstud): 'changes-since' is not fully implemented. Where + # should this be filtered? 
+ limited_list = self._limit_items(instance_list, req) servers = [self._build_view(req, inst, is_detail)['server'] for inst in limited_list] @@ -218,13 +255,14 @@ class Controller(object): props = {'instance_ref': server_ref} metadata = entity.get('metadata', {}) + context = req.environ["nova.context"] + common.check_img_metadata_quota_limit(context, metadata) try: props.update(metadata) except ValueError: msg = _("Invalid metadata") raise webob.exc.HTTPBadRequest(explanation=msg) - context = req.environ["nova.context"] image = self.compute_api.backup(context, instance_id, image_name, @@ -505,6 +543,7 @@ class Controller(object): class ControllerV10(Controller): + """v1.0 OpenStack API controller""" @scheduler_api.redirect_handler def delete(self, req, id): @@ -567,8 +606,13 @@ class ControllerV10(Controller): """ Determine the admin password for a server on creation """ return self.helper._get_server_admin_password_old_style(server) + def _get_server_search_options(self): + """Return server search options allowed by non-admin""" + return 'reservation_id', 'fixed_ip', 'name', 'recurse_zones' + class ControllerV11(Controller): + """v1.1 OpenStack API controller""" @scheduler_api.redirect_handler def delete(self, req, id): @@ -713,13 +757,14 @@ class ControllerV11(Controller): props = {'instance_ref': server_ref} metadata = entity.get('metadata', {}) + context = req.environ['nova.context'] + common.check_img_metadata_quota_limit(context, metadata) try: props.update(metadata) except ValueError: msg = _("Invalid metadata") raise webob.exc.HTTPBadRequest(explanation=msg) - context = req.environ['nova.context'] image = self.compute_api.snapshot(context, instance_id, image_name, @@ -740,9 +785,17 @@ class ControllerV11(Controller): """ Determine the admin password for a server on creation """ return self.helper._get_server_admin_password_new_style(server) + def _get_server_search_options(self): + """Return server search options allowed by non-admin""" + return ('reservation_id', 'name', 'recurse_zones', + 'status', 'image', 'flavor', 'changes-since') + class HeadersSerializer(wsgi.ResponseHeadersSerializer): + def create(self, response, data): + response.status_int = 202 + def delete(self, response, data): response.status_int = 204 @@ -920,3 +973,18 @@ def _get_metadata(): }, } return metadata + + +def remove_invalid_options(context, search_options, allowed_search_options): + """Remove search options that are not valid for non-admin API/context""" + if FLAGS.allow_admin_api and context.is_admin: + # Allow all options + return + # Otherwise, strip out all unknown options + unknown_options = [opt for opt in search_options + if opt not in allowed_search_options] + unk_opt_str = ", ".join(unknown_options) + log_msg = _("Removing options '%(unk_opt_str)s' from query") % locals() + LOG.debug(log_msg) + for opt in unknown_options: + search_options.pop(opt, None) diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 873ce212a..912303d14 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -77,7 +77,9 @@ class ViewBuilder(object): "status": image_obj.get("status"), }) - if image["status"] == "SAVING": + if image["status"].upper() == "ACTIVE": + image["progress"] = 100 + else: image["progress"] = 0 return image diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 2873a8e0f..8222f6766 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -20,7 +20,6 @@ 
import hashlib import os from nova import exception -from nova.compute import power_state import nova.compute import nova.context from nova.api.openstack import common @@ -61,24 +60,11 @@ class ViewBuilder(object): def _build_detail(self, inst): """Returns a detailed model of a server.""" - power_mapping = { - None: 'BUILD', - power_state.NOSTATE: 'BUILD', - power_state.RUNNING: 'ACTIVE', - power_state.BLOCKED: 'ACTIVE', - power_state.SUSPENDED: 'SUSPENDED', - power_state.PAUSED: 'PAUSED', - power_state.SHUTDOWN: 'SHUTDOWN', - power_state.SHUTOFF: 'SHUTOFF', - power_state.CRASHED: 'ERROR', - power_state.FAILED: 'ERROR', - power_state.BUILDING: 'BUILD', - } inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'status': power_mapping[inst.get('state')]} + 'status': common.status_from_power_state(inst.get('state'))} ctxt = nova.context.get_admin_context() compute_api = nova.compute.API() diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py new file mode 100644 index 000000000..97ad90ada --- /dev/null +++ b/nova/api/openstack/xmlutil.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os.path + +from lxml import etree + +from nova import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + + +def validate_schema(xml, schema_name): + if type(xml) is str: + xml = etree.fromstring(xml) + schema_path = os.path.join(utils.novadir(), + 'nova/api/openstack/schemas/v1.1/%s.rng' % schema_name) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index f7fd87bcd..a2bf267ed 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -166,7 +166,7 @@ class Controller(object): return self.helper._get_server_admin_password_old_style(server) -class ControllerV11(object): +class ControllerV11(Controller): """Controller for 1.1 Zone resources.""" def _get_server_admin_password(self, server): diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index d05c099d7..978ffb210 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -16,3 +16,4 @@ export NOVA_API_KEY="%(access)s" export NOVA_USERNAME="%(user)s" export NOVA_PROJECT_ID="%(project)s" export NOVA_URL="%(os)s" +export NOVA_VERSION="1.1" diff --git a/nova/block_device.py b/nova/block_device.py new file mode 100644 index 000000000..8d95e0029 --- /dev/null +++ b/nova/block_device.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp> +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + + +def properties_root_device_name(properties): + """get root device name from image meta data. + If it isn't specified, return None. + """ + root_device_name = None + + # NOTE(yamahata): see image_service.s3.s3create() + for bdm in properties.get('mappings', []): + if bdm['virtual'] == 'root': + root_device_name = bdm['device'] + + # NOTE(yamahata): register_image's command line can override + # <machine>.manifest.xml + if 'root_device_name' in properties: + root_device_name = properties['root_device_name'] + + return root_device_name + + +_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$') + + +def is_ephemeral(device_name): + return _ephemeral.match(device_name) + + +def ephemeral_num(ephemeral_name): + assert is_ephemeral(ephemeral_name) + return int(_ephemeral.sub('\\1', ephemeral_name)) + + +def is_swap_or_ephemeral(device_name): + return device_name == 'swap' or is_ephemeral(device_name) + + +def mappings_prepend_dev(mappings): + """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type""" + for m in mappings: + virtual = m['virtual'] + if (is_swap_or_ephemeral(virtual) and + (not m['device'].startswith('/'))): + m['device'] = '/dev/' + m['device'] + return mappings + + +_dev = re.compile('^/dev/') + + +def strip_dev(device_name): + """remove leading '/dev/'""" + return _dev.sub('', device_name) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4f91b60b0..410a48613 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -19,9 +19,11 @@ """Handles all requests relating to instances (guest vms).""" import eventlet +import novaclient import re import time +from nova import block_device from nova import db from nova import exception from nova import flags @@ -32,7 +34,6 @@ from nova import quota from nova import rpc from nova import utils from nova import volume -from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.compute import power_state from nova.compute.utils import terminate_volumes @@ -229,7 +230,7 @@ class API(base.Base): if reservation_id is None: reservation_id = utils.generate_uid('r') - root_device_name = ec2utils.properties_root_device_name( + root_device_name = block_device.properties_root_device_name( image['properties']) base_options = { @@ -261,34 +262,64 @@ class API(base.Base): return (num_instances, base_options, image) - def _update_image_block_device_mapping(self, elevated_context, instance_id, + @staticmethod + def _ephemeral_size(instance_type, ephemeral_name): + num = block_device.ephemeral_num(ephemeral_name) + + # TODO(yamahata): ephemeralN where N > 0 + # Only ephemeral0 is allowed for now because InstanceTypes + # table only allows single local disk, local_gb. + # In order to enhance it, we need to add a new columns to + # instance_types table. 
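A few hedged usage examples for the new nova/block_device.py helpers added above (this assumes the nova tree is on the import path):

    from nova import block_device

    block_device.strip_dev('/dev/sda1')               # -> 'sda1'
    bool(block_device.is_ephemeral('ephemeral0'))     # -> True
    block_device.ephemeral_num('ephemeral0')          # -> 0
    block_device.is_swap_or_ephemeral('swap')         # -> True

    mappings = [{'virtual': 'swap', 'device': 'sdb'},
                {'virtual': 'ami', 'device': 'sda1'}]
    block_device.mappings_prepend_dev(mappings)
    # The swap entry gets '/dev/' prepended, the 'ami' entry is left untouched:
    # [{'virtual': 'swap', 'device': '/dev/sdb'},
    #  {'virtual': 'ami', 'device': 'sda1'}]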
+ if num > 0: + return 0 + + return instance_type.get('local_gb') + + def _update_image_block_device_mapping(self, elevated_context, + instance_type, instance_id, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping """ - for bdm in ec2utils.mappings_prepend_dev(mappings): + instance_type = (instance_type or + instance_types.get_default_instance_type()) + + for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug(_("bdm %s"), bdm) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue - assert (virtual_name == 'swap' or - virtual_name.startswith('ephemeral')) + if not block_device.is_swap_or_ephemeral(virtual_name): + continue + + size = 0 + if virtual_name == 'swap': + size = instance_type.get('swap', 0) + elif block_device.is_ephemeral(virtual_name): + size = self._ephemeral_size(instance_type, virtual_name) + + if size == 0: + continue + values = { 'instance_id': instance_id, 'device_name': bdm['device'], - 'virtual_name': virtual_name, } + 'virtual_name': virtual_name, + 'volume_size': size} self.db.block_device_mapping_update_or_create(elevated_context, values) - def _update_block_device_mapping(self, elevated_context, instance_id, + def _update_block_device_mapping(self, elevated_context, + instance_type, instance_id, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping """ + LOG.debug(_("block_device_mapping %s"), block_device_mapping) for bdm in block_device_mapping: - LOG.debug(_('bdm %s'), bdm) assert 'device_name' in bdm values = {'instance_id': instance_id} @@ -297,10 +328,18 @@ class API(base.Base): 'no_device'): values[key] = bdm.get(key) + virtual_name = bdm.get('virtual_name') + if (virtual_name is not None and + block_device.is_ephemeral(virtual_name)): + size = self._ephemeral_size(instance_type, virtual_name) + if size == 0: + continue + values['volume_size'] = size + # NOTE(yamahata): NoDevice eliminates devices defined in image # files by command line option. # (--block-device-mapping) - if bdm.get('virtual_name') == 'NoDevice': + if virtual_name == 'NoDevice': values['no_device'] = True for k in ('delete_on_termination', 'volume_id', 'snapshot_id', 'volume_id', 'volume_size', @@ -310,8 +349,8 @@ class API(base.Base): self.db.block_device_mapping_update_or_create(elevated_context, values) - def create_db_entry_for_new_instance(self, context, image, base_options, - security_group, block_device_mapping, num=1): + def create_db_entry_for_new_instance(self, context, instance_type, image, + base_options, security_group, block_device_mapping, num=1): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). 
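A compact, self-contained restatement of the swap/ephemeral sizing rules applied above; instance_type is a plain dict standing in for an InstanceTypes row:

    def pick_volume_size(instance_type, virtual_name):
        # Swap size comes from the flavor; only ephemeral0 gets local_gb for now,
        # because the flavor table has a single local disk column (local_gb).
        if virtual_name == 'swap':
            return instance_type.get('swap', 0)
        if virtual_name.startswith('ephemeral'):
            num = int(virtual_name[len('ephemeral'):])
            return instance_type.get('local_gb') if num == 0 else 0
        return 0

    flavor = {'swap': 2048, 'local_gb': 20}
    pick_volume_size(flavor, 'swap')        # -> 2048
    pick_volume_size(flavor, 'ephemeral0')  # -> 20
    pick_volume_size(flavor, 'ephemeral1')  # -> 0 (a zero size means the entry
                                            #       is skipped, as above)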
@@ -344,12 +383,12 @@ class API(base.Base): security_group_id) # BlockDeviceMapping table - self._update_image_block_device_mapping(elevated, instance_id, - image['properties'].get('mappings', [])) - self._update_block_device_mapping(elevated, instance_id, + self._update_image_block_device_mapping(elevated, instance_type, + instance_id, image['properties'].get('mappings', [])) + self._update_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('block_device_mapping', [])) # override via command line option - self._update_block_device_mapping(elevated, instance_id, + self._update_block_device_mapping(elevated, instance_type, instance_id, block_device_mapping) # Set sane defaults if not specified @@ -470,7 +509,8 @@ class API(base.Base): instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): - instance = self.create_db_entry_for_new_instance(context, image, + instance = self.create_db_entry_for_new_instance(context, + instance_type, image, base_options, security_group, block_device_mapping, num=num) instances.append(instance) @@ -687,59 +727,84 @@ class API(base.Base): """ return self.get(context, instance_id) - def get_all(self, context, project_id=None, reservation_id=None, - fixed_ip=None, recurse_zones=False): + def get_all(self, context, search_opts=None): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. """ - if reservation_id is not None: - recurse_zones = True - instances = self.db.instance_get_all_by_reservation( - context, reservation_id) - elif fixed_ip is not None: + if search_opts is None: + search_opts = {} + + LOG.debug(_("Searching by: %s") % str(search_opts)) + + # Fixups for the DB call + filters = {} + + def _remap_flavor_filter(flavor_id): + instance_type = self.db.instance_type_get_by_flavor_id( + context, flavor_id) + filters['instance_type_id'] = instance_type['id'] + + def _remap_fixed_ip_filter(fixed_ip): + # Turn fixed_ip into a regexp match. Since '.' matches + # any character, we need to use regexp escaping for it. + filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.') + + # search_option to filter_name mapping. + filter_mapping = { + 'image': 'image_ref', + 'name': 'display_name', + 'instance_name': 'name', + 'recurse_zones': None, + 'flavor': _remap_flavor_filter, + 'fixed_ip': _remap_fixed_ip_filter} + + # copy from search_opts, doing various remappings as necessary + for opt, value in search_opts.iteritems(): + # Do remappings. + # Values not in the filter_mapping table are copied as-is. 
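For illustration, the fixed_ip search option is turned into an anchored regular expression before reaching the DB layer; a minimal standalone check of that remapping (the address is made up):

    import re

    fixed_ip = '10.0.0.5'
    # '.' would otherwise match any character, so escape it and anchor the match.
    pattern = '^%s$' % fixed_ip.replace('.', '\\.')
    # pattern == '^10\\.0\\.0\\.5$'

    re.match(pattern, '10.0.0.5')    # matches
    re.match(pattern, '110.0.0.55')  # None: anchoring prevents partial matches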
+ # If remapping is None, option is not copied + # If the remapping is a string, it is the filter_name to use try: - instances = self.db.fixed_ip_get_instance(context, fixed_ip) - except exception.FloatingIpNotFound, e: - if not recurse_zones: - raise - instances = None - elif project_id or not context.is_admin: - if not context.project_id: - instances = self.db.instance_get_all_by_user( - context, context.user_id) + remap_object = filter_mapping[opt] + except KeyError: + filters[opt] = value else: - if project_id is None: - project_id = context.project_id - instances = self.db.instance_get_all_by_project( - context, project_id) - else: - instances = self.db.instance_get_all(context) + if remap_object: + if isinstance(remap_object, basestring): + filters[remap_object] = value + else: + remap_object(value) + + recurse_zones = search_opts.get('recurse_zones', False) + if 'reservation_id' in filters: + recurse_zones = True - if instances is None: - instances = [] - elif not isinstance(instances, list): - instances = [instances] + instances = self.db.instance_get_all_by_filters(context, filters) if not recurse_zones: return instances + # Recurse zones. Need admin context for this. Send along + # the un-modified search options we received.. admin_context = context.elevated() children = scheduler_api.call_zone_method(admin_context, "list", + errors_to_ignore=[novaclient.exceptions.NotFound], novaclient_collection_name="servers", - reservation_id=reservation_id, - project_id=project_id, - fixed_ip=fixed_ip, - recurse_zones=True) + search_opts=search_opts) for zone, servers in children: + # 'servers' can be None if a 404 was returned by a zone + if servers is None: + continue for server in servers: # Results are ready to send to user. No need to scrub. server._info['_is_precooked'] = True instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, @@ -1011,7 +1076,12 @@ class API(base.Base): def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._call_compute_message("set_host_enabled", context, - instance_id=None, host=host, params={"enabled": enabled}) + host=host, params={"enabled": enabled}) + + def host_power_action(self, context, host, action): + """Reboots, shuts down or powers up the host.""" + return self._call_compute_message("host_power_action", context, + host=host, params={"action": action}) @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): @@ -1184,11 +1254,20 @@ class API(base.Base): """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance_id, key) - def update_or_create_instance_metadata(self, context, instance_id, - metadata): - """Updates or creates instance metadata.""" - combined_metadata = self.get_instance_metadata(context, instance_id) - combined_metadata.update(metadata) - self._check_metadata_properties_quota(context, combined_metadata) - self.db.instance_metadata_update_or_create(context, instance_id, - metadata) + def update_instance_metadata(self, context, instance_id, + metadata, delete=False): + """Updates or creates instance metadata. + + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. 
+ + """ + if delete: + _metadata = metadata + else: + _metadata = self.get_instance_metadata(context, instance_id) + _metadata.update(metadata) + + self._check_metadata_properties_quota(context, _metadata) + self.db.instance_metadata_update(context, instance_id, _metadata, True) + return _metadata diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 41de1657e..c8a328325 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -45,6 +45,7 @@ import functools from eventlet import greenthread import nova.context +from nova import block_device from nova import exception from nova import flags import nova.image @@ -260,6 +261,8 @@ class ComputeManager(manager.SchedulerDependentManager): volume_api = volume.API() block_device_mapping = [] + swap = None + ephemerals = [] for bdm in self.db.block_device_mapping_get_all_by_instance( context, instance_id): LOG.debug(_("setting up bdm %s"), bdm) @@ -267,11 +270,18 @@ class ComputeManager(manager.SchedulerDependentManager): if bdm['no_device']: continue if bdm['virtual_name']: - # TODO(yamahata): - # block devices for swap and ephemeralN will be - # created by virt driver locally in compute node. - assert (bdm['virtual_name'] == 'swap' or - bdm['virtual_name'].startswith('ephemeral')) + virtual_name = bdm['virtual_name'] + device_name = bdm['device_name'] + assert block_device.is_swap_or_ephemeral(virtual_name) + if virtual_name == 'swap': + swap = {'device_name': device_name, + 'swap_size': bdm['volume_size']} + elif block_device.is_ephemeral(virtual_name): + eph = {'num': block_device.ephemeral_num(virtual_name), + 'virtual_name': virtual_name, + 'device_name': device_name, + 'size': bdm['volume_size']} + ephemerals.append(eph) continue if ((bdm['snapshot_id'] is not None) and @@ -307,7 +317,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'mount_device': bdm['device_name']}) - return block_device_mapping + return (swap, ephemerals, block_device_mapping) def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" @@ -350,13 +360,21 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] - bd_mapping = self._setup_block_device_mapping(context, instance_id) + (swap, ephemerals, + block_device_mapping) = self._setup_block_device_mapping( + context, instance_id) + block_device_info = { + 'root_device_name': instance['root_device_name'], + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} # TODO(vish) check to make sure the availability zone matches self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(context, instance, network_info, bd_mapping) + self.driver.spawn(context, instance, + network_info, block_device_info) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? 
Details: " @@ -960,8 +978,12 @@ class ComputeManager(manager.SchedulerDependentManager): result)) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) - def set_host_enabled(self, context, instance_id=None, host=None, - enabled=None): + def host_power_action(self, context, host=None, action=None): + """Reboots, shuts down or powers up the host.""" + return self.driver.host_power_action(host, action) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + def set_host_enabled(self, context, host=None, enabled=None): """Sets the specified host's ability to accept new instances.""" return self.driver.set_host_enabled(host, enabled) diff --git a/nova/db/api.py b/nova/db/api.py index 789e9bc97..b12448ab2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -395,15 +395,6 @@ def fixed_ip_get_by_virtual_interface(context, vif_id): return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) -def fixed_ip_get_instance(context, address): - """Get an instance for a fixed ip by address.""" - return IMPL.fixed_ip_get_instance(context, address) - - -def fixed_ip_get_instance_v6(context, address): - return IMPL.fixed_ip_get_instance_v6(context, address) - - def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) @@ -515,6 +506,11 @@ def instance_get_all(context): return IMPL.instance_get_all(context) +def instance_get_all_by_filters(context, filters): + """Get all instances that match all filters.""" + return IMPL.instance_get_all_by_filters(context, filters) + + def instance_get_active_by_window(context, begin, end=None): """Get instances active during a certain time window.""" return IMPL.instance_get_active_by_window(context, begin, end) @@ -536,10 +532,20 @@ def instance_get_all_by_host(context, host): def instance_get_all_by_reservation(context, reservation_id): - """Get all instance belonging to a reservation.""" + """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) +def instance_get_by_fixed_ip(context, address): + """Get an instance for a fixed ip by address.""" + return IMPL.instance_get_by_fixed_ip(context, address) + + +def instance_get_by_fixed_ipv6(context, address): + """Get an instance for a fixed ip by IPv6 address.""" + return IMPL.instance_get_by_fixed_ipv6(context, address) + + def instance_get_fixed_addresses(context, instance_id): """Get the fixed ip address of an instance.""" return IMPL.instance_get_fixed_addresses(context, instance_id) @@ -1413,9 +1419,9 @@ def instance_metadata_delete(context, instance_id, key): IMPL.instance_metadata_delete(context, instance_id, key) -def instance_metadata_update_or_create(context, instance_id, metadata): - """Create or update instance metadata.""" - IMPL.instance_metadata_update_or_create(context, instance_id, metadata) +def instance_metadata_update(context, instance_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 20c717255..fafea8ca0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -18,8 +18,10 @@ """ Implementation of SQLAlchemy backend. 
""" +import re import warnings +from nova import block_device from nova import db from nova import exception from nova import flags @@ -882,28 +884,6 @@ def fixed_ip_get_by_virtual_interface(context, vif_id): return rv -@require_context -def fixed_ip_get_instance(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - return fixed_ip_ref.instance - - -@require_context -def fixed_ip_get_instance_v6(context, address): - session = get_session() - - # convert IPv6 address to mac - mac = ipv6.to_mac(address) - - # get virtual interface - vif_ref = virtual_interface_get_by_address(context, mac) - - # look up instance based on instance_id from vif row - result = session.query(models.Instance).\ - filter_by(id=vif_ref['instance_id']) - return result - - @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) @@ -1222,6 +1202,114 @@ def instance_get_all(context): all() +@require_context +def instance_get_all_by_filters(context, filters): + """Return instances that match all filters. Deleted instances + will be returned by default, unless there's a filter that says + otherwise""" + + def _regexp_filter_by_ipv6(instance, filter_re): + for interface in instance['virtual_interfaces']: + fixed_ipv6 = interface.get('fixed_ipv6') + if fixed_ipv6 and filter_re.match(fixed_ipv6): + return True + return False + + def _regexp_filter_by_ip(instance, filter_re): + for interface in instance['virtual_interfaces']: + for fixed_ip in interface['fixed_ips']: + if not fixed_ip or not fixed_ip['address']: + continue + if filter_re.match(fixed_ip['address']): + return True + for floating_ip in fixed_ip.get('floating_ips', []): + if not floating_ip or not floating_ip['address']: + continue + if filter_re.match(floating_ip['address']): + return True + return False + + def _regexp_filter_by_column(instance, filter_name, filter_re): + try: + v = getattr(instance, filter_name) + except AttributeError: + return True + if v and filter_re.match(str(v)): + return True + return False + + def _exact_match_filter(query, column, value): + """Do exact match against a column. value to match can be a list + so you can match any value in the list. + """ + if isinstance(value, list): + column_attr = getattr(models.Instance, column) + return query.filter(column_attr.in_(value)) + else: + filter_dict = {} + filter_dict[column] = value + return query.filter_by(**filter_dict) + + session = get_session() + query_prefix = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload_all('virtual_interfaces.network')).\ + options(joinedload_all( + 'virtual_interfaces.fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')) + + # Make a copy of the filters dictionary to use going forward, as we'll + # be modifying it and we shouldn't affect the caller's use of it. + filters = filters.copy() + + if not context.is_admin: + # If we're not admin context, add appropriate filter.. + if context.project_id: + filters['project_id'] = context.project_id + else: + filters['user_id'] = context.user_id + + # Filters for exact matches that we can do along with the SQL query... 
+ # For other filters that don't match this, we will do regexp matching + exact_match_filter_names = ['project_id', 'user_id', 'image_ref', + 'state', 'instance_type_id', 'deleted'] + + query_filters = [key for key in filters.iterkeys() + if key in exact_match_filter_names] + + for filter_name in query_filters: + # Do the matching and remove the filter from the dictionary + # so we don't try it again below.. + query_prefix = _exact_match_filter(query_prefix, filter_name, + filters.pop(filter_name)) + + instances = query_prefix.all() + + if not instances: + return [] + + # Now filter on everything else for regexp matching.. + # For filters not in the list, we'll attempt to use the filter_name + # as a column name in Instance.. + regexp_filter_funcs = {'ip6': _regexp_filter_by_ipv6, + 'ip': _regexp_filter_by_ip} + + for filter_name in filters.iterkeys(): + filter_func = regexp_filter_funcs.get(filter_name, None) + filter_re = re.compile(str(filters[filter_name])) + if filter_func: + filter_l = lambda instance: filter_func(instance, filter_re) + else: + filter_l = lambda instance: _regexp_filter_by_column(instance, + filter_name, filter_re) + instances = filter(filter_l, instances) + + return instances + + @require_admin_context def instance_get_active_by_window(context, begin, end=None): """Return instances that were continuously active over the given window""" @@ -1290,30 +1378,48 @@ def instance_get_all_by_project(context, project_id): @require_context def instance_get_all_by_reservation(context, reservation_id): session = get_session() + query = session.query(models.Instance).\ + filter_by(reservation_id=reservation_id).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')) if is_admin_context(context): - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(reservation_id=reservation_id).\ - filter_by(deleted=can_read_deleted(context)).\ - all() + return query.\ + filter_by(deleted=can_read_deleted(context)).\ + all() elif is_user_context(context): - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(project_id=context.project_id).\ - filter_by(reservation_id=reservation_id).\ - filter_by(deleted=False).\ - all() + return query.\ + filter_by(project_id=context.project_id).\ + filter_by(deleted=False).\ + all() + + +@require_context +def instance_get_by_fixed_ip(context, address): + """Return instance ref by exact match of FixedIP""" + fixed_ip_ref = fixed_ip_get_by_address(context, address) + return fixed_ip_ref.instance + + +@require_context +def instance_get_by_fixed_ipv6(context, address): + """Return instance ref by exact match of IPv6""" + session = get_session() + + # convert IPv6 address to mac + mac = ipv6.to_mac(address) + + # get virtual interface + vif_ref = virtual_interface_get_by_address(context, mac) + + # look up instance based on 
instance_id from vif row + result = session.query(models.Instance).\ + filter_by(id=vif_ref['instance_id']) + return result @require_admin_context @@ -1355,7 +1461,7 @@ def instance_get_fixed_addresses_v6(context, instance_id): network_refs = network_get_all_by_instance(context, instance_id) # compile a list of cidr_v6 prefixes sorted by network id prefixes = [ref.cidr_v6 for ref in - sorted(network_refs, key=lambda ref: ref.id)] + sorted(network_refs, key=lambda ref: ref.id)] # get vifs associated with instance vif_refs = virtual_interface_get_by_instance(context, instance_ref.id) # compile list of the mac_addresses for vifs sorted by network id @@ -1399,9 +1505,10 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: - instance_metadata_delete_all(context, instance_id) - instance_metadata_update_or_create(context, instance_id, - values.pop('metadata')) + instance_metadata_update(context, + instance_id, + values.pop('metadata'), + delete=True) with session.begin(): if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, @@ -1768,7 +1875,9 @@ def network_get_by_cidr(context, cidr): session = get_session() result = session.query(models.Network).\ filter(or_(models.Network.cidr == cidr, - models.Network.cidr_v6 == cidr)).first() + models.Network.cidr_v6 == cidr)).\ + filter_by(deleted=False).\ + first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) @@ -2352,6 +2461,20 @@ def block_device_mapping_update_or_create(context, values): else: result.update(values) + # NOTE(yamahata): same virtual device name can be specified multiple + # times. So delete the existing ones. + virtual_name = values['virtual_name'] + if (virtual_name is not None and + block_device.is_swap_or_ephemeral(virtual_name)): + session.query(models.BlockDeviceMapping).\ + filter_by(instance_id=values['instance_id']).\ + filter_by(virtual_name=virtual_name).\ + filter(models.BlockDeviceMapping.device_name != + values['device_name']).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + @require_context def block_device_mapping_get_all_by_instance(context, instance_id): @@ -3314,21 +3437,37 @@ def instance_metadata_get_item(context, instance_id, key, session=None): @require_context @require_instance_exists -def instance_metadata_update_or_create(context, instance_id, metadata): +def instance_metadata_update(context, instance_id, metadata, delete): session = get_session() - original_metadata = instance_metadata_get(context, instance_id) + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = instance_metadata_get(context, instance_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = instance_metadata_get_item(context, instance_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) meta_ref = None - for key, value in metadata.iteritems(): + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.iteritems(): + + # update the value whether it exists or not + item = {"value": meta_value} + try: - meta_ref = instance_metadata_get_item(context, instance_id, key, - session) + meta_ref = instance_metadata_get_item(context, instance_id, + meta_key, session) + + # if the item doesn't exist, we also need to set key and instance_id except 
exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() - meta_ref.update({"key": key, "value": value, - "instance_id": instance_id, - "deleted": False}) + item.update({"key": meta_key, "instance_id": instance_id}) + + meta_ref.update(item) meta_ref.save(session=session) return metadata @@ -3420,7 +3559,7 @@ def instance_type_extra_specs_delete(context, instance_type_id, key): @require_context def instance_type_extra_specs_get_item(context, instance_type_id, key, - session=None): + session=None): if not session: session = get_session() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9f4c7a0aa..939fde199 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -180,6 +180,7 @@ class Instance(BASE, NovaBase): image_ref = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) + server_name = Column(String(255)) # image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) diff --git a/nova/exception.py b/nova/exception.py index c050da58f..e3e042729 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -25,6 +25,7 @@ SHOULD include dedicated exception logging. """ from functools import wraps +import sys from nova import log as logging @@ -96,6 +97,10 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, try: return f(*args, **kw) except Exception, e: + # Save exception since it can be clobbered during processing + # below before we can re-raise + exc_info = sys.exc_info() + if notifier: payload = dict(args=args, exception=e) payload.update(kw) @@ -122,7 +127,9 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) - raise + + # re-raise original exception since it may have been clobbered + raise exc_info[0], exc_info[1], exc_info[2] return wraps(f)(wrapped) return inner @@ -206,6 +213,12 @@ class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. 
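The wrap_exception() change above saves sys.exc_info() so the original error survives any secondary failure in the notification path; a minimal standalone illustration of that pattern (Python 2 re-raise syntax, matching the tree):

    import sys

    def risky():
        raise ValueError('original failure')

    def call_with_notification():
        try:
            risky()
        except Exception:
            # Capture before doing anything that could clobber the exception.
            exc_info = sys.exc_info()
            try:
                int('not-a-number')   # a secondary error while notifying/logging
            except Exception:
                pass
            # Re-raise the original exception with its original traceback.
            raise exc_info[0], exc_info[1], exc_info[2]

    # call_with_notification() raises the original ValueError, not the
    # secondary error hit while handling it.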
+class InvalidParameterValue(Invalid): + message = _("%(err)s") + + class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") diff --git a/nova/flags.py b/nova/flags.py index 12c6d1356..eb6366ed9 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -317,7 +317,7 @@ DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions', DEFINE_string('osapi_host', '$my_ip', 'ip of api server') DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') DEFINE_integer('osapi_port', 8774, 'OpenStack API port') -DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack') +DEFINE_string('osapi_path', '/v1.1/', 'suffix for openstack') DEFINE_integer('osapi_max_limit', 1000, 'max number of items returned in a collection response') diff --git a/nova/image/glance.py b/nova/image/glance.py index da93f0d1c..9060f6a91 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -19,7 +19,9 @@ from __future__ import absolute_import +import copy import datetime +import json import random from glance.common import exception as glance_exception @@ -223,6 +225,7 @@ class GlanceImageService(service.BaseImageService): self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) + image_meta = _convert_to_string(image_meta) try: image_meta = self.client.update_image(image_id, image_meta, data) except glance_exception.NotFound: @@ -251,11 +254,19 @@ class GlanceImageService(service.BaseImageService): pass @classmethod + def _translate_to_service(cls, image_meta): + image_meta = super(GlanceImageService, + cls)._translate_to_service(image_meta) + image_meta = _convert_to_string(image_meta) + return image_meta + + @classmethod def _translate_to_base(cls, image_meta): """Override translation to handle conversion to datetime objects.""" image_meta = service.BaseImageService._propertify_metadata( image_meta, cls.SERVICE_IMAGE_ATTRS) image_meta = _convert_timestamps_to_datetimes(image_meta) + image_meta = _convert_from_string(image_meta) return image_meta @@ -281,3 +292,38 @@ def _parse_glance_iso8601_timestamp(timestamp): raise ValueError(_('%(timestamp)s does not follow any of the ' 'signatures: %(ISO_FORMATS)s') % locals()) + + +# TODO(yamahata): use block-device-mapping extension to glance +def _json_loads(properties, attr): + prop = properties[attr] + if isinstance(prop, basestring): + properties[attr] = json.loads(prop) + + +def _json_dumps(properties, attr): + prop = properties[attr] + if not isinstance(prop, basestring): + properties[attr] = json.dumps(prop) + + +_CONVERT_PROPS = ('block_device_mapping', 'mappings') + + +def _convert(method, metadata): + metadata = copy.deepcopy(metadata) # don't touch original metadata + properties = metadata.get('properties') + if properties: + for attr in _CONVERT_PROPS: + if attr in properties: + method(properties, attr) + + return metadata + + +def _convert_from_string(metadata): + return _convert(_json_loads, metadata) + + +def _convert_to_string(metadata): + return _convert(_json_dumps, metadata) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 137b671c0..55cea5f8f 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -17,7 +17,8 @@ Handles all requests relating to schedulers. 
""" -import novaclient +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions from nova import db from nova import exception @@ -112,7 +113,7 @@ def _wrap_method(function, self): def _process(func, zone): """Worker stub for green thread pool. Give the worker an authenticated nova client and zone info.""" - nova = novaclient.OpenStack(zone.username, zone.password, None, + nova = novaclient.Client(zone.username, zone.password, None, zone.api_url) nova.authenticate() return func(nova, zone) @@ -132,10 +133,10 @@ def call_zone_method(context, method_name, errors_to_ignore=None, zones = db.zone_get_all(context) for zone in zones: try: - nova = novaclient.OpenStack(zone.username, zone.password, None, + nova = novaclient.Client(zone.username, zone.password, None, zone.api_url) nova.authenticate() - except novaclient.exceptions.BadRequest, e: + except novaclient_exceptions.BadRequest, e: url = zone.api_url LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) @@ -188,7 +189,7 @@ def _issue_novaclient_command(nova, zone, collection, if method_name in ['find', 'findall']: try: return getattr(manager, method_name)(**kwargs) - except novaclient.NotFound: + except novaclient_exceptions.NotFound: url = zone.api_url LOG.debug(_("%(collection)s.%(method_name)s didn't find " "anything matching '%(kwargs)s' on '%(url)s'" % @@ -200,7 +201,7 @@ def _issue_novaclient_command(nova, zone, collection, item = args.pop(0) try: result = manager.get(item) - except novaclient.NotFound: + except novaclient_exceptions.NotFound: url = zone.api_url LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index a47bf7fe7..047dafa6f 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -24,7 +24,9 @@ import operator import json import M2Crypto -import novaclient + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions from nova import crypto from nova import db @@ -118,10 +120,9 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) nova = None try: - nova = novaclient.OpenStack(zone.username, zone.password, None, - url) + nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() - except novaclient.exceptions.BadRequest, e: + except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " "to talk to zone at %(url)s.") % locals()) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index efdac06e1..97bdf3d44 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -18,10 +18,11 @@ ZoneManager oversees all communications with child Zones. """ import datetime -import novaclient import thread import traceback +from novaclient import v1_1 as novaclient + from eventlet import greenpool from nova import db @@ -89,8 +90,8 @@ class ZoneState(object): def _call_novaclient(zone): """Call novaclient. 
Broken out for testing purposes.""" - client = novaclient.OpenStack(zone.username, zone.password, None, - zone.api_url) + client = novaclient.Client(zone.username, zone.password, None, + zone.api_url) return client.zones.info()._info diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a67a28a4e..d11fbf788 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -71,14 +71,18 @@ def fake_wsgi(self, req): return self.application -def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True): +def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True, + fake_auth_context=None): if not inner_app10: inner_app10 = openstack.APIRouterV10() if not inner_app11: inner_app11 = openstack.APIRouterV11() if fake_auth: - ctxt = context.RequestContext('fake', 'fake') + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext('fake', 'fake') api10 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, limits.RateLimitingMiddleware(inner_app10))) api11 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 6f9cbca72..41edf84b4 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -18,7 +18,7 @@ import json import os.path import webob -from xml.etree import ElementTree +from lxml import etree from nova import context from nova import test @@ -26,6 +26,7 @@ from nova.api import openstack from nova.api.openstack import extensions from nova.api.openstack import flavors from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova.tests.api.openstack import fakes NS = "{http://docs.openstack.org/compute/api/v1.1}" @@ -139,7 +140,7 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(200, response.status_int) print response.body - root = ElementTree.XML(response.body) + root = etree.XML(response.body) self.assertEqual(root.tag.split('extensions')[0], NS) # Make sure we have all the extensions. 
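The zone calls above now go through python-novaclient's 1.1 bindings; a hedged sketch of how a child-zone client is built, mirroring the calls in nova/scheduler/api.py (credentials and URL are placeholders):

    from novaclient import v1_1 as novaclient
    from novaclient import exceptions as novaclient_exceptions

    def zone_client(username, password, api_url):
        client = novaclient.Client(username, password, None, api_url)
        try:
            client.authenticate()
        except novaclient_exceptions.BadRequest:
            # Bad credentials for this zone; the callers above log and skip it.
            return None
        return client

    # zone_client('zone-admin', 'secret', 'http://child-zone:8774/v1.1/')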
@@ -155,6 +156,8 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') + xmlutil.validate_schema(root, 'extensions') + def test_get_extension_xml(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) @@ -162,9 +165,10 @@ class ExtensionControllerTest(test.TestCase): request.accept = "application/xml" response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) - print response.body + xml = response.body + print xml - root = ElementTree.XML(response.body) + root = etree.XML(xml) self.assertEqual(root.tag.split('extension')[0], NS) self.assertEqual(root.get('alias'), 'FOXNSOX') self.assertEqual(root.get('name'), 'Fox In Socks') @@ -174,6 +178,8 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(root.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') + xmlutil.validate_schema(root, 'extension') + class ResourceExtensionTest(test.TestCase): @@ -353,7 +359,8 @@ class ExtensionsXMLSerializerTest(test.TestCase): } xml = serializer.serialize(data, 'show') - root = ElementTree.XML(xml) + print xml + root = etree.XML(xml) ext_dict = data['extension'] self.assertEqual(root.findtext('{0}description'.format(NS)), ext_dict['description']) @@ -367,6 +374,8 @@ class ExtensionsXMLSerializerTest(test.TestCase): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) + xmlutil.validate_schema(root, 'extension') + def test_serialize_extensions(self): serializer = extensions.ExtensionsXMLSerializer() data = { @@ -414,7 +423,7 @@ class ExtensionsXMLSerializerTest(test.TestCase): xml = serializer.serialize(data, 'index') print xml - root = ElementTree.XML(xml) + root = etree.XML(xml) ext_elems = root.findall('{0}extension'.format(NS)) self.assertEqual(len(ext_elems), 2) for i, ext_elem in enumerate(ext_elems): @@ -430,3 +439,5 @@ class ExtensionsXMLSerializerTest(test.TestCase): for i, link in enumerate(ext_dict['links']): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) + + xmlutil.validate_schema(root, 'extensions') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 38495bbe7..383ed2e03 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -379,6 +379,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, "status": "ACTIVE", + "progress": 100, }, } @@ -402,6 +403,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, "status": "QUEUED", + "progress": 0, 'server': { 'id': 42, "links": [{ @@ -444,6 +446,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): updated="%(expected_now)s" created="%(expected_now)s" status="ACTIVE" + progress="100" xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" /> """ % (locals())) @@ -463,6 +466,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): updated="%(expected_now)s" created="%(expected_now)s" status="ACTIVE" + progress="100" xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" /> """ % (locals())) @@ -587,6 +591,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, }, { 'id': 124, @@ -594,6 +599,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 
'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', + 'progress': 0, }, { 'id': 125, @@ -608,7 +614,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'active snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'ACTIVE' + 'status': 'ACTIVE', + 'progress': 100, }, { 'id': 127, @@ -616,6 +623,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', + 'progress': 0, }, { 'id': 129, @@ -623,6 +631,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, }] self.assertDictListMatch(expected, response_list) @@ -643,6 +652,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/123", @@ -662,6 +672,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', + 'progress': 0, 'server': { 'id': 42, "links": [{ @@ -723,6 +734,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, 'server': { 'id': 42, "links": [{ @@ -753,6 +765,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', + 'progress': 0, 'server': { 'id': 42, "links": [{ @@ -780,6 +793,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/129", @@ -1001,7 +1015,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_meta = json.loads(res.body)['image'] expected = {'id': 123, 'name': 'public image', 'updated': self.NOW_API_FORMAT, - 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE'} + 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100} self.assertDictMatch(image_meta, expected) def test_get_image_non_existent(self): diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index bf18bc1b0..717e11c00 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -9,6 +9,7 @@ import webob from nova import context from nova import db from nova import utils +from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types from nova.compute import power_state @@ -18,6 +19,9 @@ from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS + + def return_server_by_id(context, id): return _get_instance() @@ -370,6 +374,26 @@ class ServerActionsTest(test.TestCase): self.assertEqual(202, response.status_int) self.assertTrue(response.headers['Location']) + def test_create_backup_with_too_much_metadata(self): + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + 'metadata': {'123': 'asdf'}, + }, + } + for num in range(FLAGS.quota_metadata_items + 1): + body['createBackup']['metadata']['foo%i' % num] = "bar" + req = 
webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + def test_create_backup_no_name(self): """Name is required for backups""" self.flags(allow_admin_api=True) @@ -809,6 +833,22 @@ class ServerActionsTestV11(test.TestCase): location = response.headers['Location'] self.assertEqual('http://localhost/v1.1/images/123', location) + def test_create_image_with_too_much_metadata(self): + body = { + 'createImage': { + 'name': 'Snapshot 1', + 'metadata': {}, + }, + } + for num in range(FLAGS.quota_metadata_items + 1): + body['createImage']['metadata']['foo%i' % num] = "bar" + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + def test_create_image_no_name(self): body = { 'createImage': {}, @@ -943,9 +983,7 @@ class TestServerActionXMLDeserializerV11(test.TestCase): flavorRef="http://localhost/flavors/3"/>""" request = self.deserializer.deserialize(serial_request, 'action') expected = { - "resize": { - "flavorRef": "http://localhost/flavors/3" - }, + "resize": {"flavorRef": "http://localhost/flavors/3"}, } self.assertEquals(request['body'], expected) diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index 08a6a062a..ec446f0f0 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -29,11 +29,11 @@ import nova.wsgi FLAGS = flags.FLAGS -def return_create_instance_metadata_max(context, server_id, metadata): +def return_create_instance_metadata_max(context, server_id, metadata, delete): return stub_max_server_metadata() -def return_create_instance_metadata(context, server_id, metadata): +def return_create_instance_metadata(context, server_id, metadata, delete): return stub_server_metadata() @@ -202,21 +202,30 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(404, res.status_int) def test_create(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'POST' req.content_type = "application/json" - expected = {"metadata": {"key1": "value1"}} - req.body = json.dumps(expected) + input = {"metadata": {"key9": "value9"}} + req.body = json.dumps(input) res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) res_dict = json.loads(res.body) - self.assertEqual(expected, res_dict) + input['metadata'].update({ + "key1": "value1", + "key2": "value2", + "key3": "value3", + }) + self.assertEqual(input, res_dict) def test_create_xml(self): - self.stubs.Set(nova.db.api, "instance_metadata_update_or_create", + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + self.stubs.Set(nova.db.api, "instance_metadata_update", return_create_instance_metadata) req = webob.Request.blank("/v1.1/servers/1/metadata") req.method = "POST" @@ -225,22 +234,29 @@ class ServerMetaDataTest(test.TestCase): request_metadata = minidom.parseString(""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta 
key="key3">value3</meta> - <meta key="key2">value2</meta> - <meta key="key1">value1</meta> + <meta key="key5">value5</meta> </metadata> """.replace(" ", "").replace("\n", "")) req.body = str(request_metadata.toxml()) response = req.get_response(fakes.wsgi_app()) + expected_metadata = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key3">value3</meta> + <meta key="key2">value2</meta> + <meta key="key1">value1</meta> + <meta key="key5">value5</meta> + </metadata> + """.replace(" ", "").replace("\n", "")) + self.assertEqual(200, response.status_int) actual_metadata = minidom.parseString(response.body) - self.assertEqual(request_metadata.toxml(), actual_metadata.toxml()) + self.assertEqual(expected_metadata.toxml(), actual_metadata.toxml()) def test_create_empty_body(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'POST' @@ -258,7 +274,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(404, res.status_int) def test_update_all(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'PUT' @@ -276,7 +292,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(expected, res_dict) def test_update_all_empty_container(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'PUT' @@ -289,7 +305,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'PUT' @@ -300,7 +316,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_all_malformed_data(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'PUT' @@ -320,7 +336,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(404, res.status_int) def test_update_item(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' @@ -334,7 +350,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(expected, res_dict) def test_update_item_xml(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata/key9') req.method = 'PUT' @@ -361,7 +377,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(404, res.status_int) def test_update_item_empty_body(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', 
return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' @@ -370,7 +386,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_too_many_keys(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' @@ -380,7 +396,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_body_uri_mismatch(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/metadata/bad') req.method = 'PUT' @@ -390,7 +406,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_too_many_metadata_items_on_create(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata) data = {"metadata": {}} for num in range(FLAGS.quota_metadata_items + 1): @@ -404,7 +420,7 @@ class ServerMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_to_many_metadata_items_on_update_item(self): - self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata_max) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8dc8d5eaa..85b1d0181 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -236,7 +236,8 @@ class ServersTest(test.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid) - self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) + self.stubs.Set(nova.db.api, 'instance_get_all_by_filters', + return_servers) self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server_by_uuid) @@ -1098,6 +1099,277 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 400) self.assertTrue(res.body.find('marker param') > -1) + def test_get_servers_with_bad_option_v1_0(self): + # 1.0 API ignores unknown options + def fake_get_all(compute_self, context, search_opts=None): + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + req = webob.Request.blank('/v1.0/servers?unknownoption=whee') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_with_bad_option_v1_1(self): + # 1.1 API also ignores unknown options + def fake_get_all(compute_self, context, search_opts=None): + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + req = webob.Request.blank('/v1.1/servers?unknownoption=whee') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def 
test_get_servers_allows_image_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('image' in search_opts) + self.assertEqual(search_opts['image'], '12345') + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.flags(allow_admin_api=False) + + req = webob.Request.blank('/v1.1/servers?image=12345') + res = req.get_response(fakes.wsgi_app()) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_allows_flavor_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('flavor' in search_opts) + # flavor is an integer ID + self.assertEqual(search_opts['flavor'], '12345') + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.flags(allow_admin_api=False) + + req = webob.Request.blank('/v1.1/servers?flavor=12345') + res = req.get_response(fakes.wsgi_app()) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_allows_status_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('state' in search_opts) + self.assertEqual(set(search_opts['state']), + set([power_state.RUNNING, power_state.BLOCKED])) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.flags(allow_admin_api=False) + + req = webob.Request.blank('/v1.1/servers?status=active') + res = req.get_response(fakes.wsgi_app()) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_invalid_status_v1_1(self): + """Test getting servers by invalid status""" + + self.flags(allow_admin_api=False) + + req = webob.Request.blank('/v1.1/servers?status=running') + res = req.get_response(fakes.wsgi_app()) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 400) + self.assertTrue(res.body.find('Invalid server status') > -1) + + def test_get_servers_allows_name_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('name' in search_opts) + self.assertEqual(search_opts['name'], 'whee.*') + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.flags(allow_admin_api=False) + + req = webob.Request.blank('/v1.1/servers?name=whee.*') + res = req.get_response(fakes.wsgi_app()) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_unknown_or_admin_options1(self): + """Test getting servers by admin-only or unknown options. + This tests when admin_api is off. 
Make sure the admin and + unknown options are stripped before they get to + compute_api.get_all() + """ + + self.flags(allow_admin_api=False) + + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + # Allowed by user + self.assertTrue('name' in search_opts) + self.assertTrue('status' in search_opts) + # Allowed only by admins with admin API on + self.assertFalse('ip' in search_opts) + self.assertFalse('unknown_option' in search_opts) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + query_str = "name=foo&ip=10.*&status=active&unknown_option=meow" + req = webob.Request.blank('/v1.1/servers?%s' % query_str) + # Request admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=True) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_unknown_or_admin_options2(self): + """Test getting servers by admin-only or unknown options. + This tests when admin_api is on, but context is a user. + Make sure the admin and unknown options are stripped before + they get to compute_api.get_all() + """ + + self.flags(allow_admin_api=True) + + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + # Allowed by user + self.assertTrue('name' in search_opts) + self.assertTrue('status' in search_opts) + # Allowed only by admins with admin API on + self.assertFalse('ip' in search_opts) + self.assertFalse('unknown_option' in search_opts) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + query_str = "name=foo&ip=10.*&status=active&unknown_option=meow" + req = webob.Request.blank('/v1.1/servers?%s' % query_str) + # Request admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=False) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_unknown_or_admin_options3(self): + """Test getting servers by admin-only or unknown options. + This tests when admin_api is on and context is admin. 
+ All options should be passed through to compute_api.get_all() + """ + + self.flags(allow_admin_api=True) + + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + # Allowed by user + self.assertTrue('name' in search_opts) + self.assertTrue('status' in search_opts) + # Allowed only by admins with admin API on + self.assertTrue('ip' in search_opts) + self.assertTrue('unknown_option' in search_opts) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + query_str = "name=foo&ip=10.*&status=active&unknown_option=meow" + req = webob.Request.blank('/v1.1/servers?%s' % query_str) + # Request admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=True) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_admin_allows_ip_v1_1(self): + """Test getting servers by ip with admin_api enabled and + admin context + """ + self.flags(allow_admin_api=True) + + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('ip' in search_opts) + self.assertEqual(search_opts['ip'], '10\..*') + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + req = webob.Request.blank('/v1.1/servers?ip=10\..*') + # Request admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=True) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_admin_allows_ip6_v1_1(self): + """Test getting servers by ip6 with admin_api enabled and + admin context + """ + self.flags(allow_admin_api=True) + + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('ip6' in search_opts) + self.assertEqual(search_opts['ip6'], 'ffff.*') + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + req = webob.Request.blank('/v1.1/servers?ip6=ffff.*') + # Request admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=True) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + # The following assert will fail if either of the asserts in + # fake_get_all() fail + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + def _setup_for_create_instance(self): """Shared implementation for tests below that create instance""" def instance_create(context, inst): @@ -1159,7 +1431,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) + self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) @@ -1333,7 +1605,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) - 
self.assertEqual(res.status_int, 200) + self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual(1, server['id']) @@ -1428,7 +1700,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) + self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] self.assertEqual(expected_flavor, server['flavor']) self.assertEqual(expected_image, server['image']) @@ -1473,7 +1745,7 @@ class ServersTest(test.TestCase): req.body = json.dumps(body) req.headers['content-type'] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) + self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] self.assertEqual(server['adminPass'], body['server']['adminPass']) @@ -1665,6 +1937,7 @@ class ServersTest(test.TestCase): def test_get_all_server_details_v1_0(self): req = webob.Request.blank('/v1.0/servers/detail') res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) for i, s in enumerate(res_dict['servers']): @@ -1720,7 +1993,7 @@ class ServersTest(test.TestCase): return [stub_instance(i, 'fake', 'fake', None, None, i % 2) for i in xrange(5)] - self.stubs.Set(nova.db.api, 'instance_get_all_by_project', + self.stubs.Set(nova.db.api, 'instance_get_all_by_filters', return_servers_with_host) req = webob.Request.blank('/v1.0/servers/detail') @@ -2673,13 +2946,13 @@ class TestServerInstanceCreation(test.TestCase): def test_create_instance_with_no_personality(self): request, response, injected_files = \ self._create_instance_with_personality_json(personality=None) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, []) def test_create_instance_with_no_personality_xml(self): request, response, injected_files = \ self._create_instance_with_personality_xml(personality=None) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, []) def test_create_instance_with_personality(self): @@ -2689,7 +2962,7 @@ class TestServerInstanceCreation(test.TestCase): personality = [(path, b64contents)] request, response, injected_files = \ self._create_instance_with_personality_json(personality) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, [(path, contents)]) def test_create_instance_with_personality_xml(self): @@ -2699,7 +2972,7 @@ class TestServerInstanceCreation(test.TestCase): personality = [(path, b64contents)] request, response, injected_files = \ self._create_instance_with_personality_xml(personality) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, [(path, contents)]) def test_create_instance_with_personality_no_path(self): @@ -2762,7 +3035,7 @@ class TestServerInstanceCreation(test.TestCase): request = self._get_create_request_json(body_dict) compute_api, response = \ self._run_create_instance_with_mock_compute_api(request) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) def test_create_instance_with_three_personalities(self): files = [ @@ -2775,7 +3048,7 @@ class TestServerInstanceCreation(test.TestCase): personality.append((path, base64.b64encode(content))) request, response, injected_files = \ 
self._create_instance_with_personality_json(personality) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, files) def test_create_instance_personality_empty_content(self): @@ -2784,13 +3057,13 @@ class TestServerInstanceCreation(test.TestCase): personality = [(path, contents)] request, response, injected_files = \ self._create_instance_with_personality_json(personality) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) self.assertEquals(injected_files, [(path, contents)]) def test_create_instance_admin_pass_json(self): request, response, dummy = \ self._create_instance_with_personality_json(None) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) response = json.loads(response.body) self.assertTrue('adminPass' in response['server']) self.assertEqual(16, len(response['server']['adminPass'])) @@ -2798,7 +3071,7 @@ class TestServerInstanceCreation(test.TestCase): def test_create_instance_admin_pass_xml(self): request, response, dummy = \ self._create_instance_with_personality_xml(None) - self.assertEquals(response.status_int, 200) + self.assertEquals(response.status_int, 202) dom = minidom.parseString(response.body) server = dom.childNodes[0] self.assertEquals(server.nodeName, 'server') diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 5a40f578f..0ff508ffa 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -235,3 +235,39 @@ class TestMutatorDateTimeTests(BaseGlanceTest): 'updated_at': None, 'deleted_at': None} return fixture + + +class TestGlanceSerializer(unittest.TestCase): + def test_serialize(self): + metadata = {'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': [ + {'virtual': 'aaa', + 'device': 'bbb'}, + {'virtual': 'xxx', + 'device': 'yyy'}], + 'block_device_mapping': [ + {'virtual_device': 'fake', + 'device_name': '/dev/fake'}, + {'virtual_device': 'ephemeral0', + 'device_name': '/dev/fake0'}]}} + + converted_expected = { + 'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': + '[{"device": "bbb", "virtual": "aaa"}, ' + '{"device": "yyy", "virtual": "xxx"}]', + 'block_device_mapping': + '[{"virtual_device": "fake", "device_name": "/dev/fake"}, ' + '{"virtual_device": "ephemeral0", ' + '"device_name": "/dev/fake0"}]'}} + converted = glance._convert_to_string(metadata) + self.assertEqual(converted, converted_expected) + self.assertEqual(glance._convert_from_string(converted), metadata) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index f60eb6433..330dab5e5 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -21,9 +21,11 @@ Tests For Scheduler import datetime import mox -import novaclient.exceptions import stubout +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions + from mox import IgnoreArg from nova import context from nova import db @@ -1036,10 +1038,10 @@ class FakeServerCollection(object): class FakeEmptyServerCollection(object): def get(self, f): - raise novaclient.NotFound(1) + raise novaclient_exceptions.NotFound(1) def find(self, name): - raise novaclient.NotFound(2) + raise novaclient_exceptions.NotFound(2) class FakeNovaClient(object): @@ -1085,7 +1087,7 @@ class FakeZonesProxy(object): 
raise Exception('testing') -class FakeNovaClientOpenStack(object): +class FakeNovaClientZones(object): def __init__(self, *args, **kwargs): self.zones = FakeZonesProxy() @@ -1098,7 +1100,7 @@ class CallZoneMethodTest(test.TestCase): super(CallZoneMethodTest, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + self.stubs.Set(novaclient, 'Client', FakeNovaClientZones) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index d9b1d39c9..2011ae756 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -27,6 +27,7 @@ import random import StringIO import webob +from nova import block_device from nova import context from nova import exception from nova import test @@ -147,10 +148,12 @@ class Ec2utilsTestCase(test.TestCase): properties0 = {'mappings': mappings} properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings} - root_device_name = ec2utils.properties_root_device_name(properties0) + root_device_name = block_device.properties_root_device_name( + properties0) self.assertEqual(root_device_name, '/dev/sda1') - root_device_name = ec2utils.properties_root_device_name(properties1) + root_device_name = block_device.properties_root_device_name( + properties1) self.assertEqual(root_device_name, '/dev/sdb') def test_mapping_prepend_dev(self): @@ -184,7 +187,7 @@ class Ec2utilsTestCase(test.TestCase): 'device': '/dev/sdc1'}, {'virtual': 'ephemeral1', 'device': '/dev/sdc1'}] - self.assertDictListMatch(ec2utils.mappings_prepend_dev(mappings), + self.assertDictListMatch(block_device.mappings_prepend_dev(mappings), expected_result) @@ -336,6 +339,33 @@ class ApiEc2TestCase(test.TestCase): self.ec2.delete_security_group(security_group_name) + def test_group_name_valid_chars_security_group(self): + """ Test that we sanely handle invalid security group names. + API Spec states we should only accept alphanumeric characters, + spaces, dashes, and underscores. """ + self.expect_http() + self.mox.ReplayAll() + + # Test block group_name of non alphanumeric characters, spaces, + # dashes, and underscores. + security_group_name = "aa #^% -=99" + + self.assertRaises(EC2ResponseError, self.ec2.create_security_group, + security_group_name, 'test group') + + def test_group_name_valid_length_security_group(self): + """Test that we sanely handle invalid security group names. + API Spec states that the length should not exceed 255 chars """ + self.expect_http() + self.mox.ReplayAll() + + # Test block group_name > 255 chars + security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc") + for x in range(random.randint(256, 266))) + + self.assertRaises(EC2ResponseError, self.ec2.create_security_group, + security_group_name, 'test group') + def test_authorize_revoke_security_group_cidr(self): """ Test that we can add and remove CIDR based rules diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py new file mode 100644 index 000000000..b8e9b35e2 --- /dev/null +++ b/nova/tests/test_block_device.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Block Device utility functions. +""" + +from nova import block_device +from nova import test + + +class BlockDeviceTestCase(test.TestCase): + def test_properties(self): + root_device0 = '/dev/sda' + root_device1 = '/dev/sdb' + mappings = [{'virtual': 'root', + 'device': root_device0}] + + properties0 = {'mappings': mappings} + properties1 = {'mappings': mappings, + 'root_device_name': root_device1} + + self.assertEqual(block_device.properties_root_device_name({}), None) + self.assertEqual( + block_device.properties_root_device_name(properties0), + root_device0) + self.assertEqual( + block_device.properties_root_device_name(properties1), + root_device1) + + def test_ephemeral(self): + self.assertFalse(block_device.is_ephemeral('ephemeral')) + self.assertTrue(block_device.is_ephemeral('ephemeral0')) + self.assertTrue(block_device.is_ephemeral('ephemeral1')) + self.assertTrue(block_device.is_ephemeral('ephemeral11')) + self.assertFalse(block_device.is_ephemeral('root')) + self.assertFalse(block_device.is_ephemeral('swap')) + self.assertFalse(block_device.is_ephemeral('/dev/sda1')) + + self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0) + self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1) + self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11) + + self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral')) + self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0')) + self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1')) + self.assertTrue(block_device.is_swap_or_ephemeral('swap')) + self.assertFalse(block_device.is_swap_or_ephemeral('root')) + self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1')) + + def test_mappings_prepend_dev(self): + mapping = [ + {'virtual': 'ami', 'device': '/dev/sda'}, + {'virtual': 'root', 'device': 'sda'}, + {'virtual': 'ephemeral0', 'device': 'sdb'}, + {'virtual': 'swap', 'device': 'sdc'}, + {'virtual': 'ephemeral1', 'device': 'sdd'}, + {'virtual': 'ephemeral2', 'device': 'sde'}] + + expected = [ + {'virtual': 'ami', 'device': '/dev/sda'}, + {'virtual': 'root', 'device': 'sda'}, + {'virtual': 'ephemeral0', 'device': '/dev/sdb'}, + {'virtual': 'swap', 'device': '/dev/sdc'}, + {'virtual': 'ephemeral1', 'device': '/dev/sdd'}, + {'virtual': 'ephemeral2', 'device': '/dev/sde'}] + + prepended = block_device.mappings_prepend_dev(mapping) + self.assertEqual(prepended.sort(), expected.sort()) + + def test_strip_dev(self): + self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda') + self.assertEqual(block_device.strip_dev('sda'), 'sda') diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index e891fa197..b2afc53c9 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,6 +17,8 @@ # under the License. 
import mox +import functools + from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA @@ -892,13 +894,16 @@ class CloudTestCase(test.TestCase): def test_modify_image_attribute(self): modify_image_attribute = self.cloud.modify_image_attribute + fake_metadata = {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'is_public': False} + def fake_show(meh, context, id): - return {'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'is_public': False} + return fake_metadata def fake_update(meh, context, image_id, metadata, data=None): - return metadata + fake_metadata.update(metadata) + return fake_metadata self.stubs.Set(fake._FakeImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) @@ -1464,3 +1469,147 @@ class CloudTestCase(test.TestCase): # TODO(yamahata): clean up snapshot created by CreateImage. self._restart_compute_service() + + @staticmethod + def _fake_bdm_get(ctxt, id): + return [{'volume_id': 87654321, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdh'}, + {'volume_id': None, + 'snapshot_id': 98765432, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdi'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': True, + 'virtual_name': None, + 'delete_on_termination': None, + 'device_name': None}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral0', + 'delete_on_termination': None, + 'device_name': '/dev/sdb'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'swap', + 'delete_on_termination': None, + 'device_name': '/dev/sdc'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral1', + 'delete_on_termination': None, + 'device_name': '/dev/sdd'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral2', + 'delete_on_termination': None, + 'device_name': '/dev/sd3'}, + ] + + def test_get_instance_mapping(self): + """Make sure that _get_instance_mapping works""" + ctxt = None + instance_ref0 = {'id': 0, + 'root_device_name': None} + instance_ref1 = {'id': 0, + 'root_device_name': '/dev/sda1'} + + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + expected = {'ami': 'sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sdb', + 'swap': '/dev/sdc', + 'ephemeral1': '/dev/sdd', + 'ephemeral2': '/dev/sd3'} + + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref0), + cloud._DEFAULT_MAPPINGS) + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref1), + expected) + + def test_describe_instance_attribute(self): + """Make sure that describe_instance_attribute works""" + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + def fake_get(ctxt, instance_id): + return { + 'id': 0, + 'root_device_name': '/dev/sdh', + 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], + 'state_description': 'stopping', + 'instance_type': {'name': 'fake_type'}, + 'kernel_id': 1, + 'ramdisk_id': 2, + 'user_data': 'fake-user data', + } + self.stubs.Set(self.cloud.compute_api, 'get', fake_get) + + def fake_volume_get(ctxt, volume_id, session=None): + if volume_id == 87654321: + return {'id': volume_id, + 'attach_time': '13:56:24', + 
'status': 'in-use'} + raise exception.VolumeNotFound(volume_id=volume_id) + self.stubs.Set(db.api, 'volume_get', fake_volume_get) + + get_attribute = functools.partial( + self.cloud.describe_instance_attribute, + self.context, 'i-12345678') + + bdm = get_attribute('blockDeviceMapping') + bdm['blockDeviceMapping'].sort() + + expected_bdm = {'instance_id': 'i-12345678', + 'rootDeviceType': 'ebs', + 'blockDeviceMapping': [ + {'deviceName': '/dev/sdh', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': True, + 'volumeId': 87654321, + 'attachTime': '13:56:24'}}]} + expected_bdm['blockDeviceMapping'].sort() + self.assertEqual(bdm, expected_bdm) + # NOTE(yamahata): this isn't supported + # get_attribute('disableApiTermination') + groupSet = get_attribute('groupSet') + groupSet['groupSet'].sort() + expected_groupSet = {'instance_id': 'i-12345678', + 'groupSet': [{'groupId': 'fake0'}, + {'groupId': 'fake1'}]} + expected_groupSet['groupSet'].sort() + self.assertEqual(groupSet, expected_groupSet) + self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), + {'instance_id': 'i-12345678', + 'instanceInitiatedShutdownBehavior': 'stop'}) + self.assertEqual(get_attribute('instanceType'), + {'instance_id': 'i-12345678', + 'instanceType': 'fake_type'}) + self.assertEqual(get_attribute('kernel'), + {'instance_id': 'i-12345678', + 'kernel': 'aki-00000001'}) + self.assertEqual(get_attribute('ramdisk'), + {'instance_id': 'i-12345678', + 'ramdisk': 'ari-00000002'}) + self.assertEqual(get_attribute('rootDeviceName'), + {'instance_id': 'i-12345678', + 'rootDeviceName': '/dev/sdh'}) + # NOTE(yamahata): this isn't supported + # get_attribute('sourceDestCheck') + self.assertEqual(get_attribute('userData'), + {'instance_id': 'i-12345678', + 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'}) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index bbf9ddcc6..80f7ff489 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -26,6 +26,7 @@ from nova.compute import power_state from nova import context from nova import db from nova.db.sqlalchemy import models +from nova.db.sqlalchemy import api as sqlalchemy_api from nova import exception from nova import flags import nova.image.fake @@ -73,8 +74,11 @@ class ComputeTestCase(test.TestCase): self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show) - def _create_instance(self, params={}): + def _create_instance(self, params=None): """Create a test instance""" + + if params is None: + params = {} inst = {} inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' @@ -864,6 +868,458 @@ class ComputeTestCase(test.TestCase): self.assertEqual(len(instances), 1) self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + def test_get_all_by_name_regexp(self): + """Test searching instances by name (display_name)""" + c = context.get_admin_context() + instance_id1 = self._create_instance({'display_name': 'woot'}) + instance_id2 = self._create_instance({ + 'display_name': 'woo', + 'id': 20}) + instance_id3 = self._create_instance({ + 'display_name': 'not-woot', + 'id': 30}) + + instances = self.compute_api.get_all(c, + search_opts={'name': 'woo.*'}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id1 in instance_ids) + self.assertTrue(instance_id2 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'name': 'woot.*'}) + instance_ids = [instance.id for instance in instances] + self.assertEqual(len(instances), 1) + 
self.assertTrue(instance_id1 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'name': '.*oot.*'}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id1 in instance_ids) + self.assertTrue(instance_id3 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'name': 'n.*'}) + self.assertEqual(len(instances), 1) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id3 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'name': 'noth.*'}) + self.assertEqual(len(instances), 0) + + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_instance_name_regexp(self): + """Test searching instances by name""" + self.flags(instance_name_template='instance-%d') + + c = context.get_admin_context() + instance_id1 = self._create_instance() + instance_id2 = self._create_instance({'id': 2}) + instance_id3 = self._create_instance({'id': 10}) + + instances = self.compute_api.get_all(c, + search_opts={'instance_name': 'instance.*'}) + self.assertEqual(len(instances), 3) + + instances = self.compute_api.get_all(c, + search_opts={'instance_name': '.*\-\d$'}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id1 in instance_ids) + self.assertTrue(instance_id2 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'instance_name': 'i.*2'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_by_fixed_ip(self): + """Test getting 1 instance by Fixed IP""" + c = context.get_admin_context() + instance_id1 = self._create_instance() + instance_id2 = self._create_instance({'id': 20}) + instance_id3 = self._create_instance({'id': 30}) + + vif_ref1 = db.virtual_interface_create(c, + {'address': '12:34:56:78:90:12', + 'instance_id': instance_id1, + 'network_id': 1}) + vif_ref2 = db.virtual_interface_create(c, + {'address': '90:12:34:56:78:90', + 'instance_id': instance_id2, + 'network_id': 1}) + + db.fixed_ip_create(c, + {'address': '1.1.1.1', + 'instance_id': instance_id1, + 'virtual_interface_id': vif_ref1['id']}) + db.fixed_ip_create(c, + {'address': '1.1.2.1', + 'instance_id': instance_id2, + 'virtual_interface_id': vif_ref2['id']}) + + # regex not allowed + instances = self.compute_api.get_all(c, + search_opts={'fixed_ip': '.*'}) + self.assertEqual(len(instances), 0) + + instances = self.compute_api.get_all(c, + search_opts={'fixed_ip': '1.1.3.1'}) + self.assertEqual(len(instances), 0) + + instances = self.compute_api.get_all(c, + search_opts={'fixed_ip': '1.1.1.1'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id1) + + instances = self.compute_api.get_all(c, + search_opts={'fixed_ip': '1.1.2.1'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + db.virtual_interface_delete(c, vif_ref1['id']) + db.virtual_interface_delete(c, vif_ref2['id']) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + + def test_get_all_by_ip_regexp(self): + """Test searching by Floating and Fixed IP""" + c = context.get_admin_context() + instance_id1 = self._create_instance({'display_name': 'woot'}) + instance_id2 = 
self._create_instance({ + 'display_name': 'woo', + 'id': 20}) + instance_id3 = self._create_instance({ + 'display_name': 'not-woot', + 'id': 30}) + + vif_ref1 = db.virtual_interface_create(c, + {'address': '12:34:56:78:90:12', + 'instance_id': instance_id1, + 'network_id': 1}) + vif_ref2 = db.virtual_interface_create(c, + {'address': '90:12:34:56:78:90', + 'instance_id': instance_id2, + 'network_id': 1}) + vif_ref3 = db.virtual_interface_create(c, + {'address': '34:56:78:90:12:34', + 'instance_id': instance_id3, + 'network_id': 1}) + + db.fixed_ip_create(c, + {'address': '1.1.1.1', + 'instance_id': instance_id1, + 'virtual_interface_id': vif_ref1['id']}) + db.fixed_ip_create(c, + {'address': '1.1.2.1', + 'instance_id': instance_id2, + 'virtual_interface_id': vif_ref2['id']}) + fix_addr = db.fixed_ip_create(c, + {'address': '1.1.3.1', + 'instance_id': instance_id3, + 'virtual_interface_id': vif_ref3['id']}) + fix_ref = db.fixed_ip_get_by_address(c, fix_addr) + flo_ref = db.floating_ip_create(c, + {'address': '10.0.0.2', + 'fixed_ip_id': fix_ref['id']}) + + # ends up matching 2nd octet here.. so all 3 match + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*\.1'}) + self.assertEqual(len(instances), 3) + + instances = self.compute_api.get_all(c, + search_opts={'ip': '1.*'}) + self.assertEqual(len(instances), 3) + + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*\.1.\d+$'}) + self.assertEqual(len(instances), 1) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id1 in instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*\.2.+'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'ip': '10.*'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id3) + + db.virtual_interface_delete(c, vif_ref1['id']) + db.virtual_interface_delete(c, vif_ref2['id']) + db.virtual_interface_delete(c, vif_ref3['id']) + db.floating_ip_destroy(c, '10.0.0.2') + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_ipv6_regexp(self): + """Test searching by IPv6 address""" + + c = context.get_admin_context() + instance_id1 = self._create_instance({'display_name': 'woot'}) + instance_id2 = self._create_instance({ + 'display_name': 'woo', + 'id': 20}) + instance_id3 = self._create_instance({ + 'display_name': 'not-woot', + 'id': 30}) + + vif_ref1 = db.virtual_interface_create(c, + {'address': '12:34:56:78:90:12', + 'instance_id': instance_id1, + 'network_id': 1}) + vif_ref2 = db.virtual_interface_create(c, + {'address': '90:12:34:56:78:90', + 'instance_id': instance_id2, + 'network_id': 1}) + vif_ref3 = db.virtual_interface_create(c, + {'address': '34:56:78:90:12:34', + 'instance_id': instance_id3, + 'network_id': 1}) + + # This will create IPv6 addresses of: + # 1: fd00::1034:56ff:fe78:9012 + # 20: fd00::9212:34ff:fe56:7890 + # 30: fd00::3656:78ff:fe90:1234 + + instances = self.compute_api.get_all(c, + search_opts={'ip6': '.*1034.*'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id1) + + instances = self.compute_api.get_all(c, + search_opts={'ip6': '^fd00.*'}) + self.assertEqual(len(instances), 3) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id1 in instance_ids) + self.assertTrue(instance_id2 in instance_ids) + self.assertTrue(instance_id3 in 
instance_ids) + + instances = self.compute_api.get_all(c, + search_opts={'ip6': '^.*12.*34.*'}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id2 in instance_ids) + self.assertTrue(instance_id3 in instance_ids) + + db.virtual_interface_delete(c, vif_ref1['id']) + db.virtual_interface_delete(c, vif_ref2['id']) + db.virtual_interface_delete(c, vif_ref3['id']) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_multiple_options_at_once(self): + """Test searching by multiple options at once""" + c = context.get_admin_context() + instance_id1 = self._create_instance({'display_name': 'woot'}) + instance_id2 = self._create_instance({ + 'display_name': 'woo', + 'id': 20}) + instance_id3 = self._create_instance({ + 'display_name': 'not-woot', + 'id': 30}) + + vif_ref1 = db.virtual_interface_create(c, + {'address': '12:34:56:78:90:12', + 'instance_id': instance_id1, + 'network_id': 1}) + vif_ref2 = db.virtual_interface_create(c, + {'address': '90:12:34:56:78:90', + 'instance_id': instance_id2, + 'network_id': 1}) + vif_ref3 = db.virtual_interface_create(c, + {'address': '34:56:78:90:12:34', + 'instance_id': instance_id3, + 'network_id': 1}) + + db.fixed_ip_create(c, + {'address': '1.1.1.1', + 'instance_id': instance_id1, + 'virtual_interface_id': vif_ref1['id']}) + db.fixed_ip_create(c, + {'address': '1.1.2.1', + 'instance_id': instance_id2, + 'virtual_interface_id': vif_ref2['id']}) + fix_addr = db.fixed_ip_create(c, + {'address': '1.1.3.1', + 'instance_id': instance_id3, + 'virtual_interface_id': vif_ref3['id']}) + fix_ref = db.fixed_ip_get_by_address(c, fix_addr) + flo_ref = db.floating_ip_create(c, + {'address': '10.0.0.2', + 'fixed_ip_id': fix_ref['id']}) + + # ip ends up matching 2nd octet here.. so all 3 match ip + # but 'name' only matches one + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*\.1', 'name': 'not.*'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id3) + + # ip ends up matching any ip with a '2' in it.. so instance + # 2 and 3.. but name should only match #2 + # but 'name' only matches one + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*2', 'name': '^woo.*'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + # same as above but no match on name (name matches instance_id1 + # but the ip query doesn't + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*2.*', 'name': '^woot.*'}) + self.assertEqual(len(instances), 0) + + # ip matches all 3... 
ipv6 matches #2+#3...name matches #3 + instances = self.compute_api.get_all(c, + search_opts={'ip': '.*\.1', + 'name': 'not.*', + 'ip6': '^.*12.*34.*'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id3) + + db.virtual_interface_delete(c, vif_ref1['id']) + db.virtual_interface_delete(c, vif_ref2['id']) + db.virtual_interface_delete(c, vif_ref3['id']) + db.floating_ip_destroy(c, '10.0.0.2') + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_image(self): + """Test searching instances by image""" + + c = context.get_admin_context() + instance_id1 = self._create_instance({'image_ref': '1234'}) + instance_id2 = self._create_instance({ + 'id': 2, + 'image_ref': '4567'}) + instance_id3 = self._create_instance({ + 'id': 10, + 'image_ref': '4567'}) + + instances = self.compute_api.get_all(c, + search_opts={'image': '123'}) + self.assertEqual(len(instances), 0) + + instances = self.compute_api.get_all(c, + search_opts={'image': '1234'}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id1) + + instances = self.compute_api.get_all(c, + search_opts={'image': '4567'}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id2 in instance_ids) + self.assertTrue(instance_id3 in instance_ids) + + # Test passing a list as search arg + instances = self.compute_api.get_all(c, + search_opts={'image': ['1234', '4567']}) + self.assertEqual(len(instances), 3) + + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_flavor(self): + """Test searching instances by image""" + + c = context.get_admin_context() + instance_id1 = self._create_instance({'instance_type_id': 1}) + instance_id2 = self._create_instance({ + 'id': 2, + 'instance_type_id': 2}) + instance_id3 = self._create_instance({ + 'id': 10, + 'instance_type_id': 2}) + + # NOTE(comstud): Migrations set up the instance_types table + # for us. 
Therefore, we assume the following is true for + # these tests: + # instance_type_id 1 == flavor 3 + # instance_type_id 2 == flavor 1 + # instance_type_id 3 == flavor 4 + # instance_type_id 4 == flavor 5 + # instance_type_id 5 == flavor 2 + + instances = self.compute_api.get_all(c, + search_opts={'flavor': 5}) + self.assertEqual(len(instances), 0) + + self.assertRaises(exception.FlavorNotFound, + self.compute_api.get_all, + c, search_opts={'flavor': 99}) + + instances = self.compute_api.get_all(c, + search_opts={'flavor': 3}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id1) + + instances = self.compute_api.get_all(c, + search_opts={'flavor': 1}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id2 in instance_ids) + self.assertTrue(instance_id3 in instance_ids) + + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + + def test_get_all_by_state(self): + """Test searching instances by state""" + + c = context.get_admin_context() + instance_id1 = self._create_instance({'state': power_state.SHUTDOWN}) + instance_id2 = self._create_instance({ + 'id': 2, + 'state': power_state.RUNNING}) + instance_id3 = self._create_instance({ + 'id': 10, + 'state': power_state.RUNNING}) + + instances = self.compute_api.get_all(c, + search_opts={'state': power_state.SUSPENDED}) + self.assertEqual(len(instances), 0) + + instances = self.compute_api.get_all(c, + search_opts={'state': power_state.SHUTDOWN}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id1) + + instances = self.compute_api.get_all(c, + search_opts={'state': power_state.RUNNING}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id2 in instance_ids) + self.assertTrue(instance_id3 in instance_ids) + + # Test passing a list as search arg + instances = self.compute_api.get_all(c, + search_opts={'state': [power_state.SHUTDOWN, + power_state.RUNNING]}) + self.assertEqual(len(instances), 3) + + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', @@ -877,15 +1333,17 @@ class ComputeTestCase(test.TestCase): return bdm def test_update_block_device_mapping(self): + swap_size = 1 + instance_type = {'swap': swap_size} instance_id = self._create_instance() mappings = [ {'virtual': 'ami', 'device': 'sda1'}, {'virtual': 'root', 'device': '/dev/sda1'}, - {'virtual': 'swap', 'device': 'sdb1'}, - {'virtual': 'swap', 'device': 'sdb2'}, - {'virtual': 'swap', 'device': 'sdb3'}, {'virtual': 'swap', 'device': 'sdb4'}, + {'virtual': 'swap', 'device': 'sdb3'}, + {'virtual': 'swap', 'device': 'sdb2'}, + {'virtual': 'swap', 'device': 'sdb1'}, {'virtual': 'ephemeral0', 'device': 'sdc1'}, {'virtual': 'ephemeral1', 'device': 'sdc2'}, @@ -927,32 +1385,36 @@ class ComputeTestCase(test.TestCase): 'no_device': True}] self.compute_api._update_image_block_device_mapping( - self.context, instance_id, mappings) + self.context, instance_type, instance_id, mappings) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( self.context, instance_id)] expected_result = [ - {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, - {'virtual_name': 'swap', 'device_name': 
'/dev/sdb2'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb3'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb4'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb1', + 'volume_size': swap_size}, {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'}, - {'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'}, - {'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}] + + # NOTE(yamahata): ATM only ephemeral0 is supported. + # they're ignored for now + #{'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'}, + #{'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'} + ] bdms.sort() expected_result.sort() self.assertDictListMatch(bdms, expected_result) self.compute_api._update_block_device_mapping( - self.context, instance_id, block_device_mapping) + self.context, instance_types.get_default_instance_type(), + instance_id, block_device_mapping) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( self.context, instance_id)] expected_result = [ {'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb1', + 'volume_size': swap_size}, {'snapshot_id': 0x23456789, 'device_name': '/dev/sdb2'}, {'snapshot_id': 0x3456789A, 'device_name': '/dev/sdb3'}, {'no_device': True, 'device_name': '/dev/sdb4'}, @@ -974,3 +1436,13 @@ class ComputeTestCase(test.TestCase): self.context, instance_id): db.block_device_mapping_destroy(self.context, bdm['id']) self.compute.terminate_instance(self.context, instance_id) + + def test_ephemeral_size(self): + local_size = 2 + inst_type = {'local_gb': local_size} + self.assertEqual(self.compute_api._ephemeral_size(inst_type, + 'ephemeral0'), + local_size) + self.assertEqual(self.compute_api._ephemeral_size(inst_type, + 'ephemeral1'), + 0) diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py index 548f81f8b..a724db9da 100644 --- a/nova/tests/test_hosts.py +++ b/nova/tests/test_hosts.py @@ -48,6 +48,10 @@ def stub_set_host_enabled(context, host, enabled): return status +def stub_host_power_action(context, host, action): + return action + + class FakeRequest(object): environ = {"nova.context": context.get_admin_context()} @@ -62,6 +66,8 @@ class HostTestCase(test.TestCase): self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list) self.stubs.Set(self.controller.compute_api, 'set_host_enabled', stub_set_host_enabled) + self.stubs.Set(self.controller.compute_api, 'host_power_action', + stub_host_power_action) def test_list_hosts(self): """Verify that the compute hosts are returned.""" @@ -87,6 +93,18 @@ class HostTestCase(test.TestCase): result_c2 = self.controller.update(self.req, "host_c2", body=en_body) self.assertEqual(result_c2["status"], "disabled") + def test_host_startup(self): + result = self.controller.startup(self.req, "host_c1") + self.assertEqual(result["power_action"], "startup") + + def test_host_shutdown(self): + result = self.controller.shutdown(self.req, "host_c1") + self.assertEqual(result["power_action"], "shutdown") + + def test_host_reboot(self): + result = self.controller.reboot(self.req, "host_c1") + self.assertEqual(result["power_action"], "reboot") + def test_bad_status_value(self): bad_body = {"status": "bad"} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f8b866985..c04851d59 100644 --- a/nova/tests/test_libvirt.py +++ 
b/nova/tests/test_libvirt.py @@ -169,6 +169,7 @@ class LibvirtConnTestCase(test.TestCase): 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '123456', + 'local_gb': 20, 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -744,6 +745,42 @@ class LibvirtConnTestCase(test.TestCase): ip = conn.get_host_ip_addr() self.assertEquals(ip, FLAGS.my_ip) + def test_volume_in_mapping(self): + conn = connection.LibvirtConnection(False) + swap = {'device_name': '/dev/sdb', + 'swap_size': 1} + ephemerals = [{'num': 0, + 'virtual_name': 'ephemeral0', + 'device_name': '/dev/sdc1', + 'size': 1}, + {'num': 2, + 'virtual_name': 'ephemeral2', + 'device_name': '/dev/sdd', + 'size': 1}] + block_device_mapping = [{'mount_device': '/dev/sde', + 'device_path': 'fake_device'}, + {'mount_device': '/dev/sdf', + 'device_path': 'fake_device'}] + block_device_info = { + 'root_device_name': '/dev/sda', + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} + + def _assert_volume_in_mapping(device_name, true_or_false): + self.assertEquals(conn._volume_in_mapping(device_name, + block_device_info), + true_or_false) + + _assert_volume_in_mapping('sda', False) + _assert_volume_in_mapping('sdb', True) + _assert_volume_in_mapping('sdc1', True) + _assert_volume_in_mapping('sdd', True) + _assert_volume_in_mapping('sde', True) + _assert_volume_in_mapping('sdf', True) + _assert_volume_in_mapping('sdg', False) + _assert_volume_in_mapping('sdh1', False) + class NWFilterFakes: def __init__(self): diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index c862726ab..ad678714e 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -43,16 +43,21 @@ class MetadataTestCase(test.TestCase): 'reservation_id': 'r-xxxxxxxx', 'user_data': '', 'image_ref': 7, + 'fixed_ips': [], + 'root_device_name': '/dev/sda1', 'hostname': 'test'}) def instance_get(*args, **kwargs): return self.instance + def instance_get_list(*args, **kwargs): + return [self.instance] + def floating_get(*args, **kwargs): return '99.99.99.99' self.stubs.Set(api, 'instance_get', instance_get) - self.stubs.Set(api, 'fixed_ip_get_instance', instance_get) + self.stubs.Set(api, 'instance_get_all_by_filters', instance_get_list) self.stubs.Set(api, 'instance_get_floating_address', floating_get) self.app = metadatarequesthandler.MetadataRequestHandler() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py new file mode 100644 index 000000000..388f075af --- /dev/null +++ b/nova/tests/test_virt.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import flags +from nova import test +from nova.virt import driver + +FLAGS = flags.FLAGS + + +class TestVirtDriver(test.TestCase): + def test_block_device(self): + swap = {'device_name': '/dev/sdb', + 'swap_size': 1} + ephemerals = [{'num': 0, + 'virtual_name': 'ephemeral0', + 'device_name': '/dev/sdc1', + 'size': 1}] + block_device_mapping = [{'mount_device': '/dev/sde', + 'device_path': 'fake_device'}] + block_device_info = { + 'root_device_name': '/dev/sda', + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} + + empty_block_device_info = {} + + self.assertEqual( + driver.block_device_info_get_root(block_device_info), '/dev/sda') + self.assertEqual( + driver.block_device_info_get_root(empty_block_device_info), None) + self.assertEqual( + driver.block_device_info_get_root(None), None) + + self.assertEqual( + driver.block_device_info_get_swap(block_device_info), swap) + self.assertEqual(driver.block_device_info_get_swap( + empty_block_device_info)['device_name'], None) + self.assertEqual(driver.block_device_info_get_swap( + empty_block_device_info)['swap_size'], 0) + self.assertEqual( + driver.block_device_info_get_swap({'swap': None})['device_name'], + None) + self.assertEqual( + driver.block_device_info_get_swap({'swap': None})['swap_size'], + 0) + self.assertEqual( + driver.block_device_info_get_swap(None)['device_name'], None) + self.assertEqual( + driver.block_device_info_get_swap(None)['swap_size'], 0) + + self.assertEqual( + driver.block_device_info_get_ephemerals(block_device_info), + ephemerals) + self.assertEqual( + driver.block_device_info_get_ephemerals(empty_block_device_info), + []) + self.assertEqual( + driver.block_device_info_get_ephemerals(None), + []) + + def test_swap_is_usable(self): + self.assertFalse(driver.swap_is_usable(None)) + self.assertFalse(driver.swap_is_usable({'device_name': None})) + self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb', + 'swap_size': 0})) + self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb', + 'swap_size': 1})) diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index a943fee27..9efa23015 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -18,7 +18,6 @@ Tests For ZoneManager import datetime import mox -import novaclient from nova import context from nova import db diff --git a/nova/utils.py b/nova/utils.py index 046eba741..36a545fb6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -239,7 +239,7 @@ def abspath(s): def novadir(): import nova - return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0] + return os.path.abspath(nova.__file__).split('nova/__init__.py')[0] def default_flagfile(filename='nova.conf', args=None): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 4f3cfefad..df4a66ac2 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -32,6 +32,33 @@ class InstanceInfo(object): self.state = state +def block_device_info_get_root(block_device_info): + block_device_info = block_device_info or {} + return block_device_info.get('root_device_name') + + +def block_device_info_get_swap(block_device_info): + block_device_info = block_device_info or {} + return block_device_info.get('swap') or {'device_name': None, + 'swap_size': 0} + + +def swap_is_usable(swap): + return swap and swap['device_name'] and swap['swap_size'] > 0 + + +def block_device_info_get_ephemerals(block_device_info): + block_device_info = block_device_info or {} + ephemerals = block_device_info.get('ephemerals') or [] + return 
ephemerals + + +def block_device_info_get_mapping(block_device_info): + block_device_info = block_device_info or {} + block_device_mapping = block_device_info.get('block_device_mapping') or [] + return block_device_mapping + + class ComputeDriver(object): """Base class for compute drivers. @@ -65,8 +92,8 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def spawn(self, context, instance, network_info, - block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """Launch a VM for the specified instance""" raise NotImplementedError() @@ -282,6 +309,10 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + raise NotImplementedError() + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" # TODO(Vek): Need to pass context in for access to auth_token diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 80abcc644..93c54a27d 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -129,8 +129,8 @@ class FakeConnection(driver.ComputeDriver): info_list.append(self._map_to_instance_info(instance)) return info_list - def spawn(self, context, instance, network_info, - block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """ Create a new instance/VM/domain on the virtualization platform. @@ -512,6 +512,10 @@ class FakeConnection(driver.ComputeDriver): """Return fake Host Status of ram, disk, network.""" return self.host_status + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 3428a7fc1..43658a6c2 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -138,8 +138,8 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, context, instance, network_info, - block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: @@ -499,6 +499,10 @@ class HyperVConnection(driver.ComputeDriver): """See xenapi_conn.py implementation.""" pass + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index a75636390..210e2b0fb 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -3,24 +3,22 @@ <memory>${memory_kb}</memory> <os> #if $type == 'lxc' - #set $disk_prefix = '' #set $disk_bus = '' <type>exe</type> <init>/sbin/init</init> #else if $type == 'uml' - #set $disk_prefix = 'ubd' #set $disk_bus = 'uml' <type>uml</type> <kernel>/usr/bin/linux</kernel> - <root>/dev/ubda</root> + #set $root_device_name = $getVar('root_device_name', '/dev/ubda') + <root>${root_device_name}</root> #else #if $type == 'xen' - #set $disk_prefix = 'sd' #set $disk_bus = 'scsi' <type>linux</type> - <root>/dev/xvda</root> + #set $root_device_name = $getVar('root_device_name', 
'/dev/xvda') + <root>${root_device_name}</root> #else - #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' <type>hvm</type> #end if @@ -33,7 +31,8 @@ #if $type == 'xen' <cmdline>ro</cmdline> #else - <cmdline>root=/dev/vda console=ttyS0</cmdline> + #set $root_device_name = $getVar('root_device_name', '/dev/vda') + <cmdline>root=${root_device_name} console=ttyS0</cmdline> #end if #if $getVar('ramdisk', None) <initrd>${ramdisk}</initrd> @@ -71,16 +70,30 @@ <disk type='file'> <driver type='${driver_type}'/> <source file='${basepath}/disk'/> - <target dev='${disk_prefix}a' bus='${disk_bus}'/> + <target dev='${root_device}' bus='${disk_bus}'/> </disk> #end if - #if $getVar('local', False) + #if $getVar('local_device', False) <disk type='file'> <driver type='${driver_type}'/> <source file='${basepath}/disk.local'/> - <target dev='${disk_prefix}b' bus='${disk_bus}'/> + <target dev='${local_device}' bus='${disk_bus}'/> </disk> #end if + #for $eph in $ephemerals + <disk type='block'> + <driver type='${driver_type}'/> + <source dev='${basepath}/${eph.device_path}'/> + <target dev='${eph.device}' bus='${disk_bus}'/> + </disk> + #end for + #if $getVar('swap_device', False) + <disk type='file'> + <driver type='${driver_type}'/> + <source file='${basepath}/disk.swap'/> + <target dev='${swap_device}' bus='${disk_bus}'/> + </disk> + #end if #for $vol in $volumes <disk type='${vol.type}'> <driver type='raw'/> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index d4160b280..16efa7292 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -54,6 +54,7 @@ from xml.etree import ElementTree from eventlet import greenthread from eventlet import tpool +from nova import block_device from nova import context as nova_context from nova import db from nova import exception @@ -151,8 +152,8 @@ def _late_load_cheetah(): Template = t.Template -def _strip_dev(mount_path): - return re.sub(r'^/dev/', '', mount_path) +def _get_eph_disk(ephemeral): + return 'disk.eph' + str(ephemeral['num']) class LibvirtConnection(driver.ComputeDriver): @@ -574,15 +575,14 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, network_info, - block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): xml = self.to_xml(instance, False, network_info=network_info, - block_device_mapping=block_device_mapping) - block_device_mapping = block_device_mapping or [] + block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_image(context, instance, xml, network_info=network_info, - block_device_mapping=block_device_mapping) + block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) self.firewall_driver.apply_instance_filter(instance) @@ -759,11 +759,14 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? 
+ def _create_swap(self, target, swap_gb): + """Create a swap file of specified size""" + self._create_local(target, swap_gb) + utils.execute('mkswap', target) + def _create_image(self, context, inst, libvirt_xml, suffix='', disk_images=None, network_info=None, - block_device_mapping=None): - block_device_mapping = block_device_mapping or [] - + block_device_info=None): if not suffix: suffix = '' @@ -822,8 +825,8 @@ class LibvirtConnection(driver.ComputeDriver): size = None root_fname += "_sm" - if not self._volume_in_mapping(self.root_mount_device, - block_device_mapping): + if not self._volume_in_mapping(self.default_root_device, + block_device_info): self._cache_image(fn=self._fetch_image, context=context, target=basepath('disk'), @@ -834,13 +837,38 @@ class LibvirtConnection(driver.ComputeDriver): project_id=inst['project_id'], size=size) - if inst_type['local_gb'] and not self._volume_in_mapping( - self.local_mount_device, block_device_mapping): + local_gb = inst['local_gb'] + if local_gb and not self._volume_in_mapping( + self.default_local_device, block_device_info): self._cache_image(fn=self._create_local, target=basepath('disk.local'), - fname="local_%s" % inst_type['local_gb'], + fname="local_%s" % local_gb, cow=FLAGS.use_cow_images, - local_gb=inst_type['local_gb']) + local_gb=local_gb) + + for eph in driver.block_device_info_get_ephemerals(block_device_info): + self._cache_image(fn=self._create_local, + target=basepath(_get_eph_disk(eph)), + fname="local_%s" % eph['size'], + cow=FLAGS.use_cow_images, + local_gb=eph['size']) + + swap_gb = 0 + + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + swap_gb = swap['swap_size'] + elif (inst_type['swap'] > 0 and + not self._volume_in_mapping(self.default_swap_device, + block_device_info)): + swap_gb = inst_type['swap'] + + if swap_gb > 0: + self._cache_image(fn=self._create_swap, + target=basepath('disk.swap'), + fname="swap_%s" % swap_gb, + cow=FLAGS.use_cow_images, + swap_gb=swap_gb) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -921,16 +949,35 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) - root_mount_device = 'vda' # FIXME for now. it's hard coded. - local_mount_device = 'vdb' # FIXME for now. it's hard coded. 
- - def _volume_in_mapping(self, mount_device, block_device_mapping): - mount_device_ = _strip_dev(mount_device) - for vol in block_device_mapping: - vol_mount_device = _strip_dev(vol['mount_device']) - if vol_mount_device == mount_device_: - return True - return False + if FLAGS.libvirt_type == 'uml': + _disk_prefix = 'ubd' + elif FLAGS.libvirt_type == 'xen': + _disk_prefix = 'sd' + elif FLAGS.libvirt_type == 'lxc': + _disk_prefix = '' + else: + _disk_prefix = 'vd' + + default_root_device = _disk_prefix + 'a' + default_local_device = _disk_prefix + 'b' + default_swap_device = _disk_prefix + 'c' + + def _volume_in_mapping(self, mount_device, block_device_info): + block_device_list = [block_device.strip_dev(vol['mount_device']) + for vol in + driver.block_device_info_get_mapping( + block_device_info)] + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + block_device_list.append( + block_device.strip_dev(swap['device_name'])) + block_device_list += [block_device.strip_dev(ephemeral['device_name']) + for ephemeral in + driver.block_device_info_get_ephemerals( + block_device_info)] + + LOG.debug(_("block_device_list %s"), block_device_list) + return block_device.strip_dev(mount_device) in block_device_list def _get_volume_device_info(self, device_path): if device_path.startswith('/dev/'): @@ -942,8 +989,9 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.InvalidDevicePath(path=device_path) def _prepare_xml_info(self, instance, rescue=False, network_info=None, - block_device_mapping=None): - block_device_mapping = block_device_mapping or [] + block_device_info=None): + block_device_mapping = driver.block_device_info_get_mapping( + block_device_info) # TODO(adiantum) remove network_info creation code # when multinics will be completed if not network_info: @@ -962,17 +1010,27 @@ class LibvirtConnection(driver.ComputeDriver): driver_type = 'raw' for vol in block_device_mapping: - vol['mount_device'] = _strip_dev(vol['mount_device']) + vol['mount_device'] = block_device.strip_dev(vol['mount_device']) (vol['type'], vol['protocol'], vol['name']) = \ self._get_volume_device_info(vol['device_path']) - ebs_root = self._volume_in_mapping(self.root_mount_device, - block_device_mapping) - if self._volume_in_mapping(self.local_mount_device, - block_device_mapping): - local_gb = False - else: - local_gb = inst_type['local_gb'] + ebs_root = self._volume_in_mapping(self.default_root_device, + block_device_info) + + local_device = False + if not (self._volume_in_mapping(self.default_local_device, + block_device_info) or + 0 in [eph['num'] for eph in + driver.block_device_info_get_ephemerals( + block_device_info)]): + if instance['local_gb'] > 0: + local_device = self.default_local_device + + ephemerals = [] + for eph in driver.block_device_info_get_ephemerals(block_device_info): + ephemerals.append({'device_path': _get_eph_disk(eph), + 'device': block_device.strip_dev( + eph['device_name'])}) xml_info = {'type': FLAGS.libvirt_type, 'name': instance['name'], @@ -981,12 +1039,35 @@ class LibvirtConnection(driver.ComputeDriver): 'memory_kb': inst_type['memory_mb'] * 1024, 'vcpus': inst_type['vcpus'], 'rescue': rescue, - 'local': local_gb, + 'disk_prefix': self._disk_prefix, 'driver_type': driver_type, 'vif_type': FLAGS.libvirt_vif_type, 'nics': nics, 'ebs_root': ebs_root, - 'volumes': block_device_mapping} + 'local_device': local_device, + 'volumes': block_device_mapping, + 'ephemerals': ephemerals} + + root_device_name = 
driver.block_device_info_get_root(block_device_info) + if root_device_name: + xml_info['root_device'] = block_device.strip_dev(root_device_name) + xml_info['root_device_name'] = root_device_name + else: + # NOTE(yamahata): + # for nova.api.ec2.cloud.CloudController.get_metadata() + xml_info['root_device'] = self.default_root_device + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'root_device_name': '/dev/' + self.default_root_device}) + + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + xml_info['swap_device'] = block_device.strip_dev( + swap['device_name']) + elif (inst_type['swap'] > 0 and + not self._volume_in_mapping(self.default_swap_device, + block_device_info)): + xml_info['swap_device'] = self.default_swap_device if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): xml_info['vncserver_host'] = FLAGS.vncserver_host @@ -1002,12 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver): return xml_info def to_xml(self, instance, rescue=False, network_info=None, - block_device_mapping=None): - block_device_mapping = block_device_mapping or [] + block_device_info=None): # TODO(termie): cache? LOG.debug(_('instance %s: starting toXML method'), instance['name']) xml_info = self._prepare_xml_info(instance, rescue, network_info, - block_device_mapping) + block_device_info) xml = str(Template(self.libvirt_xml, searchList=[xml_info])) LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml @@ -1562,6 +1642,10 @@ class LibvirtConnection(driver.ComputeDriver): """See xenapi_conn.py implementation.""" pass + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index 3d209fa99..aaa384374 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -191,6 +191,10 @@ class VMWareESXConnection(driver.ComputeDriver): """This method is supported only by libvirt."""
return
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
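
The host_power_action hook stubbed here for the VMware driver (and added throughout the driver interface in this merge) takes a host name and an action string; the xenapi driver further down maps "reboot" and "shutdown" onto plugin calls and rejects "startup". A rough, hypothetical sketch of that contract follows; the class and method names are invented for illustration and are not Nova code.

{{{
class ExampleDriver(object):
    """Hypothetical compute driver illustrating the host power hook."""

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        handlers = {'reboot': self._host_reboot,
                    'shutdown': self._host_shutdown}
        if action not in handlers:
            # A platform that cannot power itself back on (XenServer, for
            # instance) surfaces unsupported actions to the caller.
            raise NotImplementedError(
                "Unsupported host power action: %s" % action)
        return handlers[action](host)

    def _host_reboot(self, host):
        return 'reboot'      # placeholder for the platform-specific call

    def _host_shutdown(self, host):
        return 'shutdown'    # placeholder for the platform-specific call
}}}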
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a78413370..50aa0d3b2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -282,6 +282,7 @@ class VMOps(object): 'architecture': instance.architecture}) def _check_agent_version(): + LOG.debug(_("Querying agent version")) if instance.os_type == 'windows': # Windows will generally perform a setup process on first boot # that can take a couple of minutes and then reboot. So we @@ -292,7 +293,6 @@ class VMOps(object): else: version = self.get_agent_version(instance) if not version: - LOG.info(_('No agent version returned by instance')) return LOG.info(_('Instance agent version: %s') % version) @@ -327,6 +327,10 @@ class VMOps(object): LOG.debug(_("Setting admin password")) self.set_admin_password(instance, admin_password) + def _reset_network(): + LOG.debug(_("Resetting network")) + self.reset_network(instance, vm_ref) + # NOTE(armando): Do we really need to do this in virt? # NOTE(tr3buchet): not sure but wherever we do it, we need to call # reset_network afterwards @@ -341,7 +345,7 @@ class VMOps(object): _check_agent_version() _inject_files() _set_admin_password() - self.reset_network(instance, vm_ref) + _reset_network() return True except Exception, exc: LOG.warn(exc) @@ -597,13 +601,13 @@ class VMOps(object): transaction_id = str(uuid.uuid4()) args = {'id': transaction_id} resp = self._make_agent_call('version', instance, '', args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) + if resp['returncode'] != '0': + LOG.error(_('Failed to query agent version: %(resp)r') % + locals()) + return None # Some old versions of the Windows agent have a trailing \\r\\n # (ie CRLF escaped) for some reason. Strip that off. - return resp_dict['message'].replace('\\r\\n', '') + return resp['message'].replace('\\r\\n', '') if timeout: vm_ref = self._get_vm_opaque_ref(instance) @@ -634,13 +638,10 @@ class VMOps(object): transaction_id = str(uuid.uuid4()) args = {'id': transaction_id, 'url': url, 'md5sum': md5sum} resp = self._make_agent_call('agentupdate', instance, '', args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) - if resp_dict['returncode'] != '0': - raise RuntimeError(resp_dict['message']) - return resp_dict['message'] + if resp['returncode'] != '0': + LOG.error(_('Failed to update agent: %(resp)r') % locals()) + return None + return resp['message'] def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance. @@ -659,18 +660,13 @@ class VMOps(object): key_init_args = {'id': key_init_transaction_id, 'pub': str(dh.get_public())} resp = self._make_agent_call('key_init', instance, '', key_init_args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) # Successful return code from key_init is 'D0' - if resp_dict['returncode'] != 'D0': - # There was some sort of error; the message will contain - # a description of the error. - raise RuntimeError(resp_dict['message']) + if resp['returncode'] != 'D0': + LOG.error(_('Failed to exchange keys: %(resp)r') % locals()) + return None # Some old versions of the Windows agent have a trailing \\r\\n # (ie CRLF escaped) for some reason. Strip that off. - agent_pub = int(resp_dict['message'].replace('\\r\\n', '')) + agent_pub = int(resp['message'].replace('\\r\\n', '')) dh.compute_shared(agent_pub) # Some old versions of Linux and Windows agent expect trailing \n # on password to work correctly. 
@@ -679,17 +675,14 @@ class VMOps(object): password_transaction_id = str(uuid.uuid4()) password_args = {'id': password_transaction_id, 'enc_pass': enc_pass} resp = self._make_agent_call('password', instance, '', password_args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) # Successful return code from password is '0' - if resp_dict['returncode'] != '0': - raise RuntimeError(resp_dict['message']) + if resp['returncode'] != '0': + LOG.error(_('Failed to update password: %(resp)r') % locals()) + return None db.instance_update(nova_context.get_admin_context(), instance['id'], dict(admin_pass=new_pass)) - return resp_dict['message'] + return resp['message'] def inject_file(self, instance, path, contents): """Write a file to the VM instance. @@ -712,12 +705,10 @@ class VMOps(object): # If the agent doesn't support file injection, a NotImplementedError # will be raised with the appropriate message. resp = self._make_agent_call('inject_file', instance, '', args) - resp_dict = json.loads(resp) - if resp_dict['returncode'] != '0': - # There was some other sort of error; the message will contain - # a description of the error. - raise RuntimeError(resp_dict['message']) - return resp_dict['message'] + if resp['returncode'] != '0': + LOG.error(_('Failed to inject file: %(resp)r') % locals()) + return None + return resp['message'] def _shutdown(self, instance, vm_ref, hard=True): """Shutdown an instance.""" @@ -1031,11 +1022,23 @@ class VMOps(object): # TODO: implement this! return 'http://fakeajaxconsole/fake_url' + def host_power_action(self, host, action): + """Reboots or shuts down the host.""" + args = {"action": json.dumps(action)} + methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"} + json_resp = self._call_xenhost(methods[action], args) + resp = json.loads(json_resp) + return resp["power_action"] + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" args = {"enabled": json.dumps(enabled)} - json_resp = self._call_xenhost("set_host_enabled", args) - resp = json.loads(json_resp) + xenapi_resp = self._call_xenhost("set_host_enabled", args) + try: + resp = json.loads(xenapi_resp) + except TypeError as e: + # Already logged; return the message + return xenapi_resp.details[-1] return resp["status"] def _call_xenhost(self, method, arg_dict): @@ -1051,7 +1054,7 @@ class VMOps(object): #args={"params": arg_dict}) ret = self._session.wait_for_task(task, task_id) except self.XenAPI.Failure as e: - ret = None + ret = e LOG.error(_("The call to %(method)s returned an error: %(e)s.") % locals()) return ret @@ -1166,8 +1169,19 @@ class VMOps(object): def _make_agent_call(self, method, vm, path, addl_args=None): """Abstracts out the interaction with the agent xenapi plugin.""" - return self._make_plugin_call('agent', method=method, vm=vm, + ret = self._make_plugin_call('agent', method=method, vm=vm, path=path, addl_args=addl_args) + if isinstance(ret, dict): + return ret + try: + return json.loads(ret) + except TypeError: + instance_id = vm.id + LOG.error(_('The agent call to %(method)s returned an invalid' + ' response: %(ret)r. 
VM id=%(instance_id)s;' + ' path=%(path)s; args=%(addl_args)r') % locals()) + return {'returncode': 'error', + 'message': 'unable to deserialize response'} def _make_plugin_call(self, plugin, method, vm, path, addl_args=None, vm_ref=None): @@ -1185,20 +1199,20 @@ class VMOps(object): ret = self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, e: ret = None - err_trace = e.details[-1] - err_msg = err_trace.splitlines()[-1] - strargs = str(args) + err_msg = e.details[-1].splitlines()[-1] if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' - 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) + 'VM id=%(instance_id)s; args=%(args)r') % locals()) + return {'returncode': 'timeout', 'message': err_msg} elif 'NOT IMPLEMENTED:' in err_msg: LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not' ' supported by the agent. VM id=%(instance_id)s;' - ' args=%(strargs)s') % locals()) - raise NotImplementedError(err_msg) + ' args=%(args)r') % locals()) + return {'returncode': 'notimplemented', 'message': err_msg} else: LOG.error(_('The call to %(method)s returned an error: %(e)s. ' - 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) + 'VM id=%(instance_id)s; args=%(args)r') % locals()) + return {'returncode': 'error', 'message': err_msg} return ret def add_to_xenstore(self, vm, path, key, value): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 49ae2623e..91df80950 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -184,8 +184,8 @@ class XenAPIConnection(driver.ComputeDriver): def list_instances_detail(self): return self._vmops.list_instances_detail() - def spawn(self, context, instance, network_info, - block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """Create VM instance""" self._vmops.spawn(context, instance, network_info) @@ -332,6 +332,19 @@ class XenAPIConnection(driver.ComputeDriver): True, run the update first.""" return self.HostState.get_host_stats(refresh=refresh) + def host_power_action(self, host, action): + """The only valid values for 'action' on XenServer are 'reboot' or + 'shutdown', even though the API also accepts 'startup'. As this is + not technically possible on XenServer, since the host is the same + physical machine as the hypervisor, if this is requested, we need to + raise an exception. + """ + if action in ("reboot", "shutdown"): + return self._vmops.host_power_action(host, action) + else: + msg = _("Host startup on XenServer is not supported.") + raise NotImplementedError(msg) + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._vmops.set_host_enabled(host, enabled) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost index 292bbce12..cd9694ce1 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -39,6 +39,7 @@ import pluginlib_nova as pluginlib pluginlib.configure_logging("xenhost") host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") +config_file_path = "/usr/etc/xenhost.conf" def jsonify(fnc): @@ -103,6 +104,104 @@ def set_host_enabled(self, arg_dict): return {"status": status} +def _write_config_dict(dct): + conf_file = file(config_file_path, "w") + json.dump(dct, conf_file) + conf_file.close() + + +def _get_config_dict(): + """Returns a dict containing the key/values in the config file. 
+ If the file doesn't exist, it is created, and an empty dict + is returned. + """ + try: + conf_file = file(config_file_path) + config_dct = json.load(conf_file) + conf_file.close() + except IOError: + # File doesn't exist + config_dct = {} + # Create the file + _write_config_dict(config_dct) + return config_dct + + +@jsonify +def get_config(self, arg_dict): + """Return the value stored for the specified key, or None if no match.""" + conf = _get_config_dict() + params = arg_dict["params"] + try: + dct = json.loads(params) + except Exception, e: + dct = params + key = dct["key"] + ret = conf.get(key) + if ret is None: + # Can't jsonify None + return "None" + return ret + + +@jsonify +def set_config(self, arg_dict): + """Write the specified key/value pair, overwriting any existing value.""" + conf = _get_config_dict() + params = arg_dict["params"] + try: + dct = json.loads(params) + except Exception, e: + dct = params + key = dct["key"] + val = dct["value"] + if val is None: + # Delete the key, if present + conf.pop(key, None) + else: + conf.update({key: val}) + _write_config_dict(conf) + + +def _power_action(action): + host_uuid = _get_host_uuid() + # Host must be disabled first + result = _run_command("xe host-disable") + if result: + raise pluginlib.PluginError(result) + # All running VMs must be shutdown + result = _run_command("xe vm-shutdown --multiple power-state=running") + if result: + raise pluginlib.PluginError(result) + cmds = {"reboot": "xe host-reboot", "startup": "xe host-power-on", + "shutdown": "xe host-shutdown"} + result = _run_command(cmds[action]) + # Should be empty string + if result: + raise pluginlib.PluginError(result) + return {"power_action": action} + + +@jsonify +def host_reboot(self, arg_dict): + """Reboots the host.""" + return _power_action("reboot") + + +@jsonify +def host_shutdown(self, arg_dict): + """Reboots the host.""" + return _power_action("shutdown") + + +@jsonify +def host_start(self, arg_dict): + """Starts the host. Currently not feasible, since the host + runs on the same machine as Xen. + """ + return _power_action("startup") + + @jsonify def host_data(self, arg_dict): """Runs the commands on the xenstore host to return the current status @@ -115,6 +214,9 @@ def host_data(self, arg_dict): # We have the raw dict of values. Extract those that we need, # and convert the data types as needed. 
ret_dict = cleanup(parsed_data) + # Add any config settings + config = _get_config_dict() + ret_dict.update(config) return ret_dict @@ -217,4 +319,9 @@ def cleanup(dct): if __name__ == "__main__": XenAPIPlugin.dispatch( {"host_data": host_data, - "set_host_enabled": set_host_enabled}) + "set_host_enabled": set_host_enabled, + "host_shutdown": host_shutdown, + "host_reboot": host_reboot, + "host_start": host_start, + "get_config": get_config, + "set_config": set_config}) diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py index de69c98a2..8c8fa35b8 100644 --- a/smoketests/test_netadmin.py +++ b/smoketests/test_netadmin.py @@ -109,9 +109,12 @@ class SecurityGroupTests(base.UserSmokeTestCase): def __public_instance_is_accessible(self): id_url = "latest/meta-data/instance-id" - options = "-s --max-time 1" + options = "-f -s --max-time 1" command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) - instance_id = commands.getoutput(command).strip() + status, output = commands.getstatusoutput(command) + instance_id = output.strip() + if status > 0: + return False if not instance_id: return False if instance_id != self.data['instance'].id: diff --git a/tools/ajaxterm/README.txt b/tools/ajaxterm/README.txt index a649771c5..4b0ae99af 100644 --- a/tools/ajaxterm/README.txt +++ b/tools/ajaxterm/README.txt @@ -1,120 +1,120 @@ -= [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] = - -Ajaxterm is a web based terminal. It was totally inspired and works almost -exactly like http://anyterm.org/ except it's much easier to install (see -comparaison with anyterm below). - -Ajaxterm written in python (and some AJAX javascript for client side) and depends only on python2.3 or better.[[BR]] -Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]] -Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain. - -Use the [/qweb/forum/viewforum.php?id=2 Forum], if you have any question or remark. - -== News == - - * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init - * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer) - * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256 - * 2006-05-31: v0.7 minor fixes, daemon option - * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022 - -== Download and Install == - - * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz] - * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/] - -To install Ajaxterm issue the following commands: -{{{ -wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz -tar zxvf Ajaxterm-0.10.tar.gz -cd Ajaxterm-0.10 -./ajaxterm.py -}}} -Then point your browser to this URL : http://localhost:8022/ - -== Screenshot == - -{{{ -#!html -<center><img src="/qweb/trac/attachment/wiki/AjaxTerm/scr.png?format=raw" alt="ajaxterm screenshot" style=""/></center> -}}} - -== Documentation and Caveats == - - * Ajaxterm only support latin1, if you use Ubuntu or any LANG==en_US.UTF-8 distribution don't forget to "unset LANG". - - * If run as root ajaxterm will run /bin/login, otherwise it will run ssh - localhost. To use an other command use the -c option. - - * By default Ajaxterm only listen at 127.0.0.1:8022. 
For remote access, it is - strongly recommended to use '''https SSL/TLS''', and that is simple to - configure if you use the apache web server using mod_proxy.[[BR]][[BR]] - Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]] - Here is an configuration example: - -{{{ - Listen 443 - NameVirtualHost *:443 - - <VirtualHost *:443> - ServerName localhost - SSLEngine On - SSLCertificateKeyFile ssl/apache.pem - SSLCertificateFile ssl/apache.pem - - ProxyRequests Off - <Proxy *> - Order deny,allow - Allow from all - </Proxy> - ProxyPass /ajaxterm/ http://localhost:8022/ - ProxyPassReverse /ajaxterm/ http://localhost:8022/ - </VirtualHost> -}}} - - * Using GET HTTP request seems to speed up ajaxterm, just click on GET in the - interface, but be warned that your keystrokes might be loggued (by apache or - any proxy). I usually enable it after the login. - - * Ajaxterm commandline usage: - -{{{ -usage: ajaxterm.py [options] - -options: - -h, --help show this help message and exit - -pPORT, --port=PORT Set the TCP port (default: 8022) - -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost) - -l, --log log requests to stderr (default: quiet mode) - -d, --daemon run as daemon in the background - -PPIDFILE, --pidfile=PIDFILE - set the pidfile (default: /var/run/ajaxterm.pid) - -iINDEX_FILE, --index=INDEX_FILE - default index file (default: ajaxterm.html) - -uUID, --uid=UID Set the daemon's user id -}}} - - * Ajaxterm was first written as a demo for qweb (my web framework), but - actually doesn't use many features of qweb. - - * Compared to anyterm: - * There are no partial updates, ajaxterm updates either all the screen or - nothing. That make the code simpler and I also think it's faster. HTTP - replies are always gzencoded. When used in 80x25 mode, almost all of - them are below the 1500 bytes (size of an ethernet frame) and we just - replace the screen with the reply (no javascript string handling). - * Ajaxterm polls the server for updates with an exponentially growing - timeout when the screen hasn't changed. The timeout is also resetted as - soon as a key is pressed. Anyterm blocks on a pending request and use a - parallel connection for keypresses. The anyterm approch is better - when there aren't any keypress. - - * Ajaxterm files are released in the Public Domain, (except [http://sarissa.sourceforge.net/doc/ sarissa*] which are LGPL). - -== TODO == - - * insert mode ESC [ 4 h - * change size x,y from gui (sending signal) - * vt102 graphic codepage - * use innerHTML or prototype instead of sarissa - += [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] =
+
+Ajaxterm is a web-based terminal. It was totally inspired by and works almost
+exactly like http://anyterm.org/, except it's much easier to install (see the
+comparison with anyterm below).
+
+Ajaxterm is written in python (with some AJAX javascript for the client side) and depends only on python2.3 or better.[[BR]]
+Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]]
+Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain.
+
+Use the [/qweb/forum/viewforum.php?id=2 Forum] if you have any questions or remarks.
+
+== News ==
+
+ * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init
+ * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer)
+ * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256
+ * 2006-05-31: v0.7 minor fixes, daemon option
+ * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022
+
+== Download and Install ==
+
+ * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz]
+ * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/]
+
+To install Ajaxterm, issue the following commands:
+{{{
+wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz
+tar zxvf Ajaxterm-0.10.tar.gz
+cd Ajaxterm-0.10
+./ajaxterm.py
+}}}
+Then point your browser to this URL: http://localhost:8022/
+
+== Screenshot ==
+
+{{{
+#!html
+<center><img src="/qweb/trac/attachment/wiki/AjaxTerm/scr.png?format=raw" alt="ajaxterm screenshot" style=""/></center>
+}}}
+
+== Documentation and Caveats ==
+
+ * Ajaxterm only supports latin1; if you use Ubuntu or any LANG==en_US.UTF-8 distribution, don't forget to "unset LANG".
+
+ * If run as root, ajaxterm will run /bin/login; otherwise it will run ssh
+   localhost. To use another command, use the -c option.
+
+ * By default Ajaxterm only listens on 127.0.0.1:8022. For remote access, it is
+ strongly recommended to use '''https SSL/TLS''', and that is simple to
+   configure if you use the Apache web server with mod_proxy.[[BR]][[BR]]
+ Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]]
+   Here is a configuration example:
+
+{{{
+ Listen 443
+ NameVirtualHost *:443
+
+ <VirtualHost *:443>
+ ServerName localhost
+ SSLEngine On
+ SSLCertificateKeyFile ssl/apache.pem
+ SSLCertificateFile ssl/apache.pem
+
+ ProxyRequests Off
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+ ProxyPass /ajaxterm/ http://localhost:8022/
+ ProxyPassReverse /ajaxterm/ http://localhost:8022/
+ </VirtualHost>
+}}}
+
+ * Using GET HTTP requests seems to speed up ajaxterm; just click on GET in the
+   interface, but be warned that your keystrokes might be logged (by apache or
+   any proxy). I usually enable it after login.
+
+ * Ajaxterm commandline usage:
+
+{{{
+usage: ajaxterm.py [options]
+
+options:
+ -h, --help show this help message and exit
+ -pPORT, --port=PORT Set the TCP port (default: 8022)
+ -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost)
+ -l, --log log requests to stderr (default: quiet mode)
+ -d, --daemon run as daemon in the background
+ -PPIDFILE, --pidfile=PIDFILE
+ set the pidfile (default: /var/run/ajaxterm.pid)
+ -iINDEX_FILE, --index=INDEX_FILE
+ default index file (default: ajaxterm.html)
+ -uUID, --uid=UID Set the daemon's user id
+}}}
+
+ * Ajaxterm was first written as a demo for qweb (my web framework), but
+ actually doesn't use many features of qweb.
+
+ * Compared to anyterm:
+   * There are no partial updates; ajaxterm updates either all of the screen or
+     nothing. That makes the code simpler and I also think it's faster. HTTP
+     replies are always gzencoded. When used in 80x25 mode, almost all of
+     them are below 1500 bytes (the size of an ethernet frame) and we just
+     replace the screen with the reply (no javascript string handling).
+   * Ajaxterm polls the server for updates with an exponentially growing
+     timeout when the screen hasn't changed. The timeout is also reset as
+     soon as a key is pressed (see the sketch after this list). Anyterm
+     blocks on a pending request and uses a parallel connection for
+     keypresses. The anyterm approach is better when there aren't any
+     keypresses.
+
+ * Ajaxterm files are released in the Public Domain (except [http://sarissa.sourceforge.net/doc/ sarissa*], which are LGPL).
+
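
The exponential-backoff polling described in the list above can be sketched in a few lines. This is illustrative only: the real ajaxterm client is javascript, and fetch_update and key_pressed below are hypothetical stand-ins for the AJAX update request and keystroke handling.

{{{
import time


def poll_screen(fetch_update, key_pressed,
                min_delay=0.1, max_delay=5.0, factor=2.0):
    """Poll with a delay that grows while the screen is unchanged.

    fetch_update() returns True when the screen changed; key_pressed()
    returns True when the user typed something since the last poll.
    """
    delay = min_delay
    while True:
        if key_pressed() or fetch_update():
            delay = min_delay                       # activity: poll again soon
        else:
            delay = min(delay * factor, max_delay)  # idle: back off
        time.sleep(delay)
}}}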
+== TODO ==
+
+ * insert mode ESC [ 4 h
+ * change size x,y from gui (sending signal)
+ * vt102 graphic codepage
+ * use innerHTML or prototype instead of sarissa
+
diff --git a/tools/pip-requires b/tools/pip-requires index 23e707034..60b502ffd 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,8 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.9 +lxml==2.3 +python-novaclient==2.6.0 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 |
