| author | Sandy Walsh <sandy.walsh@rackspace.com> | 2010-12-09 10:22:50 -0400 |
|---|---|---|
| committer | Sandy Walsh <sandy.walsh@rackspace.com> | 2010-12-09 10:22:50 -0400 |
| commit | e6079449dc034234dc24e77b94bbcc4a257387d9 | |
| tree | df2d66c6c03d328069f4a8524dbaa897afd65f81 /nova | |
| parent | f0b53131569cd409a95c68b435ec56a69dcdc897 | |
| parent | 3c114a7dd596dfb12de42577711d97d1a98d10ec | |
| download | nova-e6079449dc034234dc24e77b94bbcc4a257387d9.tar.gz, .tar.xz, .zip | |
Fixed Authors conflict and re-merged with trunk
Diffstat (limited to 'nova')
45 files changed, 1731 insertions, 1171 deletions
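Most of the churn in this change comes from the new `nova/compute/api.py` (added in full further down), which pulls instance create/reboot/rescue/terminate logic out of `nova/api/cloud.py`, `nova/api/ec2/cloud.py`, and `nova/api/openstack/servers.py` into a single `ComputeAPI` class. The following is a minimal usage sketch, not part of the commit: the class, method names, and signatures are taken from the diff below, while the context values, the image id, and a fully configured deployment (flags, database, message queue) are assumptions.

```python
# Illustrative sketch only -- assumes a configured Nova deployment and a
# registered image; 'someuser', 'someproject' and 'ami-12345' are made up.
from nova import context
from nova.compute import api as compute_api
from nova.compute import instance_types

ctxt = context.RequestContext('someuser', 'someproject')
api = compute_api.ComputeAPI()

# Quota checks, security-group setup, fixed-ip allocation and the cast to
# the scheduler now all happen inside create_instances().
instances = api.create_instances(ctxt,
                                 instance_types.get_by_type('m1.small'),
                                 'ami-12345',
                                 display_name='example')
internal_id = instances[0]['internal_id']

# Lifecycle calls that previously lived in nova/api/cloud.py; each one
# casts to the compute worker over AMQP and returns immediately.
api.reboot(ctxt, internal_id)
api.rescue(ctxt, internal_id)
api.unrescue(ctxt, internal_id)
api.delete_instance(ctxt, internal_id)
```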
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
deleted file mode 100644
index b8f15019f..000000000
--- a/nova/api/cloud.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Methods for API calls to control instances via AMQP.
-"""
-
-
-from nova import db
-from nova import flags
-from nova import rpc
-
-FLAGS = flags.FLAGS
-
-
-def reboot(instance_id, context=None):
-    """Reboot the given instance."""
-    instance_ref = db.instance_get_by_internal_id(context, instance_id)
-    host = instance_ref['host']
-    rpc.cast(context,
-             db.queue_get_for(context, FLAGS.compute_topic, host),
-             {"method": "reboot_instance",
-              "args": {"instance_id": instance_ref['id']}})
-
-
-def rescue(instance_id, context):
-    """Rescue the given instance."""
-    instance_ref = db.instance_get_by_internal_id(context, instance_id)
-    host = instance_ref['host']
-    rpc.cast(context,
-             db.queue_get_for(context, FLAGS.compute_topic, host),
-             {"method": "rescue_instance",
-              "args": {"instance_id": instance_ref['id']}})
-
-
-def unrescue(instance_id, context):
-    """Unrescue the given instance."""
-    instance_ref = db.instance_get_by_internal_id(context, instance_id)
-    host = instance_ref['host']
-    rpc.cast(context,
-             db.queue_get_for(context, FLAGS.compute_topic, host),
-             {"method": "unrescue_instance",
-              "args": {"instance_id": instance_ref['id']}})
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9327bf0d4..05f8c3d0b 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -39,9 +39,8 @@ from nova import flags
 from nova import quota
 from nova import rpc
 from nova import utils
-from nova.compute.instance_types import INSTANCE_TYPES
-from nova.api import cloud
-from nova.image.s3 import S3ImageService
+from nova.compute import api as compute_api
+from nova.compute import instance_types
 
 
 FLAGS = flags.FLAGS
@@ -50,11 +49,6 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
 InvalidInputException = exception.InvalidInputException
 
 
-class QuotaError(exception.ApiError):
-    """Quota Exceeeded"""
-    pass
-
-
 def _gen_key(context, user_id, key_name):
     """Generate a key
 
@@ -99,8 +93,9 @@ class CloudController(object):
     """
     def __init__(self):
         self.network_manager = utils.import_object(FLAGS.network_manager)
-        self.compute_manager = utils.import_object(FLAGS.compute_manager)
-        self.image_service = S3ImageService()
+        self.image_service = utils.import_object(FLAGS.image_service)
+        self.compute_api = compute_api.ComputeAPI(self.network_manager,
+                                                  self.image_service)
         self.setup()
 
     def __str__(self):
@@ -124,10 +119,10 @@ class CloudController(object):
 
     def _get_mpi_data(self, context, project_id):
         result = {}
-        for instance in db.instance_get_all_by_project(context, project_id):
+        for instance in self.compute_api.get_instances(context, project_id):
             if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'], - INSTANCE_TYPES[instance['instance_type']]['vcpus']) + instance['vcpus']) key = str(instance['key_name']) if key in result: result[key].append(line) @@ -260,7 +255,7 @@ class CloudController(object): return True def describe_security_groups(self, context, group_name=None, **kwargs): - self._ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) if context.user.is_admin(): groups = db.security_group_get_all(context) else: @@ -358,7 +353,7 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): - self._ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -383,7 +378,7 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name, **kwargs): - self._ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -419,7 +414,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): - self._ensure_default_security_group(context) + self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError('group %s already exists' % group_name) @@ -443,7 +438,7 @@ class CloudController(object): # instance_id is passed in as a list of instances ec2_id = instance_id[0] internal_id = ec2_id_to_internal_id(ec2_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) output = rpc.call(context, '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), @@ -505,9 +500,8 @@ class CloudController(object): if quota.allowed_volumes(context, 1, size) < 1: logging.warn("Quota exceeeded for %s, tried to create %sG volume", context.project_id, size) - raise QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % - size) + raise quota.QuotaError("Volume quota exceeded. 
You cannot " + "create a volume of size %s" % size) vol = {} vol['size'] = size vol['user_id'] = context.user.id @@ -541,7 +535,7 @@ class CloudController(object): if volume_ref['attach_status'] == "attached": raise exception.ApiError("Volume is already attached") internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), @@ -619,11 +613,7 @@ class CloudController(object): instances = db.instance_get_all_by_reservation(context, reservation_id) else: - if context.user.is_admin(): - instances = db.instance_get_all(context) - else: - instances = db.instance_get_all_by_project(context, - context.project_id) + instances = self.compute_api.get_instances(context) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -699,8 +689,8 @@ class CloudController(object): if quota.allowed_floating_ips(context, 1) < 1: logging.warn("Quota exceeeded for %s, tried to allocate address", context.project_id) - raise QuotaError("Address quota exceeded. You cannot " - "allocate any more addresses") + raise quota.QuotaError("Address quota exceeded. You cannot " + "allocate any more addresses") network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, @@ -720,7 +710,7 @@ class CloudController(object): def associate_address(self, context, instance_id, public_ip, **kwargs): internal_id = ec2_id_to_internal_id(instance_id) - instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_ref = self.compute_api.get_instance(context, internal_id) fixed_address = db.instance_get_fixed_address(context, instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) @@ -752,218 +742,49 @@ class CloudController(object): "args": {"network_id": network_ref['id']}}) return db.queue_get_for(context, FLAGS.network_topic, host) - def _ensure_default_security_group(self, context): - try: - db.security_group_get_by_name(context, - context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user.id, - 'project_id': context.project_id} - group = db.security_group_create(context, values) - def run_instances(self, context, **kwargs): - instance_type = kwargs.get('instance_type', 'm1.small') - if instance_type not in INSTANCE_TYPES: - raise exception.ApiError("Unknown instance type: %s", - instance_type) - # check quota - max_instances = int(kwargs.get('max_count', 1)) - min_instances = int(kwargs.get('min_count', max_instances)) - num_instances = quota.allowed_instances(context, - max_instances, - instance_type) - if num_instances < min_instances: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_instances) - raise QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - vpn = kwargs['image_id'] == FLAGS.vpn_image_id - - if not vpn: - image = self.image_service.show(context, kwargs['image_id']) - - # FIXME(ja): if image is vpn, this breaks - # get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self.image_service.show(context, kernel_id) - self.image_service.show(context, ramdisk_id) - - logging.debug("Going to run %s instances...", num_instances) - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if 'key_name' in kwargs: - key_pair_ref = db.key_pair_get(context, - context.user.id, - kwargs['key_name']) - key_data = key_pair_ref['public_key'] - - security_group_arg = kwargs.get('security_group', ["default"]) - if not type(security_group_arg) is list: - security_group_arg = [security_group_arg] - - security_groups = [] - self._ensure_default_security_group(context) - for security_group_name in security_group_arg: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - reservation_id = utils.generate_uid('r') - base_options = {} - base_options['state_description'] = 'scheduling' - base_options['image_id'] = image_id - base_options['kernel_id'] = kernel_id - base_options['ramdisk_id'] = ramdisk_id - base_options['reservation_id'] = reservation_id - base_options['key_data'] = key_data - base_options['key_name'] = kwargs.get('key_name', None) - base_options['user_id'] = context.user.id - base_options['project_id'] = context.project_id - base_options['user_data'] = kwargs.get('user_data', '') - - base_options['display_name'] = kwargs.get('display_name') - base_options['display_description'] = kwargs.get('display_description') - - type_data = INSTANCE_TYPES[instance_type] - base_options['instance_type'] = instance_type - base_options['memory_mb'] = type_data['memory_mb'] - base_options['vcpus'] = type_data['vcpus'] - base_options['local_gb'] = type_data['local_gb'] - elevated = context.elevated() - - for num in range(num_instances): - - instance_ref = self.compute_manager.create_instance(context, - security_groups, - mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - inst_id = instance_ref['id'] - - internal_id = instance_ref['internal_id'] - ec2_id = internal_id_to_ec2_id(internal_id) - - self.compute_manager.update_instance(context, - inst_id, - hostname=ec2_id) - - # TODO(vish): This probably should be done in the scheduler - # or in compute as a call. The network should be - # allocated after the host is assigned and setup - # can happen at the same time. 
- address = self.network_manager.allocate_fixed_ip(context, - inst_id, - vpn) - network_topic = self._get_network_topic(context) - rpc.cast(elevated, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": inst_id}}) - logging.debug("Casting to scheduler for %s/%s's instance %s" % - (context.project.name, context.user.name, inst_id)) - return self._format_run_instances(context, reservation_id) + max_count = int(kwargs.get('max_count', 1)) + instances = self.compute_api.create_instances(context, + instance_types.get_by_type(kwargs.get('instance_type', None)), + kwargs['image_id'], + min_count=int(kwargs.get('min_count', max_count)), + max_count=max_count, + kernel_id=kwargs.get('kernel_id'), + ramdisk_id=kwargs.get('ramdisk_id'), + display_name=kwargs.get('display_name'), + description=kwargs.get('display_description'), + key_name=kwargs.get('key_name'), + security_group=kwargs.get('security_group'), + generate_hostname=internal_id_to_ec2_id) + return self._format_run_instances(context, + instances[0]['reservation_id']) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. - - instance_id is a kwarg so its name cannot be modified. - """ - ec2_id_list = instance_id + instance_id is a kwarg so its name cannot be modified.""" logging.debug("Going to start terminating instances") - for id_str in ec2_id_list: - internal_id = ec2_id_to_internal_id(id_str) - logging.debug("Going to try and terminate %s" % id_str) - try: - instance_ref = db.instance_get_by_internal_id(context, - internal_id) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate", - id_str) - continue - - if (instance_ref['state_description'] == 'terminating'): - logging.warning("Instance %s is already being terminated", - id_str) - continue - now = datetime.datetime.utcnow() - self.compute_manager.update_instance(context, - instance_ref['id'], - state_description='terminating', - state=0, - terminated_at=now) - - # FIXME(ja): where should network deallocate occur? - address = db.instance_get_floating_address(context, - instance_ref['id']) - if address: - logging.debug("Disassociating address %s" % address) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - network_topic = self._get_network_topic(context) - rpc.cast(context, - network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) - - address = db.instance_get_fixed_address(context, - instance_ref['id']) - if address: - logging.debug("Deallocating address %s" % address) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - - host = instance_ref['host'] - if host: - rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_ref['id']}}) - else: - db.instance_destroy(context, instance_ref['id']) + for ec2_id in instance_id: + internal_id = ec2_id_to_internal_id(ec2_id) + self.compute_api.delete_instance(context, internal_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" for ec2_id in instance_id: internal_id = ec2_id_to_internal_id(ec2_id) - cloud.reboot(internal_id, context=context) + self.compute_api.reboot(context, internal_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" internal_id = ec2_id_to_internal_id(instance_id) - cloud.rescue(internal_id, context=context) + self.compute_api.rescue(context, internal_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" internal_id = ec2_id_to_internal_id(instance_id) - cloud.unrescue(internal_id, context=context) + self.compute_api.unrescue(context, internal_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -974,7 +795,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: internal_id = ec2_id_to_internal_id(ec2_id) - inst = db.instance_get_by_internal_id(context, internal_id) + inst = self.compute_api.get_instance(context, internal_id) db.instance_update(context, inst['id'], kwargs) return True @@ -994,8 +815,11 @@ class CloudController(object): return True def describe_images(self, context, image_id=None, **kwargs): - imageSet = self.image_service.index(context, image_id) - return {'imagesSet': imageSet} + # Note: image_id is a list! 
+ images = self.image_service.index(context) + if image_id: + images = filter(lambda x: x['imageId'] in image_id, images) + return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): self.image_service.deregister(context, image_id) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 80b27c7e5..c9efe5222 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -25,6 +25,7 @@ import time import logging import routes +import traceback import webob.dec import webob.exc import webob @@ -65,6 +66,7 @@ class API(wsgi.Middleware): return req.get_response(self.application) except Exception as ex: logging.warn("Caught error: %s" % str(ex)) + logging.debug(traceback.format_exc()) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index ff428ff70..205035915 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -23,10 +23,7 @@ class Context(object): class BasicApiAuthManager(object): """ Implements a somewhat rudimentary version of OpenStack Auth""" - def __init__(self, host=None, db_driver=None): - if not host: - host = FLAGS.host - self.host = host + def __init__(self, db_driver=None): if not db_driver: db_driver = FLAGS.db_driver self.db = utils.import_object(db_driver) @@ -47,7 +44,7 @@ class BasicApiAuthManager(object): except KeyError: return faults.Fault(webob.exc.HTTPUnauthorized()) - token, user = self._authorize_user(username, key) + token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() res.headers['X-Auth-Token'] = token.token_hash @@ -82,8 +79,13 @@ class BasicApiAuthManager(object): return {'id': user.id} return None - def _authorize_user(self, username, key): - """ Generates a new token and assigns it to a user """ + def _authorize_user(self, username, key, req): + """Generates a new token and assigns it to a user. + + username - string + key - string API key + req - webob.Request object + """ user = self.auth.get_user_from_access_key(key) if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, @@ -91,12 +93,10 @@ class BasicApiAuthManager(object): token_dict = {} token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' - token_dict['server_management_url'] = self._get_server_mgmt_url() + # Same as auth url, e.g. http://foo.org:8774/baz/v1.0 + token_dict['server_management_url'] = req.url token_dict['storage_url'] = '' token_dict['user_id'] = user.id token = self.db.auth_create_token(self.context, token_dict) return token, user return None, None - - def _get_server_mgmt_url(self): - return 'https://%s/v1.0/' % self.host diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index e69e51439..224a7ef0b 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "cloudServersFault") + fault_name = self._fault_names.get(code, "computeFault") fault_data = { fault_name: { 'code': code, diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 1d8aa2fa4..6f2f6fed9 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,34 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. -import time - -import webob from webob import exc -from nova import flags -from nova import rpc -from nova import utils -from nova import wsgi from nova import context -from nova.api import cloud +from nova import exception +from nova import wsgi from nova.api.openstack import faults +from nova.auth import manager as auth_manager +from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack -import nova.image.service - -FLAGS = flags.FLAGS - - -def _filter_params(inst_dict): - """ Extracts all updatable parameters for a server update request """ - keys = dict(name='name', admin_pass='adminPass') - new_attrs = {} - for k, v in keys.items(): - if v in inst_dict: - new_attrs[k] = inst_dict[v] - return new_attrs def _entity_list(entities): @@ -63,7 +46,7 @@ def _entity_detail(inst): inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', - flavorId='instance_type', name='server_name', id='id') + flavorId='instance_type', name='display_name', id='internal_id') for k, v in mapped_keys.iteritems(): inst_dict[k] = inst[v] @@ -78,7 +61,7 @@ def _entity_detail(inst): def _entity_inst(inst): """ Filters all model attributes save for id and name """ - return dict(server=dict(id=inst['id'], name=inst['server_name'])) + return dict(server=dict(id=inst['internal_id'], name=inst['display_name'])) class Controller(wsgi.Controller): @@ -88,14 +71,10 @@ class Controller(wsgi.Controller): 'application/xml': { "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress", "progress"]}}} + "status", "progress"]}}} - def __init__(self, db_driver=None): - if not db_driver: - db_driver = FLAGS.db_driver - self.db_driver = utils.import_object(db_driver) - self.network_manager = utils.import_object(FLAGS.network_manager) - self.compute_manager = utils.import_object(FLAGS.compute_manager) + def __init__(self): + self.compute_api = compute_api.ComputeAPI() super(Controller, self).__init__() def index(self, req): @@ -113,7 +92,7 @@ class Controller(wsgi.Controller): """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - instance_list = self.db_driver.instance_get_all_by_user(ctxt, user_id) + instance_list = self.compute_api.get_instances(ctxt) limited_list = nova.api.openstack.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) @@ -122,7 +101,7 @@ class Controller(wsgi.Controller): """ Returns server details by server id """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - inst = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) + inst = self.compute_api.get_instance(ctxt, int(id)) if inst: if inst.user_id == user_id: return _entity_detail(inst) @@ -132,52 +111,53 @@ class Controller(wsgi.Controller): """ Destroys a server """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - 
instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) - if instance and instance['user_id'] == user_id: - self.db_driver.instance_destroy(ctxt, id) - return faults.Fault(exc.HTTPAccepted()) - return faults.Fault(exc.HTTPNotFound()) + try: + self.compute_api.delete_instance(ctxt, int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() def create(self, req): """ Creates a new server for a given user """ - env = self._deserialize(req.body, req) if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - #try: - inst = self._build_server_instance(req, env) - #except Exception, e: - # return faults.Fault(exc.HTTPUnprocessableEntity()) - user_id = req.environ['nova.context']['user']['id'] - rpc.cast(context.RequestContext(user_id, user_id), - FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id": inst['id']}}) - return _entity_inst(inst) + ctxt = context.RequestContext(user_id, user_id) + key_pair = auth_manager.AuthManager.get_key_pairs(ctxt)[0] + instances = self.compute_api.create_instances(ctxt, + instance_types.get_by_flavor_id(env['server']['flavorId']), + env['server']['imageId'], + display_name=env['server']['name'], + description=env['server']['name'], + key_name=key_pair['name'], + key_data=key_pair['public_key']) + return _entity_inst(instances[0]) def update(self, req, id): """ Updates the server name or password """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) - inst_dict = self._deserialize(req.body, req) - if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) - instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) - if not instance or instance.user_id != user_id: - return faults.Fault(exc.HTTPNotFound()) + update_dict = {} + if 'adminPass' in inst_dict['server']: + update_dict['admin_pass'] = inst_dict['server']['adminPass'] + if 'name' in inst_dict['server']: + update_dict['display_name'] = inst_dict['server']['name'] - self.db_driver.instance_update(ctxt, - int(id), - _filter_params(inst_dict['server'])) - return faults.Fault(exc.HTTPNoContent()) + try: + self.compute_api.update_instance(ctxt, instance['id'], + **update_dict) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPNoContent() def action(self, req, id): - """ multi-purpose method used to reboot, rebuild, and + """ Multi-purpose method used to reboot, rebuild, and resize a server """ user_id = req.environ['nova.context']['user']['id'] ctxt = context.RequestContext(user_id, user_id) @@ -185,92 +165,11 @@ class Controller(wsgi.Controller): try: reboot_type = input_dict['reboot']['type'] except Exception: - raise faults.Fault(webob.exc.HTTPNotImplemented()) - inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) - if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): + raise faults.Fault(exc.HTTPNotImplemented()) + try: + # TODO(gundlach): pass reboot_type, support soft reboot in + # virt driver + self.compute_api.reboot(ctxt, id) + except: return faults.Fault(exc.HTTPUnprocessableEntity()) - cloud.reboot(id) - - def _build_server_instance(self, req, env): - """Build instance data structure and save it to the data store.""" - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = {} - - user_id = req.environ['nova.context']['user']['id'] - ctxt = context.RequestContext(user_id, user_id) - - flavor_id = env['server']['flavorId'] - - instance_type, flavor = [(k, v) for k, v in - 
instance_types.INSTANCE_TYPES.iteritems() - if v['flavorid'] == flavor_id][0] - - image_id = env['server']['imageId'] - img_service = utils.import_object(FLAGS.image_service) - - image = img_service.show(image_id) - - if not image: - raise Exception("Image not found") - - inst['server_name'] = env['server']['name'] - inst['image_id'] = image_id - inst['user_id'] = user_id - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - inst['project_id'] = user_id - - inst['state_description'] = 'scheduling' - inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel) - inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk) - inst['reservation_id'] = utils.generate_uid('r') - - inst['display_name'] = env['server']['name'] - inst['display_description'] = env['server']['name'] - - #TODO(dietz) this may be ill advised - key_pair_ref = self.db_driver.key_pair_get_all_by_user( - None, user_id)[0] - - inst['key_data'] = key_pair_ref['public_key'] - inst['key_name'] = key_pair_ref['name'] - - #TODO(dietz) stolen from ec2 api, see TODO there - inst['security_group'] = 'default' - - # Flavor related attributes - inst['instance_type'] = instance_type - inst['memory_mb'] = flavor['memory_mb'] - inst['vcpus'] = flavor['vcpus'] - inst['local_gb'] = flavor['local_gb'] - inst['mac_address'] = utils.generate_mac() - inst['launch_index'] = 0 - - ref = self.compute_manager.create_instance(ctxt, **inst) - inst['id'] = ref['internal_id'] - - inst['hostname'] = str(ref['internal_id']) - self.compute_manager.update_instance(ctxt, inst['id'], **inst) - - address = self.network_manager.allocate_fixed_ip(ctxt, - inst['id']) - - # TODO(vish): This probably should be done in the scheduler - # network is setup when host is assigned - network_topic = self._get_network_topic(ctxt) - rpc.call(ctxt, - network_topic, - {"method": "setup_fixed_ip", - "args": {"address": address}}) - return inst - - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - network_ref = self.network_manager.get_network(context) - host = network_ref['host'] - if not host: - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return self.db_driver.queue_get_for(context, FLAGS.network_topic, host) + return exc.HTTPAccepted() diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65..c10939d74 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') +flags.DEFINE_boolean('ldap_user_modify_only', False, + 'Modify attributes for users instead of creating/deleting') flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects') flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', @@ -89,8 +91,7 @@ class LdapDriver(object): def get_user(self, uid): """Retrieve user by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + attr = self.__get_ldap_user(uid) return self.__to_user(attr) def get_user_from_access_key(self, access): @@ -110,7 +111,12 @@ class LdapDriver(object): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') - return [self.__to_user(attr) for attr in attrs] + users = [] + for attr in attrs: + user 
= self.__to_user(attr) + if user is not None: + users.append(user) + return users def get_projects(self, uid=None): """Retrieve list of projects""" @@ -125,21 +131,52 @@ class LdapDriver(object): """Create a user""" if self.__user_exists(name): raise exception.Duplicate("LDAP user %s already exists" % name) - attr = [ - ('objectclass', ['person', - 'organizationalPerson', - 'inetOrgPerson', - 'novaUser']), - ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), - ('sn', [name]), - ('cn', [name]), - ('secretKey', [secret_key]), - ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), - ] - self.conn.add_s(self.__uid_to_dn(name), attr) - return self.__to_user(dict(attr)) + if FLAGS.ldap_user_modify_only: + if self.__ldap_user_exists(name): + # Retrieve user by name + user = self.__get_ldap_user(name) + # Entry could be malformed, test for missing attrs. + # Malformed entries are useless, replace attributes found. + attr = [] + if 'secretKey' in user.keys(): + attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + [secret_key])) + else: + attr.append((self.ldap.MOD_ADD, 'secretKey', \ + [secret_key])) + if 'accessKey' in user.keys(): + attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + [access_key])) + else: + attr.append((self.ldap.MOD_ADD, 'accessKey', \ + [access_key])) + if 'isAdmin' in user.keys(): + attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \ + [str(is_admin).upper()])) + else: + attr.append((self.ldap.MOD_ADD, 'isAdmin', \ + [str(is_admin).upper()])) + self.conn.modify_s(self.__uid_to_dn(name), attr) + return self.get_user(name) + else: + raise exception.NotFound("LDAP object for %s doesn't exist" + % name) + else: + attr = [ + ('objectclass', ['person', + 'organizationalPerson', + 'inetOrgPerson', + 'novaUser']), + ('ou', [FLAGS.ldap_user_unit]), + ('uid', [name]), + ('sn', [name]), + ('cn', [name]), + ('secretKey', [secret_key]), + ('accessKey', [access_key]), + ('isAdmin', [str(is_admin).upper()]), + ] + self.conn.add_s(self.__uid_to_dn(name), attr) + return self.__to_user(dict(attr)) def create_project(self, name, manager_uid, description=None, member_uids=None): @@ -155,7 +192,7 @@ class LdapDriver(object): if description is None: description = name members = [] - if member_uids != None: + if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Project can't be created " @@ -256,7 +293,24 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s doesn't exist" % uid) self.__remove_from_all(uid) - self.conn.delete_s(self.__uid_to_dn(uid)) + if FLAGS.ldap_user_modify_only: + # Delete attributes + attr = [] + # Retrieve user by name + user = self.__get_ldap_user(uid) + if 'secretKey' in user.keys(): + attr.append((self.ldap.MOD_DELETE, 'secretKey', \ + user['secretKey'])) + if 'accessKey' in user.keys(): + attr.append((self.ldap.MOD_DELETE, 'accessKey', \ + user['accessKey'])) + if 'isAdmin' in user.keys(): + attr.append((self.ldap.MOD_DELETE, 'isAdmin', \ + user['isAdmin'])) + self.conn.modify_s(self.__uid_to_dn(uid), attr) + else: + # Delete entry + self.conn.delete_s(self.__uid_to_dn(uid)) def delete_project(self, project_id): """Delete a project""" @@ -265,7 +319,7 @@ class LdapDriver(object): self.__delete_group(project_dn) def modify_user(self, uid, access_key=None, secret_key=None, admin=None): - """Modify an existing project""" + """Modify an existing user""" if not access_key and not secret_key and admin is None: return attr = [] @@ -279,11 +333,21 @@ 
class LdapDriver(object): def __user_exists(self, uid): """Check if user exists""" - return self.get_user(uid) != None + return self.get_user(uid) is not None + + def __ldap_user_exists(self, uid): + """Check if the user exists in ldap""" + return self.__get_ldap_user(uid) is not None def __project_exists(self, project_id): """Check if project exists""" - return self.get_project(project_id) != None + return self.get_project(project_id) is not None + + def __get_ldap_user(self, uid): + """Retrieve LDAP user entry by id""" + attr = self.__find_object(self.__uid_to_dn(uid), + '(objectclass=novaUser)') + return attr def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" @@ -330,12 +394,12 @@ class LdapDriver(object): def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') != None + return self.__find_object(dn, '(objectclass=groupOfNames)') is not None @staticmethod def __role_to_dn(role, project_id=None): """Convert role to corresponding dn""" - if project_id == None: + if project_id is None: return FLAGS.__getitem__("ldap_%s" % role).value else: return 'cn=%s,cn=%s,%s' % (role, @@ -349,7 +413,7 @@ class LdapDriver(object): raise exception.Duplicate("Group can't be created because " "group %s already exists" % name) members = [] - if member_uids != None: + if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " @@ -375,7 +439,7 @@ class LdapDriver(object): res = self.__find_object(group_dn, '(member=%s)' % self.__uid_to_dn(uid), self.ldap.SCOPE_BASE) - return res != None + return res is not None def __add_to_group(self, uid, group_dn): """Add user to group""" @@ -447,18 +511,22 @@ class LdapDriver(object): @staticmethod def __to_user(attr): """Convert ldap attributes to User object""" - if attr == None: + if attr is None: + return None + if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \ + and 'isAdmin' in attr.keys()): + return { + 'id': attr['uid'][0], + 'name': attr['cn'][0], + 'access': attr['accessKey'][0], + 'secret': attr['secretKey'][0], + 'admin': (attr['isAdmin'][0] == 'TRUE')} + else: return None - return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], - 'access': attr['accessKey'][0], - 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} def __to_project(self, attr): """Convert ldap attributes to Project object""" - if attr == None: + if attr is None: return None member_dns = attr.get('member', []) return { diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 7b2b68161..11c3bd6df 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -624,6 +624,10 @@ class AuthManager(object): with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) + @staticmethod + def get_key_pairs(context): + return db.key_pair_get_all_by_user(context.elevated(), context.user_id) + def get_credentials(self, user, project=None): """Get credential zip for user in project""" if not isinstance(user, User): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema new file mode 100644 index 000000000..4047361de --- /dev/null +++ b/nova/auth/nova_openldap.schema @@ -0,0 +1,84 @@ +# +# Person object for Nova +# inetorgperson with extra attributes +# Author: Vishvananda Ishaya <vishvananda@yahoo.com> +# +# + +# using internet experimental oid arc as per BP64 3.1 +objectidentifier novaSchema 1.3.6.1.3.1.666.666 +objectidentifier 
novaAttrs novaSchema:3 +objectidentifier novaOCs novaSchema:4 + +attributetype ( + novaAttrs:1 + NAME 'accessKey' + DESC 'Key for accessing data' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:2 + NAME 'secretKey' + DESC 'Secret key' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:3 + NAME 'keyFingerprint' + DESC 'Fingerprint of private key' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:4 + NAME 'isAdmin' + DESC 'Is user an administrator?' + EQUALITY booleanMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:5 + NAME 'projectManager' + DESC 'Project Managers of a project' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + ) + +objectClass ( + novaOCs:1 + NAME 'novaUser' + DESC 'access and secret keys' + AUXILIARY + MUST ( uid ) + MAY ( accessKey $ secretKey $ isAdmin ) + ) + +objectClass ( + novaOCs:2 + NAME 'novaKeyPair' + DESC 'Key pair for User' + SUP top + STRUCTURAL + MUST ( cn $ sshPublicKey $ keyFingerprint ) + ) + +objectClass ( + novaOCs:3 + NAME 'novaProject' + DESC 'Container for project' + SUP groupOfNames + STRUCTURAL + MUST ( cn $ projectManager ) + ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema new file mode 100644 index 000000000..e925e05e4 --- /dev/null +++ b/nova/auth/nova_sun.schema @@ -0,0 +1,16 @@ +# +# Person object for Nova +# inetorgperson with extra attributes +# Author: Vishvananda Ishaya <vishvananda@yahoo.com> +# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com> +# +# using internet experimental oid arc as per BP64 3.1 +dn: cn=schema +attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh new file mode 100755 index 000000000..8052c077d --- /dev/null +++ b/nova/auth/opendj.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users + +apt-get install -y ldap-utils python-ldap openjdk-6-jre + +if [ ! -d "/usr/opendj" ] +then + # TODO(rlane): Wikimedia Foundation is the current package maintainer. + # After the package is included in Ubuntu's channel, change this. + wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb + dpkg -i opendj_2.4.0-7_amd64.deb +fi + +abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif +cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif +chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif +chown opendj:opendj $schemapath/98-nova_sun.ldif + +cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF +# LDAP Client Settings +URI ldap://localhost +BASE dc=example,dc=com +BINDDN cn=Directory Manager +SIZELIMIT 0 +TIMELIMIT 0 +LDAP_CONF_EOF + +cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF +# This is the root of the directory tree +dn: dc=example,dc=com +description: Example.Com, your trusted non-existent corporation. +dc: example +o: Example.Com +objectClass: top +objectClass: dcObject +objectClass: organization + +# Subtree for users +dn: ou=Users,dc=example,dc=com +ou: Users +description: Users +objectClass: organizationalUnit + +# Subtree for groups +dn: ou=Groups,dc=example,dc=com +ou: Groups +description: Groups +objectClass: organizationalUnit + +# Subtree for system accounts +dn: ou=System,dc=example,dc=com +ou: System +description: Special accounts used by software applications. 
+objectClass: organizationalUnit + +# Special Account for Authentication: +dn: uid=authenticate,ou=System,dc=example,dc=com +uid: authenticate +ou: System +description: Special account for authenticating users +userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg== +objectClass: account +objectClass: simpleSecurityObject + +# create the sysadmin entry + +dn: cn=developers,ou=Groups,dc=example,dc=com +objectclass: groupOfNames +cn: developers +description: IT admin group +member: uid=admin,ou=Users,dc=example,dc=com + +dn: cn=sysadmins,ou=Groups,dc=example,dc=com +objectclass: groupOfNames +cn: sysadmins +description: IT admin group +member: uid=admin,ou=Users,dc=example,dc=com + +dn: cn=netadmins,ou=Groups,dc=example,dc=com +objectclass: groupOfNames +cn: netadmins +description: Network admin group +member: uid=admin,ou=Users,dc=example,dc=com + +dn: cn=cloudadmins,ou=Groups,dc=example,dc=com +objectclass: groupOfNames +cn: cloudadmins +description: Cloud admin group +member: uid=admin,ou=Users,dc=example,dc=com + +dn: cn=itsec,ou=Groups,dc=example,dc=com +objectclass: groupOfNames +cn: itsec +description: IT security users group +member: uid=admin,ou=Users,dc=example,dc=com +BASE_LDIF_EOF + +/etc/init.d/opendj stop +su - opendj -c '/usr/opendj/setup -i -b "dc=example,dc=com" -l /etc/ldap/base.ldif -S -w changeme -O -n --noPropertiesFile' +/etc/init.d/opendj start diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 000000000..93351da6d --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE <eau@phear.org> +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 000000000..5f52db3b6 --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE <eau@phear.org> +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index fdc0e39dc..797675d2e 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -20,115 +20,9 @@ apt-get install -y slapd ldap-utils python-ldap -cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE <eau@phear.org> -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) -LPK_SCHEMA_EOF - -cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF -# -# Person object for Nova -# inetorgperson with extra attributes -# Author: Vishvananda Ishaya <vishvananda@yahoo.com> -# -# - -# using internet experimental oid arc as per BP64 3.1 -objectidentifier novaSchema 1.3.6.1.3.1.666.666 -objectidentifier novaAttrs novaSchema:3 -objectidentifier novaOCs novaSchema:4 - -attributetype ( - novaAttrs:1 - NAME 'accessKey' - DESC 'Key for accessing data' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:2 - NAME 'secretKey' - DESC 'Secret key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' 
- EQUALITY booleanMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - -objectClass ( - novaOCs:1 - NAME 'novaUser' - DESC 'access and secret keys' - AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) - ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) - -NOVA_SCHEMA_EOF +abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000..8e0efa4cc --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,305 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all API requests relating to instances (guest vms). +""" + +import datetime +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(internal_id): + """Default function to generate a hostname given an instance reference.""" + return str(internal_id) + + +class ComputeAPI(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, network_manager=None, image_service=None, **kwargs): + if not network_manager: + network_manager = utils.import_object(FLAGS.network_manager) + self.network_manager = network_manager + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + super(ComputeAPI, self).__init__(**kwargs) + + def create_instances(self, context, instance_type, image_id, min_count=1, + max_count=1, kernel_id=None, ramdisk_id=None, + display_name='', description='', key_name=None, + key_data=None, security_group='default', + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quote and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. 
You can only " + "run %s more instances of this type." % + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', FLAGS.default_kernel) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # Make sure we have access to kernel and ramdisk + self.image_service.show(context, kernel_id) + self.image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': display_name, + 'display_description': description, + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug("Going to run %s instances...", num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + internal_id = instance['internal_id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(internal_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % internal_id + + instance = self.update_instance(context, instance_id, **updates) + instances.append(instance) + + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. 
+ address = self.network_manager.allocate_fixed_ip(context, + instance_id, + is_vpn) + rpc.cast(elevated, + self._get_network_topic(context), + {"method": "setup_fixed_ip", + "args": {"address": address}}) + + logging.debug("Casting to scheduler for %s/%s's instance %s", + context.project_id, context.user_id, instance_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + db.security_group_create(context, values) + + def update_instance(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. + + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + return self.db.instance_update(context, instance_id, kwargs) + + def delete_instance(self, context, instance_id): + logging.debug("Going to try and terminate %d" % instance_id) + try: + instance = self.db.instance_get_by_internal_id(context, + instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found during terminate", + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning("Instance %d is already being terminated", + instance_id) + return + + self.update_instance(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + # FIXME(ja): where should network deallocate occur? + address = self.db.instance_get_floating_address(context, + instance['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(context, + self._get_network_topic(context), + {"method": "disassociate_floating_ip", + "args": {"floating_address": address}}) + + address = self.db.instance_get_fixed_address(context, instance['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. + self.network_manager.deallocate_fixed_ip(context.elevated(), + address) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance['id']}}) + else: + self.db.instance_destroy(context, instance['id']) + + def get_instances(self, context, project_id=None): + """Get all instances, possibly filtered by project ID or + user ID. 
If there is no filter and the context is an admin, + it will retreive all instances in the system.""" + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, project_id) + return self.db.instance_get_all(context) + + def get_instance(self, context, instance_id): + return self.db.instance_get_by_internal_id(context, instance_id) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance['id']}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.db.instance_get_by_internal_id(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance['id']}}) + + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + network_ref = self.network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return self.db.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 67ee8f8a8..a2679e0fc 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -21,9 +21,29 @@ The built-in instance properties. """ +from nova import flags + +FLAGS = flags.FLAGS INSTANCE_TYPES = { 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + + +def get_by_type(instance_type): + """Build instance data structure and save it to the data store.""" + if instance_type is None: + return FLAGS.default_instance_type + if instance_type not in INSTANCE_TYPES: + raise exception.ApiError("Unknown instance type: %s", + instance_type) + return instance_type + + +def get_by_flavor_id(flavor_id): + for instance_type, details in INSTANCE_TYPES.iteritems(): + if details['flavorid'] == flavor_id: + return instance_type + return FLAGS.default_instance_type diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 890d79fba..dd8d41129 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. 
It is responsible for building a disk image, launching it via the underlying virtualization driver, -responding to calls to check it state, attaching persistent as well as -termination. +responding to calls to check its state, attaching persistent storage, and +terminating it. **Related Flags** @@ -45,15 +45,15 @@ from nova import manager from nova import utils from nova.compute import power_state - FLAGS = flags.FLAGS -flags.DEFINE_string('instances_path', utils.abspath('../instances'), +flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', - 'Driver to use for volume creation') + 'Driver to use for controlling virtualization') class ComputeManager(manager.Manager): + """Manages the running instances from creation to destruction.""" def __init__(self, compute_driver=None, *args, **kwargs): @@ -84,47 +84,6 @@ class ComputeManager(manager.Manager): """This call passes stright through to the virtualization driver.""" yield self.driver.refresh_security_group(security_group_id) - def create_instance(self, context, security_groups=None, **kwargs): - """Creates the instance in the datastore and returns the - new instance as a mapping - - :param context: The security context - :param security_groups: list of security group ids to - attach to the instance - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - created - - :retval Returns a mapping of the instance information - that has just been created - - """ - instance_ref = self.db.instance_create(context, kwargs) - inst_id = instance_ref['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - inst_id, - security_group_id) - return instance_ref - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. - - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - self.db.instance_update(context, instance_id, kwargs) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): @@ -134,7 +93,6 @@ class ComputeManager(manager.Manager): if instance_ref['name'] in self.driver.list_instances(): raise exception.Error("Instance has already been created") logging.debug("instance %s: starting...", instance_id) - project_id = instance_ref['project_id'] self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, @@ -176,7 +134,6 @@ class ComputeManager(manager.Manager): self.db.instance_destroy(context, instance_id) raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - yield self.driver.destroy(instance_ref) # TODO(ja): should we keep it in a terminated state for a bit? 
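A note on the '$state_path/instances' default that replaces utils.abspath('../instances') above: such flag defaults are no longer literal paths. Later in this diff, nova/flags.py gains a state_path flag and extends FlagValues.__getattr__ to expand string values through string.Template (via the StrWrapper helper). A stripped-down sketch of that expansion, with hypothetical values and a single non-recursive substitution as in the new code:

    import string

    # Hypothetical flag values; in nova these live on the FLAGS object.
    flag_values = {'state_path': '/var/lib/nova',
                   'instances_path': '$state_path/instances'}

    def lookup(name):
        """Return a flag value, expanding $other_flag references once."""
        val = flag_values[name]
        if isinstance(val, str):
            return string.Template(val).substitute(flag_values)
        return val

    print(lookup('instances_path'))   # /var/lib/nova/instances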
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index ce45b14f6..22653113a 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -46,7 +46,7 @@ flags.DEFINE_integer('monitoring_instances_delay', 5, 'Sleep time between updates') flags.DEFINE_integer('monitoring_instances_step', 300, 'Interval of RRD updates') -flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', +flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances', 'Location of RRD files') diff --git a/nova/crypto.py b/nova/crypto.py index d73559587..aacc50b17 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -40,9 +40,9 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') -flags.DEFINE_string('keys_path', utils.abspath('../keys'), +flags.DEFINE_string('keys_path', '$state_path/keys', 'Where we keep our keys') -flags.DEFINE_string('ca_path', utils.abspath('../CA'), +flags.DEFINE_string('ca_path', '$state_path/CA', 'Where we keep our root CA') flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') diff --git a/nova/db/base.py b/nova/db/base.py new file mode 100644 index 000000000..1d1e80866 --- /dev/null +++ b/nova/db/base.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base class for classes that need modular database access. +""" + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('db_driver', 'nova.db.api', + 'driver to use for database access') + + +class Base(object): + """DB driver is injected in the init method""" + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index afa55fc03..55036d1d1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -530,6 +530,12 @@ def fixed_ip_update(context, address, values): #functions between the two of them as well. @require_context def instance_create(context, values): + """Create a new Instance record in the database. + + context - request context object + values - dict containing column values. + 'internal_id' is auto-generated and should not be specified. + """ instance_ref = models.Instance() instance_ref.update(values) @@ -537,7 +543,7 @@ def instance_create(context, values): with session.begin(): while instance_ref.internal_id == None: # Instances have integer internal ids. 
- internal_id = random.randint(0, 2 ** 32 - 1) + internal_id = random.randint(0, 2 ** 31 - 1) if not instance_internal_id_exists(context, internal_id, session=session): instance_ref.internal_id = internal_id @@ -726,6 +732,7 @@ def instance_update(context, instance_id, values): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) instance_ref.save(session=session) + return instance_ref def instance_add_security_group(context, instance_id, security_group_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 01b5cf350..fe0a9a921 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -178,8 +178,6 @@ class Instance(BASE, NovaBase): kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) - server_name = Column(String(255)) - # image_id = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -212,6 +210,7 @@ class Instance(BASE, NovaBase): launched_at = Column(DateTime) terminated_at = Column(DateTime) + # User editable field for display in user-facing UIs display_name = Column(String(255)) display_description = Column(String(255)) diff --git a/nova/flags.py b/nova/flags.py index f7ae26050..c6578023d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -24,6 +24,7 @@ where they're used. import getopt import os import socket +import string import sys import gflags @@ -38,11 +39,12 @@ class FlagValues(gflags.FlagValues): """ - def __init__(self): + def __init__(self, extra_context=None): gflags.FlagValues.__init__(self) self.__dict__['__dirty'] = [] self.__dict__['__was_already_parsed'] = False self.__dict__['__stored_argv'] = [] + self.__dict__['__extra_context'] = extra_context def __call__(self, argv): # We're doing some hacky stuff here so that we don't have to copy @@ -112,7 +114,7 @@ class FlagValues(gflags.FlagValues): def ParseNewFlags(self): if '__stored_argv' not in self.__dict__: return - new_flags = FlagValues() + new_flags = FlagValues(self) for k in self.__dict__['__dirty']: new_flags[k] = gflags.FlagValues.__getitem__(self, k) @@ -134,9 +136,29 @@ class FlagValues(gflags.FlagValues): def __getattr__(self, name): if self.IsDirty(name): self.ParseNewFlags() - return gflags.FlagValues.__getattr__(self, name) + val = gflags.FlagValues.__getattr__(self, name) + if type(val) is str: + tmpl = string.Template(val) + context = [self, self.__dict__['__extra_context']] + return tmpl.substitute(StrWrapper(context)) + return val +class StrWrapper(object): + """Wrapper around FlagValues objects + + Wraps FlagValues objects for string.Template so that we're + sure to return strings.""" + def __init__(self, context_objs): + self.context_objs = context_objs + + def __getitem__(self, name): + for context in self.context_objs: + val = getattr(context, name, False) + if val: + return str(val) + raise KeyError(name) + FLAGS = FlagValues() gflags.FLAGS = FLAGS gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) @@ -201,8 +223,6 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('cc_host', '127.0.0.1', 'ip of api server') -DEFINE_integer('cc_port', 8773, 'cloud controller port') DEFINE_string('ec2_url', 
'http://127.0.0.1:8773/services/Cloud', 'Url to ec2 api server') @@ -222,8 +242,11 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') +DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), + "Top-level directory for maintaining nova's state") + DEFINE_string('sql_connection', - 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), + 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', @@ -236,7 +259,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') # The service to use for image search and retrieval -DEFINE_string('image_service', 'nova.image.local.LocalImageService', +DEFINE_string('image_service', 'nova.image.s3.S3ImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('host', socket.gethostname(), diff --git a/nova/image/local.py b/nova/image/local.py index 9b0cdcc50..b44593221 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -59,7 +59,7 @@ class LocalImageService(service.BaseImageService): """ Store the image data and return the new image id. """ - id = random.randint(0, 2 ** 32 - 1) + id = random.randint(0, 2 ** 31 - 1) data['id'] = id self.update(context, id, data) return id diff --git a/nova/manager.py b/nova/manager.py index a6efb8732..5b61f7a4c 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -53,23 +53,19 @@ This module provides Manager, a base class for managers. from nova import utils from nova import flags +from nova.db import base from twisted.internet import defer FLAGS = flags.FLAGS -flags.DEFINE_string('db_driver', 'nova.db.api', - 'driver to use for volume creation') -class Manager(object): - """DB driver is injected in the init method""" +class Manager(base.Base): def __init__(self, host=None, db_driver=None): if not host: host = FLAGS.host self.host = host - if not db_driver: - db_driver = FLAGS.db_driver - self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 + super(Manager, self).__init__(db_driver) @defer.inlineCallbacks def periodic_tasks(self, context=None): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ea24cda6..0fefd9415 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -38,14 +38,16 @@ flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') -flags.DEFINE_string('networks_path', utils.abspath('../networks'), +flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') flags.DEFINE_string('public_interface', 'vlan1', 'Interface for public IP addresses') -flags.DEFINE_string('bridge_dev', 'eth0', - 'network device for bridges') +flags.DEFINE_string('vlan_interface', 'eth0', + 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') +flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') +flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') flags.DEFINE_string('routing_source_ip', '127.0.0.1', 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, @@ -54,14 +56,15 @@ flags.DEFINE_bool('use_nova_chains', False, DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] -def init_host(): - """Basic networking setup goes here""" - # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port - # 
forwarding entries and a default DNAT entry. +def metadata_forward(): + """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " "--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port)) + +def init_host(): + """Basic networking setup goes here""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. _confirm_rule("POSTROUTING", "-t nat -s %s " @@ -134,7 +137,7 @@ def ensure_vlan(vlan_num): if not _device_exists(interface): logging.debug("Starting VLAN inteface %s", interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - _execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, vlan_num)) + _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) return interface @@ -142,12 +145,13 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug("Starting Bridge inteface for %s", interface) + logging.debug("Starting Bridge interface for %s", interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) _execute("sudo brctl stp %s off" % bridge) - _execute("sudo brctl addif %s %s" % (bridge, interface)) + if interface: + _execute("sudo brctl addif %s %s" % (bridge, interface)) if net_attrs: _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (bridge, diff --git a/nova/network/manager.py b/nova/network/manager.py index b033bb0a4..a7298b47f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -27,6 +27,7 @@ topologies. All of the network commands are issued to a subclass of :network_driver: Driver to use for network creation :flat_network_bridge: Bridge device for simple network instances +:flat_interface: FlatDhcp will bridge into this interface if set :flat_network_dns: Dns for simple network :flat_network_dhcp_start: Dhcp start for FlatDhcp :vlan_start: First VLAN for private networks @@ -63,7 +64,11 @@ flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') -flags.DEFINE_string('flat_network_dhcp_start', '192.168.0.2', +flags.DEFINE_bool('flat_injected', True, + 'Whether to attempt to inject network setup into guest') +flags.DEFINE_string('flat_interface', None, + 'FlatDhcp will bridge into this interface if set') +flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') @@ -175,9 +180,11 @@ class NetworkManager(manager.Manager): if instance_ref['mac_address'] != mac: raise exception.Error("IP %s leased to bad mac %s vs %s" % (address, instance_ref['mac_address'], mac)) + now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], - {'leased': True}) + {'leased': True, + 'updated_at': now}) if not fixed_ip_ref['allocated']: logging.warn("IP %s leased that was already deallocated", address) @@ -246,7 +253,31 @@ class NetworkManager(manager.Manager): class FlatManager(NetworkManager): - """Basic network where no vlans are used.""" + """Basic network where no vlans are used. + + FlatManager does not do any bridge or vlan creation. 
The user is + responsible for setting up whatever bridge is specified in + flat_network_bridge (br100 by default). This bridge needs to be created + on all compute hosts. + + The idea is to create a single network for the host with a command like: + nova-manage network create 192.168.0.0/24 1 256. Creating multiple + networks for for one manager is currently not supported, but could be + added by modifying allocate_fixed_ip and get_network to get the a network + with new logic instead of network_get_by_bridge. Arbitrary lists of + addresses in a single network can be accomplished with manual db editing. + + If flat_injected is True, the compute host will attempt to inject network + config into the guest. It attempts to modify /etc/network/interfaces and + currently only works on debian based systems. To support a wider range of + OSes, some other method may need to be devised to let the guest know which + ip it should be using so that it can configure itself. Perhaps an attached + disk or serial device with configuration info. + + Metadata forwarding must be handled by the gateway, and since nova does + not do any setup in this mode, it must be done manually. Requests to + 169.254.169.254 port 80 will need to be forwarded to the api server. + """ def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" @@ -285,6 +316,7 @@ class FlatManager(NetworkManager): cidr = "%s/%s" % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) net = {} + net['bridge'] = FLAGS.flat_network_bridge net['cidr'] = cidr net['netmask'] = str(project_net.netmask()) net['gateway'] = str(project_net[1]) @@ -306,18 +338,36 @@ class FlatManager(NetworkManager): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" net = {} - net['injected'] = True - net['bridge'] = FLAGS.flat_network_bridge + net['injected'] = FLAGS.flat_injected net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) -class FlatDHCPManager(NetworkManager): - """Flat networking with dhcp.""" +class FlatDHCPManager(FlatManager): + """Flat networking with dhcp. + + FlatDHCPManager will start up one dhcp server to give out addresses. + It never injects network settings into the guest. Otherwise it behaves + like FlatDHCPManager. + """ + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + super(FlatDHCPManager, self).init_host() + self.driver.metadata_forward() + + def setup_compute_network(self, context, instance_id): + """Sets up matching network for compute hosts.""" + network_ref = db.network_get_by_instance(context, instance_id) + self.driver.ensure_bridge(network_ref['bridge'], + FLAGS.flat_interface, + network_ref) def setup_fixed_ip(self, context, address): """Setup dhcp for this network.""" - network_ref = db.fixed_ip_get_by_address(context, address) + network_ref = db.fixed_ip_get_network(context, address) self.driver.update_dhcp(context, network_ref['id']) def deallocate_fixed_ip(self, context, address, *args, **kwargs): @@ -326,18 +376,28 @@ class FlatDHCPManager(NetworkManager): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project.""" - super(FlatDHCPManager, self)._on_set_network_host(context, network_id) - network_ref = self.db.network_get(context, network_id) - self.db.network_update(context, - network_id, - {'dhcp_start': FLAGS.flat_network_dhcp_start}) + net = {} + net['dhcp_start'] = FLAGS.flat_network_dhcp_start + self.db.network_update(context, network_id, net) + network_ref = db.network_get(context, network_id) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.bridge_dev, + FLAGS.flat_interface, network_ref) class VlanManager(NetworkManager): - """Vlan network with dhcp.""" + """Vlan network with dhcp. + + VlanManager is the most complicated. It will create a host-managed + vlan for each project. Each project gets its own subnet. The networks + and associated subnets are created with nova-manage using a command like: + nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks + of 16 addresses from the beginning of the 10.0.0.0 range. + + A dhcp server is run for each subnet, so each project will have its own. + For this mode to be useful, each project will need a vpn to access the + instances in its subnet. + """ @defer.inlineCallbacks def periodic_tasks(self, context=None): @@ -357,6 +417,7 @@ class VlanManager(NetworkManager): standalone service. 
""" super(VlanManager, self).init_host() + self.driver.metadata_forward() self.driver.init_host() def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index 697982538..82767e52f 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -33,7 +33,7 @@ from nova.objectstore import stored FLAGS = flags.FLAGS -flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), +flags.DEFINE_string('buckets_path', '$state_path/buckets', 'path to s3 buckets') diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 4554444fa..7292dbab8 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -39,8 +39,8 @@ from nova.objectstore import bucket FLAGS = flags.FLAGS -flags.DEFINE_string('images_path', utils.abspath('../images'), - 'path to decrypted images') +flags.DEFINE_string('images_path', '$state_path/images', + 'path to decrypted images') class Image(object): diff --git a/nova/quota.py b/nova/quota.py index 01dd0ecd4..f6ca9f77c 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -94,3 +94,8 @@ def allowed_floating_ips(context, num_floating_ips): quota = get_quota(context, project_id) allowed_floating_ips = quota['floating_ips'] - used_floating_ips return min(num_floating_ips, allowed_floating_ips) + + +class QuotaError(exception.ApiError): + """Quota Exceeeded""" + pass diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 639a2ebe4..c3f129a32 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -54,7 +54,6 @@ def fake_auth_init(self): self.db = FakeAuthDatabase() self.context = Context() self.auth = FakeAuthManager() - self.host = 'foo' @webob.dec.wsgify @@ -68,12 +67,11 @@ def fake_wsgi(self, req): def stub_out_key_pair_funcs(stubs): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] - stubs.Set(nova.db.api, 'key_pair_get_all_by_user', - key_pair) + stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) def stub_out_image_service(stubs): - def fake_image_show(meh, id): + def fake_image_show(meh, context, id): return dict(kernelId=1, ramdiskId=1) stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show) diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index dd83991b9..d8b202e21 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -50,12 +50,12 @@ class APITest(unittest.TestCase): api.application = succeed resp = Request.blank('/').get_response(api) - self.assertFalse('cloudServersFault' in resp.body, resp.body) + self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 200, resp.body) api.application = raise_webob_exc resp = Request.blank('/').get_response(api) - self.assertFalse('cloudServersFault' in resp.body, resp.body) + self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) api.application = raise_api_fault @@ -65,10 +65,10 @@ class APITest(unittest.TestCase): api.application = fail resp = Request.blank('/').get_response(api) - self.assertTrue('{"cloudServersFault' in resp.body, resp.body) + self.assertTrue('{"computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) api.application = fail resp = Request.blank('/.xml').get_response(api) - self.assertTrue('<cloudServersFault' in resp.body, resp.body) + self.assertTrue('<computeFault' in resp.body, 
resp.body) self.assertEqual(resp.status_int, 500, resp.body) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 29f4b8874..14e720be4 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -62,14 +62,14 @@ class Test(unittest.TestCase): f = fakes.FakeAuthManager() f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) - req = webob.Request.blank('/v1.0/') + req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-Key'] = 'derp' result = req.get_response(nova.api.API('os')) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], - "https://foo/v1.0/") + "http://foo/v1.0/") self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 8cfc6c45a..8444b6fce 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -43,9 +43,21 @@ def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] +def return_security_group(context, instance_id, security_group_id): + pass + + +def instance_update(context, instance_id, kwargs): + return stub_instance(instance_id) + + +def instance_address(context, instance_id): + return None + + def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, server_name='server%s' % id, - user_id=user_id) + return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id, internal_id=id) class ServersTest(unittest.TestCase): @@ -63,6 +75,13 @@ class ServersTest(unittest.TestCase): return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) + self.stubs.Set(nova.db.api, 'instance_add_security_group', + return_security_group) + self.stubs.Set(nova.db.api, 'instance_update', instance_update) + self.stubs.Set(nova.db.api, 'instance_get_fixed_address', + instance_address) + self.stubs.Set(nova.db.api, 'instance_get_floating_address', + instance_address) def tearDown(self): self.stubs.UnsetAll() @@ -87,11 +106,11 @@ class ServersTest(unittest.TestCase): i += 1 def test_create_instance(self): - def server_update(context, id, params): - pass - def instance_create(context, inst): - return {'id': 1, 'internal_id': 1} + return {'id': 1, 'internal_id': 1, 'display_name': ''} + + def server_update(context, id, params): + return instance_create(context, id) def fake_method(*args, **kwargs): pass diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 71a1a4457..6f3ef96cb 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -31,6 +31,7 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager +from nova.compute import api as compute_api FLAGS = flags.FLAGS @@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase): self.flags(connection_type='fake', network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') @@ -66,26 +68,31 @@ class 
ComputeTestCase(test.TrialTestCase): inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] + def test_create_instance_defaults_display_name(self): + """Verify that an instance cannot be created without a display_name.""" + cases = [dict(), dict(display_name=None)] + for instance in cases: + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, **instance) + try: + self.assertNotEqual(ref[0].display_name, None) + finally: + db.instance_destroy(self.context, ref[0]['id']) + def test_create_instance_associates_security_groups(self): - """Make sure create_instance associates security groups""" - inst = {} - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + """Make sure create_instances associates security groups""" values = {'name': 'default', 'description': 'default', 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute.create_instance(self.context, - security_groups=[group['id']], - **inst) - # reload to get groups - instance_ref = db.instance_get(self.context, ref['id']) + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, security_group=['default']) try: - self.assertEqual(len(instance_ref['security_groups']), 1) + self.assertEqual(len(ref[0]['security_groups']), 1) finally: db.security_group_destroy(self.context, group['id']) - db.instance_destroy(self.context, instance_ref['id']) + db.instance_destroy(self.context, ref[0]['id']) @defer.inlineCallbacks def test_run_terminate(self): diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py index 856060afa..667c63ad0 100644 --- a/nova/tests/misc_unittest.py +++ b/nova/tests/misc_unittest.py @@ -15,7 +15,6 @@ # under the License. 
import os -import subprocess from nova import test from nova.utils import parse_mailmap, str_dict_replace @@ -24,18 +23,23 @@ from nova.utils import parse_mailmap, str_dict_replace class ProjectTestCase(test.TrialTestCase): def test_authors_up_to_date(self): if os.path.exists('../.bzr'): - log_cmd = subprocess.Popen(["bzr", "log", "-n0"], - stdout=subprocess.PIPE) - changelog = log_cmd.communicate()[0] + contributors = set() + mailmap = parse_mailmap('../.mailmap') - contributors = set() - for l in changelog.split('\n'): - l = l.strip() - if (l.startswith('author:') or l.startswith('committer:') - and not l == 'committer: Tarmac'): - email = l.split(' ')[-1] - contributors.add(str_dict_replace(email, mailmap)) + import bzrlib.workingtree + tree = bzrlib.workingtree.WorkingTree.open('..') + tree.lock_read() + parents = tree.get_parent_ids() + g = tree.branch.repository.get_graph() + for p in parents[1:]: + rev_ids = [r for r, _ in g.iter_ancestry(parents) + if r != "null:"] + revs = tree.branch.repository.get_revisions(rev_ids) + for r in revs: + for author in r.get_apparent_authors(): + email = author.split(' ')[-1] + contributors.add(str_dict_replace(email, mailmap)) authors_file = open('../Authors', 'r').read() diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py index b7c1d2acc..1966b51f7 100644 --- a/nova/tests/quota_unittest.py +++ b/nova/tests/quota_unittest.py @@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase): for i in range(FLAGS.quota_instances): instance_id = self._create_instance() instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, - instance_type='m1.small') + instance_type='m1.small', + image_id='fake') for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase): instance_ids = [] instance_id = self._create_instance(cores=4) instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, - instance_type='m1.small') + instance_type='m1.small', + image_id='fake') for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase): for i in range(FLAGS.quota_volumes): volume_id = self._create_volume() volume_ids.append(volume_id) - self.assertRaises(cloud.QuotaError, self.cloud.create_volume, + self.assertRaises(quota.QuotaError, self.cloud.create_volume, self.context, size=10) for volume_id in volume_ids: @@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase): volume_ids = [] volume_id = self._create_volume(size=20) volume_ids.append(volume_id) - self.assertRaises(cloud.QuotaError, + self.assertRaises(quota.QuotaError, self.cloud.create_volume, self.context, size=10) @@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase): # make an rpc.call, the test just finishes with OK. It # appears to be something in the magic inline callbacks # that is breaking. 
- self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, + self.assertRaises(quota.QuotaError, self.cloud.allocate_address, self.context) db.floating_ip_destroy(context.get_admin_context(), address) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 11f0fa8ce..c40bb4bb4 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -25,7 +25,7 @@ import sys from nova import flags from nova.virt import fake from nova.virt import libvirt_conn -from nova.virt import xenapi +from nova.virt import xenapi_conn FLAGS = flags.FLAGS @@ -61,7 +61,7 @@ def get_connection(read_only=False): elif t == 'libvirt': conn = libvirt_conn.get_connection(read_only) elif t == 'xenapi': - conn = xenapi.get_connection(read_only) + conn = xenapi_conn.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py deleted file mode 100644 index 3169562a5..000000000 --- a/nova/virt/xenapi.py +++ /dev/null @@ -1,444 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A connection to XenServer or Xen Cloud Platform. - -The concurrency model for this class is as follows: - -All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator -deferredToThread). They are remote calls, and so may hang for the usual -reasons. They should not be allowed to block the reactor thread. - -All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async -(using XenAPI.VM.async_start etc). These return a task, which can then be -polled for completion. Polling is handled using reactor.callLater. - -This combination of techniques means that we don't block the reactor thread at -all, and at the same time we don't hold lots of threads waiting for -long-running operations. - -FIXME: get_info currently doesn't conform to these rules, and will block the -reactor thread if the VM.get_by_name_label or VM.get_record calls block. - -**Related Flags** - -:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. -:xenapi_connection_username: Username for connection to XenServer/Xen Cloud - Platform (default: root). -:xenapi_connection_password: Password for connection to XenServer/Xen Cloud - Platform. -:xenapi_task_poll_interval: The interval (seconds) used for polling of - remote tasks (Async.VM.start, etc) - (default: 0.5). - -""" - -import logging -import xmlrpclib - -from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task - -from nova import db -from nova import flags -from nova import process -from nova import utils -from nova.auth.manager import AuthManager -from nova.compute import instance_types -from nova.compute import power_state -from nova.virt import images - -XenAPI = None - - -FLAGS = flags.FLAGS -flags.DEFINE_string('xenapi_connection_url', - None, - 'URL for connection to XenServer/Xen Cloud Platform.' 
- ' Required if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_username', - 'root', - 'Username for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_string('xenapi_connection_password', - None, - 'Password for connection to XenServer/Xen Cloud Platform.' - ' Used only if connection_type=xenapi.') -flags.DEFINE_float('xenapi_task_poll_interval', - 0.5, - 'The interval used for polling of remote tasks ' - '(Async.VM.start, etc). Used only if ' - 'connection_type=xenapi.') - - -XENAPI_POWER_STATE = { - 'Halted': power_state.SHUTDOWN, - 'Running': power_state.RUNNING, - 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed': power_state.CRASHED} - - -def get_connection(_): - """Note that XenAPI doesn't have a read-only connection mode, so - the read_only parameter is ignored.""" - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - global XenAPI - if XenAPI is None: - XenAPI = __import__('XenAPI') - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password - if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') - return XenAPIConnection(url, username, password) - - -class XenAPIConnection(object): - def __init__(self, url, user, pw): - self._conn = XenAPI.Session(url) - self._conn.login_with_password(user, pw) - - def list_instances(self): - return [self._conn.xenapi.VM.get_name_label(vm) \ - for vm in self._conn.xenapi.VM.get_all()] - - @defer.inlineCallbacks - def spawn(self, instance): - vm = yield self._lookup(instance.name) - if vm is not None: - raise Exception('Attempted to create non-unique name %s' % - instance.name) - - network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) - - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( - instance.image_id, user, project, True) - kernel = yield self._fetch_image( - instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) - if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) - logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) - logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - - @defer.inlineCallbacks - def _create_vm(self, instance, kernel, ramdisk): - """Create a VM record. 
Returns a Deferred that gives the new - VM reference.""" - - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] - mem = str(long(instance_type['memory_mb']) * 1024 * 1024) - vcpus = str(instance_type['vcpus']) - rec = { - 'name_label': instance.name, - 'name_description': '', - 'is_a_template': False, - 'memory_static_min': '0', - 'memory_static_max': mem, - 'memory_dynamic_min': mem, - 'memory_dynamic_max': mem, - 'VCPUs_at_startup': vcpus, - 'VCPUs_max': vcpus, - 'VCPUs_params': {}, - 'actions_after_shutdown': 'destroy', - 'actions_after_reboot': 'restart', - 'actions_after_crash': 'destroy', - 'PV_bootloader': '', - 'PV_kernel': kernel, - 'PV_ramdisk': ramdisk, - 'PV_args': 'root=/dev/xvda1', - 'PV_bootloader_args': '', - 'PV_legacy_args': '', - 'HVM_boot_policy': '', - 'HVM_boot_params': {}, - 'platform': {}, - 'PCI_bus': '', - 'recommendations': '', - 'affinity': '', - 'user_version': '0', - 'other_config': {}, - } - logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) - logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) - - @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): - """Create a VBD record. Returns a Deferred that gives the new - VBD reference.""" - - vbd_rec = {} - vbd_rec['VM'] = vm_ref - vbd_rec['VDI'] = vdi_ref - vbd_rec['userdevice'] = str(userdevice) - vbd_rec['bootable'] = bootable - vbd_rec['mode'] = 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) - logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, - vdi_ref) - defer.returnValue(vbd_ref) - - @defer.inlineCallbacks - def _create_vif(self, vm_ref, network_ref, mac_address): - """Create a VIF record. Returns a Deferred that gives the new - VIF reference.""" - - vif_rec = {} - vif_rec['device'] = '0' - vif_rec['network'] = network_ref - vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mac_address - vif_rec['MTU'] = '1500' - vif_rec['other_config'] = {} - vif_rec['qos_algorithm_type'] = '' - vif_rec['qos_algorithm_params'] = {} - logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, - network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) - logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, - vm_ref, network_ref) - defer.returnValue(vif_ref) - - @defer.inlineCallbacks - def _find_network_with_bridge(self, bridge): - expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) - if len(networks) == 1: - defer.returnValue(networks.keys()[0]) - elif len(networks) > 1: - raise Exception('Found non-unique network for bridge %s' % bridge) - else: - raise Exception('Found no network for bridge %s' % bridge) - - @defer.inlineCallbacks - def _fetch_image(self, image, user, project, use_sr): - """use_sr: True to put the image as a VDI in an SR, False to place - it on dom0's filesystem. The former is for VM disks, the latter for - its kernel and ramdisk (if external kernels are being used). 
- Returns a Deferred that gives the new VDI UUID.""" - - url = images.image_url(image) - access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s" % (url, access)) - fn = use_sr and 'get_vdi' or 'get_kernel' - args = {} - args['src_url'] = url - args['username'] = access - args['password'] = user.secret - if use_sr: - args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) - - @defer.inlineCallbacks - def reboot(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) - - @defer.inlineCallbacks - def destroy(self, instance): - vm = yield self._lookup(instance.name) - if vm is None: - # Don't complain, just return. This lets us clean up instances - # that have already disappeared from the underlying platform. - defer.returnValue(None) - # Get the VDIs related to the VM - vdis = yield self._lookup_vm_vdis(vm) - try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - # Disk clean-up - if vdis: - for vdi in vdis: - try: - task = yield self._call_xenapi('Async.VDI.destroy', vdi) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) - except Exception, exc: - logging.warn(exc) - - def get_info(self, instance_id): - vm = self._lookup_blocking(instance_id) - if vm is None: - raise Exception('instance not present %s' % instance_id) - rec = self._conn.xenapi.VM.get_record(vm) - return {'state': XENAPI_POWER_STATE[rec['power_state']], - 'max_mem': long(rec['memory_static_max']) >> 10, - 'mem': long(rec['memory_dynamic_max']) >> 10, - 'num_cpu': rec['VCPUs_max'], - 'cpu_time': 0} - - def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' - - @utils.deferredToThread - def _lookup(self, i): - return self._lookup_blocking(i) - - def _lookup_blocking(self, i): - vms = self._conn.xenapi.VM.get_by_name_label(i) - n = len(vms) - if n == 0: - return None - elif n > 1: - raise Exception('duplicate name found: %s' % i) - else: - return vms[0] - - @utils.deferredToThread - def _lookup_vm_vdis(self, vm): - return self._lookup_vm_vdis_blocking(vm) - - def _lookup_vm_vdis_blocking(self, vm): - # Firstly we get the VBDs, then the VDIs. - # TODO: do we leave the read-only devices? - vbds = self._conn.xenapi.VM.get_VBDs(vm) - vdis = [] - if vbds: - for vbd in vbds: - try: - vdi = self._conn.xenapi.VBD.get_VDI(vbd) - # Test valid VDI - record = self._conn.xenapi.VDI.get_record(vdi) - except Exception, exc: - logging.warn(exc) - else: - vdis.append(vdi) - if len(vdis) > 0: - return vdis - else: - return None - - def _wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - d = defer.Deferred() - reactor.callLater(0, self._poll_task, task, d) - return d - - @utils.deferredToThread - def _poll_task(self, task, deferred): - """Poll the given XenAPI task, and fire the given Deferred if we - get a result.""" - try: - #logging.debug('Polling task %s...', task) - status = self._conn.xenapi.task.get_status(task) - if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, - self._poll_task, task, deferred) - elif status == 'success': - result = self._conn.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) - deferred.callback(_parse_xmlrpc_value(result)) - else: - error_info = self._conn.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) - deferred.errback(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) - except Exception, exc: - logging.warn(exc) - deferred.errback(exc) - - @utils.deferredToThread - def _call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - f = self._conn.xenapi - for m in method.split('.'): - f = f.__getattr__(m) - return f(*args) - - @utils.deferredToThread - def _async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - return _unwrap_plugin_exceptions( - self._conn.xenapi.Async.host.call_plugin, - self._get_xenapi_host(), plugin, fn, args) - - def _get_xenapi_host(self): - return self._conn.xenapi.session.get_this_host(self._conn.handle) - - -def _unwrap_plugin_exceptions(func, *args, **kwargs): - try: - return func(*args, **kwargs) - except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) - if (len(exc.details) == 4 and - exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and - exc.details[2] == 'Failure'): - params = None - try: - params = eval(exc.details[3]) - except: - raise exc - raise XenAPI.Failure(params) - else: - raise - except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) - raise - - -def _parse_xmlrpc_value(val): - """Parse the given value as if it were an XML-RPC value. This is - sometimes used as the format for the task.result field.""" - if not val: - return val - x = xmlrpclib.loads( - '<?xml version="1.0"?><methodResponse><params><param>' + - val + - '</param></params></methodResponse>') - return x[0][0] diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py new file mode 100644 index 000000000..3d598c463 --- /dev/null +++ b/nova/virt/xenapi/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
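The deleted nova/virt/xenapi.py above, and the xenapi package that replaces it below, both rely on the pattern the old module docstring describes: long-running XenAPI operations are started asynchronously and then polled with reactor.callLater so the Twisted reactor thread never blocks. A stripped-down sketch of that polling idiom, with get_status and get_result standing in for the XenAPI task calls (task.get_status / task.get_result in the deleted _poll_task); in the new layout this behaviour is reached through session.wait_for_task, whose implementation is not shown in this section.

    from twisted.internet import defer, reactor

    def poll_task(get_status, get_result, interval=0.5):
        """Return a Deferred that fires once the polled task leaves 'pending'."""
        d = defer.Deferred()

        def _poll():
            status = get_status()
            if status == 'pending':
                # Not done yet: schedule another check instead of blocking a thread.
                reactor.callLater(interval, _poll)
            elif status == 'success':
                d.callback(get_result())
            else:
                d.errback(Exception('task finished with status %s' % status))

        reactor.callLater(0, _poll)
        return d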
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py new file mode 100644 index 000000000..8cb4cce3a --- /dev/null +++ b/nova/virt/xenapi/network_utils.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods for operations related to the management of network +records and their attributes like bridges, PIFs, QoS, as well as +their lookup functions. +""" + +from twisted.internet import defer + + +class NetworkHelper(): + """ + The class that wraps the helper methods together. + """ + def __init__(self): + return + + @classmethod + @defer.inlineCallbacks + def find_network_with_bridge(cls, session, bridge): + """ Return the network on which the bridge is attached, if found """ + expr = 'field "bridge" = "%s"' % bridge + networks = yield session.call_xenapi('network.get_all_records_where', + expr) + if len(networks) == 1: + defer.returnValue(networks.keys()[0]) + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py new file mode 100644 index 000000000..99d484ca2 --- /dev/null +++ b/nova/virt/xenapi/vm_utils.py @@ -0,0 +1,216 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods for operations related to the management of VM records and +their attributes like VDIs, VIFs, as well as their lookup functions. +""" + +import logging + +from twisted.internet import defer + +from nova import utils +from nova.auth.manager import AuthManager +from nova.compute import instance_types +from nova.compute import power_state +from nova.virt import images + +XENAPI_POWER_STATE = { + 'Halted': power_state.SHUTDOWN, + 'Running': power_state.RUNNING, + 'Paused': power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed': power_state.CRASHED} + +XenAPI = None + + +class VMHelper(): + """ + The class that wraps the helper methods together. + """ + def __init__(self): + global XenAPI + if XenAPI is None: + XenAPI = __import__('XenAPI') + + @classmethod + @defer.inlineCallbacks + def create_vm(cls, session, instance, kernel, ramdisk): + """Create a VM record. 
Returns a Deferred that gives the new + VM reference.""" + + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + mem = str(long(instance_type['memory_mb']) * 1024 * 1024) + vcpus = str(instance_type['vcpus']) + rec = { + 'name_label': instance.name, + 'name_description': '', + 'is_a_template': False, + 'memory_static_min': '0', + 'memory_static_max': mem, + 'memory_dynamic_min': mem, + 'memory_dynamic_max': mem, + 'VCPUs_at_startup': vcpus, + 'VCPUs_max': vcpus, + 'VCPUs_params': {}, + 'actions_after_shutdown': 'destroy', + 'actions_after_reboot': 'restart', + 'actions_after_crash': 'destroy', + 'PV_bootloader': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', + 'PV_bootloader_args': '', + 'PV_legacy_args': '', + 'HVM_boot_policy': '', + 'HVM_boot_params': {}, + 'platform': {}, + 'PCI_bus': '', + 'recommendations': '', + 'affinity': '', + 'user_version': '0', + 'other_config': {}, + } + logging.debug('Created VM %s...', instance.name) + vm_ref = yield session.call_xenapi('VM.create', rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + defer.returnValue(vm_ref) + + @classmethod + @defer.inlineCallbacks + def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference.""" + + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + defer.returnValue(vbd_ref) + + @classmethod + @defer.inlineCallbacks + def create_vif(cls, session, vm_ref, network_ref, mac_address): + """Create a VIF record. Returns a Deferred that gives the new + VIF reference.""" + + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + defer.returnValue(vif_ref) + + @classmethod + @defer.inlineCallbacks + def fetch_image(cls, session, image, user, project, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used). 
+
+    @classmethod
+    @defer.inlineCallbacks
+    def fetch_image(cls, session, image, user, project, use_sr):
+        """use_sr: True to put the image as a VDI in an SR, False to place
+        it on dom0's filesystem. The former is for VM disks, the latter for
+        its kernel and ramdisk (if external kernels are being used).
+        Returns a Deferred that gives the new VDI UUID."""
+
+        url = images.image_url(image)
+        access = AuthManager().get_access_key(user, project)
+        logging.debug("Asking xapi to fetch %s as %s", url, access)
+        fn = use_sr and 'get_vdi' or 'get_kernel'
+        args = {}
+        args['src_url'] = url
+        args['username'] = access
+        args['password'] = user.secret
+        if use_sr:
+            args['add_partition'] = 'true'
+        task = yield session.async_call_plugin('objectstore', fn, args)
+        uuid = yield session.wait_for_task(task)
+        defer.returnValue(uuid)
+
+    @classmethod
+    @utils.deferredToThread
+    def lookup(cls, session, i):
+        """ Look the instance i up, and returns it if available """
+        return VMHelper.lookup_blocking(session, i)
+
+    @classmethod
+    def lookup_blocking(cls, session, i):
+        """ Synchronous lookup """
+        vms = session.get_xenapi().VM.get_by_name_label(i)
+        n = len(vms)
+        if n == 0:
+            return None
+        elif n > 1:
+            raise Exception('duplicate name found: %s' % i)
+        else:
+            return vms[0]
+
+    @classmethod
+    @utils.deferredToThread
+    def lookup_vm_vdis(cls, session, vm):
+        """ Look for the VDIs that are attached to the VM """
+        return VMHelper.lookup_vm_vdis_blocking(session, vm)
+
+    @classmethod
+    def lookup_vm_vdis_blocking(cls, session, vm):
+        """ Synchronous lookup_vm_vdis """
+        # Firstly we get the VBDs, then the VDIs.
+        # TODO(Armando): do we leave the read-only devices?
+        vbds = session.get_xenapi().VM.get_VBDs(vm)
+        vdis = []
+        if vbds:
+            for vbd in vbds:
+                try:
+                    vdi = session.get_xenapi().VBD.get_VDI(vbd)
+                    # Test valid VDI
+                    record = session.get_xenapi().VDI.get_record(vdi)
+                    logging.debug('VDI %s is still available', record['uuid'])
+                except XenAPI.Failure, exc:
+                    logging.warn(exc)
+                else:
+                    vdis.append(vdi)
+        if len(vdis) > 0:
+            return vdis
+        else:
+            return None
+
+    @classmethod
+    def compile_info(cls, record):
+        return {'state': XENAPI_POWER_STATE[record['power_state']],
+                'max_mem': long(record['memory_static_max']) >> 10,
+                'mem': long(record['memory_dynamic_max']) >> 10,
+                'num_cpu': record['VCPUs_max'],
+                'cpu_time': 0}
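A small worked example of compile_info, not part of the patch: the record values below are invented, and the >> 10 shift reports memory in KB rather than bytes.

    from nova.compute import power_state
    from nova.virt.xenapi.vm_utils import VMHelper

    # Invented XenAPI VM record: 4 GB of memory, expressed in bytes.
    record = {'power_state': 'Running',
              'memory_static_max': '4294967296',
              'memory_dynamic_max': '4294967296',
              'VCPUs_max': '4'}
    info = VMHelper.compile_info(record)
    # info == {'state': power_state.RUNNING, 'max_mem': 4194304,
    #          'mem': 4194304, 'num_cpu': '4', 'cpu_time': 0}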
+""" + +import logging + +from twisted.internet import defer + +from nova import db +from nova import context +from nova.auth.manager import AuthManager +from nova.virt.xenapi.network_utils import NetworkHelper +from nova.virt.xenapi.vm_utils import VMHelper + +XenAPI = None + + +class VMOps(object): + """ + Management class for VM-related tasks + """ + def __init__(self, session): + global XenAPI + if XenAPI is None: + XenAPI = __import__('XenAPI') + self._session = session + + def list_instances(self): + """ List VM instances """ + return [self._session.get_xenapi().VM.get_name_label(vm) \ + for vm in self._session.get_xenapi().VM.get_all()] + + @defer.inlineCallbacks + def spawn(self, instance): + """ Create VM instance """ + vm = yield VMHelper.lookup(self._session, instance.name) + if vm is not None: + raise Exception('Attempted to create non-unique name %s' % + instance.name) + + bridge = db.project_get_network(context.get_admin_context(), + instance.project_id).bridge + network_ref = \ + yield NetworkHelper.find_network_with_bridge(self._session, bridge) + + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) + vdi_uuid = yield VMHelper.fetch_image(self._session, + instance.image_id, user, project, True) + kernel = yield VMHelper.fetch_image(self._session, + instance.kernel_id, user, project, False) + ramdisk = yield VMHelper.fetch_image(self._session, + instance.ramdisk_id, user, project, False) + vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + vm_ref = yield VMHelper.create_vm(self._session, + instance, kernel, ramdisk) + yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + if network_ref: + yield VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) + logging.debug('Starting VM %s...', vm_ref) + yield self._session.call_xenapi('VM.start', vm_ref, False, False) + logging.info('Spawning VM %s created %s.', instance.name, + vm_ref) + + @defer.inlineCallbacks + def reboot(self, instance): + """ Reboot VM instance """ + instance_name = instance.name + vm = yield VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) + yield self._session.wait_for_task(task) + + @defer.inlineCallbacks + def destroy(self, instance): + """ Destroy VM instance """ + vm = yield VMHelper.lookup(self._session, instance.name) + if vm is None: + # Don't complain, just return. This lets us clean up instances + # that have already disappeared from the underlying platform. 
+
+    @defer.inlineCallbacks
+    def reboot(self, instance):
+        """ Reboot VM instance """
+        instance_name = instance.name
+        vm = yield VMHelper.lookup(self._session, instance_name)
+        if vm is None:
+            raise Exception('instance not present %s' % instance_name)
+        task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm)
+        yield self._session.wait_for_task(task)
+
+    @defer.inlineCallbacks
+    def destroy(self, instance):
+        """ Destroy VM instance """
+        vm = yield VMHelper.lookup(self._session, instance.name)
+        if vm is None:
+            # Don't complain, just return. This lets us clean up instances
+            # that have already disappeared from the underlying platform.
+            defer.returnValue(None)
+        # Get the VDIs related to the VM
+        vdis = yield VMHelper.lookup_vm_vdis(self._session, vm)
+        try:
+            task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
+                                                   vm)
+            yield self._session.wait_for_task(task)
+        except XenAPI.Failure, exc:
+            logging.warn(exc)
+        # Disk clean-up
+        if vdis:
+            for vdi in vdis:
+                try:
+                    task = yield self._session.call_xenapi('Async.VDI.destroy',
+                                                           vdi)
+                    yield self._session.wait_for_task(task)
+                except XenAPI.Failure, exc:
+                    logging.warn(exc)
+        try:
+            task = yield self._session.call_xenapi('Async.VM.destroy', vm)
+            yield self._session.wait_for_task(task)
+        except XenAPI.Failure, exc:
+            logging.warn(exc)
+
+    def get_info(self, instance_id):
+        """ Return data about VM instance """
+        vm = VMHelper.lookup_blocking(self._session, instance_id)
+        if vm is None:
+            raise Exception('instance not present %s' % instance_id)
+        rec = self._session.get_xenapi().VM.get_record(vm)
+        return VMHelper.compile_info(rec)
+
+    def get_console_output(self, instance):
+        """ Return snapshot of console """
+        # TODO: implement this to fix pylint!
+        return 'FAKE CONSOLE OUTPUT of instance'
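Since spawn, reboot and destroy are wrapped in defer.inlineCallbacks, they return Deferreds rather than blocking. A minimal sketch of how a caller running under the Twisted reactor might drive spawn follows; it is not part of the patch, and session and instance are assumed to come from the surrounding driver code.

    import logging

    from nova.virt.xenapi.vmops import VMOps


    def start_instance(session, instance):
        # Fires the spawn coroutine and logs any failure via an errback,
        # without blocking the reactor thread.
        vmops = VMOps(session)
        d = vmops.spawn(instance)
        d.addErrback(logging.error)
        return d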
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
new file mode 100644
index 000000000..a4c7a3861
--- /dev/null
+++ b/nova/virt/xenapi/volumeops.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+
+
+class VolumeOps(object):
+    def __init__(self, session):
+        self._session = session
+
+    def attach_volume(self, instance_name, device_path, mountpoint):
+        # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
+        return True
+
+    def detach_volume(self, instance_name, mountpoint):
+        # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
+        return True
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
new file mode 100644
index 000000000..26b30bf92
--- /dev/null
+++ b/nova/virt/xenapi_conn.py
@@ -0,0 +1,238 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to XenServer or Xen Cloud Platform.
+
+The concurrency model for this class is as follows:
+
+All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
+deferredToThread). They are remote calls, and so may hang for the usual
+reasons. They should not be allowed to block the reactor thread.
+
+All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion. Polling is handled using reactor.callLater.
+
+This combination of techniques means that we don't block the reactor thread at
+all, and at the same time we don't hold lots of threads waiting for
+long-running operations.
+
+FIXME: get_info currently doesn't conform to these rules, and will block the
+reactor thread if the VM.get_by_name_label or VM.get_record calls block.
+
+**Related Flags**
+
+:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
+:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
+                             Platform (default: root).
+:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
+                             Platform.
+:xenapi_task_poll_interval: The interval (seconds) used for polling of
+                            remote tasks (Async.VM.start, etc)
+                            (default: 0.5).
+
+"""
+
+import logging
+import xmlrpclib
+
+from twisted.internet import defer
+from twisted.internet import reactor
+
+from nova import utils
+from nova import flags
+from nova.virt.xenapi.vmops import VMOps
+from nova.virt.xenapi.volumeops import VolumeOps
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('xenapi_connection_url',
+                    None,
+                    'URL for connection to XenServer/Xen Cloud Platform.'
+                    ' Required if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_connection_username',
+                    'root',
+                    'Username for connection to XenServer/Xen Cloud Platform.'
+                    ' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_connection_password',
+                    None,
+                    'Password for connection to XenServer/Xen Cloud Platform.'
+                    ' Used only if connection_type=xenapi.')
+flags.DEFINE_float('xenapi_task_poll_interval',
+                   0.5,
+                   'The interval used for polling of remote tasks '
+                   '(Async.VM.start, etc). Used only if '
+                   'connection_type=xenapi.')
+
+XenAPI = None
+
+
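For context, one plausible flagfile fragment that would enable this driver; the values are invented, and the xenapi_* flags are only consulted when connection_type is xenapi.

    --connection_type=xenapi
    --xenapi_connection_url=https://my-xenserver-host/
    --xenapi_connection_username=root
    --xenapi_connection_password=my-secret
    --xenapi_task_poll_interval=0.5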
+def get_connection(_):
+    """Note that XenAPI doesn't have a read-only connection mode, so
+    the read_only parameter is ignored."""
+    # This is loaded late so that there's no need to install this
+    # library when not using XenAPI.
+    global XenAPI
+    if XenAPI is None:
+        XenAPI = __import__('XenAPI')
+    url = FLAGS.xenapi_connection_url
+    username = FLAGS.xenapi_connection_username
+    password = FLAGS.xenapi_connection_password
+    if not url or password is None:
+        raise Exception('Must specify xenapi_connection_url, '
+                        'xenapi_connection_username (optionally), and '
+                        'xenapi_connection_password to use '
+                        'connection_type=xenapi')
+    return XenAPIConnection(url, username, password)
+
+
+class XenAPIConnection(object):
+    """ A connection to XenServer or Xen Cloud Platform """
+    def __init__(self, url, user, pw):
+        session = XenAPISession(url, user, pw)
+        self._vmops = VMOps(session)
+        self._volumeops = VolumeOps(session)
+
+    def list_instances(self):
+        """ List VM instances """
+        return self._vmops.list_instances()
+
+    def spawn(self, instance):
+        """ Create VM instance """
+        self._vmops.spawn(instance)
+
+    def reboot(self, instance):
+        """ Reboot VM instance """
+        self._vmops.reboot(instance)
+
+    def destroy(self, instance):
+        """ Destroy VM instance """
+        self._vmops.destroy(instance)
+
+    def get_info(self, instance_id):
+        """ Return data about VM instance """
+        return self._vmops.get_info(instance_id)
+
+    def get_console_output(self, instance):
+        """ Return snapshot of console """
+        return self._vmops.get_console_output(instance)
+
+    def attach_volume(self, instance_name, device_path, mountpoint):
+        """ Attach volume storage to VM instance """
+        return self._volumeops.attach_volume(instance_name,
+                                             device_path,
+                                             mountpoint)
+
+    def detach_volume(self, instance_name, mountpoint):
+        """ Detach volume storage to VM instance """
+        return self._volumeops.detach_volume(instance_name, mountpoint)
+
+
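A short sketch of how the connection might be obtained and used once the flags above are set; this is not part of the patch, and it assumes a reachable XenServer host.

    from nova.virt import xenapi_conn

    # get_connection() ignores its read_only argument, as its docstring
    # notes; it reads the xenapi_* flags and logs in to the host.
    conn = xenapi_conn.get_connection(False)
    for name in conn.list_instances():
        print name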
+class XenAPISession(object):
+    """ The session to invoke XenAPI SDK calls """
+    def __init__(self, url, user, pw):
+        self._session = XenAPI.Session(url)
+        self._session.login_with_password(user, pw)
+
+    def get_xenapi(self):
+        """ Return the xenapi object """
+        return self._session.xenapi
+
+    def get_xenapi_host(self):
+        """ Return the xenapi host """
+        return self._session.xenapi.session.get_this_host(self._session.handle)
+
+    @utils.deferredToThread
+    def call_xenapi(self, method, *args):
+        """Call the specified XenAPI method on a background thread. Returns
+        a Deferred for the result."""
+        f = self._session.xenapi
+        for m in method.split('.'):
+            f = f.__getattr__(m)
+        return f(*args)
+
+    @utils.deferredToThread
+    def async_call_plugin(self, plugin, fn, args):
+        """Call Async.host.call_plugin on a background thread. Returns a
+        Deferred with the task reference."""
+        return _unwrap_plugin_exceptions(
+            self._session.xenapi.Async.host.call_plugin,
+            self.get_xenapi_host(), plugin, fn, args)
+
+    def wait_for_task(self, task):
+        """Return a Deferred that will give the result of the given task.
+        The task is polled until it completes."""
+        d = defer.Deferred()
+        reactor.callLater(0, self._poll_task, task, d)
+        return d
+
+    @utils.deferredToThread
+    def _poll_task(self, task, deferred):
+        """Poll the given XenAPI task, and fire the given Deferred if we
+        get a result."""
+        try:
+            #logging.debug('Polling task %s...', task)
+            status = self._session.xenapi.task.get_status(task)
+            if status == 'pending':
+                reactor.callLater(FLAGS.xenapi_task_poll_interval,
+                                  self._poll_task, task, deferred)
+            elif status == 'success':
+                result = self._session.xenapi.task.get_result(task)
+                logging.info('Task %s status: success. %s', task, result)
+                deferred.callback(_parse_xmlrpc_value(result))
+            else:
+                error_info = self._session.xenapi.task.get_error_info(task)
+                logging.warn('Task %s status: %s. %s', task, status,
+                             error_info)
+                deferred.errback(XenAPI.Failure(error_info))
+            #logging.debug('Polling task %s done.', task)
+        except XenAPI.Failure, exc:
+            logging.warn(exc)
+            deferred.errback(exc)
+
+
+def _unwrap_plugin_exceptions(func, *args, **kwargs):
+    """ Parse exception details """
+    try:
+        return func(*args, **kwargs)
+    except XenAPI.Failure, exc:
+        logging.debug("Got exception: %s", exc)
+        if (len(exc.details) == 4 and
+            exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
+            exc.details[2] == 'Failure'):
+            params = None
+            try:
+                params = eval(exc.details[3])
+            except:
+                raise exc
+            raise XenAPI.Failure(params)
+        else:
+            raise
+    except xmlrpclib.ProtocolError, exc:
+        logging.debug("Got exception: %s", exc)
+        raise
+
+
+def _parse_xmlrpc_value(val):
+    """Parse the given value as if it were an XML-RPC value. This is
+    sometimes used as the format for the task.result field."""
+    if not val:
+        return val
+    x = xmlrpclib.loads(
+        '<?xml version="1.0"?><methodResponse><params><param>' +
+        val +
+        '</param></params></methodResponse>')
+    return x[0][0]
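Finally, a small sketch of the _parse_xmlrpc_value round trip, not part of the patch: XenAPI task results typically arrive as a bare XML-RPC <value> fragment, and the literal below is invented.

    from nova.virt.xenapi_conn import _parse_xmlrpc_value

    # Unwraps the XML-RPC fragment into the plain opaque reference string.
    ref = _parse_xmlrpc_value(
        '<value>OpaqueRef:00000000-0000-0000-0000-000000000000</value>')
    # ref == 'OpaqueRef:00000000-0000-0000-0000-000000000000'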
