summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorArmando Migliaccio <armando.migliaccio@citrix.com>2010-12-06 16:56:27 +0000
committerArmando Migliaccio <armando.migliaccio@citrix.com>2010-12-06 16:56:27 +0000
commit7f6770f0802cdf0e73b789494ebdc8a57bf9cfad (patch)
tree07d7439e9b8545a88bbb86bdeea9aa3c616f0c8e /nova
parente4cfd7f3fe7d3c50d65c61abf21bf998fde85147 (diff)
parentb4ac00dcbba9bd827177888f2790fb48e1432262 (diff)
merge with lp:~armando-migliaccio/nova/xenapi-refactoring
Diffstat (limited to 'nova')
-rw-r--r--nova/api/ec2/cloud.py179
-rw-r--r--nova/api/openstack/__init__.py2
-rw-r--r--nova/api/openstack/servers.py130
-rw-r--r--nova/compute/api.py212
-rw-r--r--nova/compute/instance_types.py20
-rw-r--r--nova/compute/manager.py57
-rw-r--r--nova/db/base.py36
-rw-r--r--nova/manager.py10
-rw-r--r--nova/quota.py5
-rw-r--r--nova/tests/api/openstack/fakes.py2
-rw-r--r--nova/tests/api/openstack/test_servers.py10
-rw-r--r--nova/tests/compute_unittest.py19
-rw-r--r--nova/tests/quota_unittest.py16
-rw-r--r--nova/virt/xenapi/__init__.py11
-rw-r--r--nova/virt/xenapi/network_utils.py13
-rw-r--r--nova/virt/xenapi/novadeps.py59
-rw-r--r--nova/virt/xenapi/vm_utils.py31
-rw-r--r--nova/virt/xenapi/vmops.py19
-rw-r--r--nova/virt/xenapi/volumeops.py6
-rw-r--r--nova/virt/xenapi_conn.py22
20 files changed, 493 insertions, 366 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 884372ce7..e50906ae1 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -39,7 +39,8 @@ from nova import flags
from nova import quota
from nova import rpc
from nova import utils
-from nova.compute.instance_types import INSTANCE_TYPES
+from nova.compute import api as compute_api
+from nova.compute import instance_types
from nova.api import cloud
from nova.image.s3 import S3ImageService
@@ -50,11 +51,6 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
-class QuotaError(exception.ApiError):
- """Quota Exceeeded"""
- pass
-
-
def _gen_key(context, user_id, key_name):
"""Generate a key
@@ -99,7 +95,7 @@ class CloudController(object):
"""
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
- self.compute_manager = utils.import_object(FLAGS.compute_manager)
+ self.compute_api = compute_api.ComputeAPI()
self.image_service = S3ImageService()
self.setup()
@@ -127,7 +123,7 @@ class CloudController(object):
for instance in db.instance_get_all_by_project(context, project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
- INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ instance['vcpus'])
key = str(instance['key_name'])
if key in result:
result[key].append(line)
@@ -260,7 +256,7 @@ class CloudController(object):
return True
def describe_security_groups(self, context, group_name=None, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if context.user.is_admin():
groups = db.security_group_get_all(context)
else:
@@ -358,7 +354,7 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -383,7 +379,7 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -419,7 +415,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
@@ -505,9 +501,8 @@ class CloudController(object):
if quota.allowed_volumes(context, 1, size) < 1:
logging.warn("Quota exceeeded for %s, tried to create %sG volume",
context.project_id, size)
- raise QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" %
- size)
+ raise quota.QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" % size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
@@ -699,8 +694,8 @@ class CloudController(object):
if quota.allowed_floating_ips(context, 1) < 1:
logging.warn("Quota exceeeded for %s, tried to allocate address",
context.project_id)
- raise QuotaError("Address quota exceeded. You cannot "
- "allocate any more addresses")
+ raise quota.QuotaError("Address quota exceeded. You cannot "
+ "allocate any more addresses")
network_topic = self._get_network_topic(context)
public_ip = rpc.call(context,
network_topic,
@@ -752,137 +747,25 @@ class CloudController(object):
"args": {"network_id": network_ref['id']}})
return db.queue_get_for(context, FLAGS.network_topic, host)
- def _ensure_default_security_group(self, context):
- try:
- db.security_group_get_by_name(context,
- context.project_id,
- 'default')
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user.id,
- 'project_id': context.project_id}
- group = db.security_group_create(context, values)
-
def run_instances(self, context, **kwargs):
- instance_type = kwargs.get('instance_type', 'm1.small')
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s",
- instance_type)
- # check quota
- max_instances = int(kwargs.get('max_count', 1))
- min_instances = int(kwargs.get('min_count', max_instances))
- num_instances = quota.allowed_instances(context,
- max_instances,
- instance_type)
- if num_instances < min_instances:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_instances)
- raise QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
- num_instances, "InstanceLimitExceeded")
- # make sure user can access the image
- # vpn image is private so it doesn't show up on lists
- vpn = kwargs['image_id'] == FLAGS.vpn_image_id
-
- if not vpn:
- image = self.image_service.show(context, kwargs['image_id'])
-
- # FIXME(ja): if image is vpn, this breaks
- # get defaults from imagestore
- image_id = image['imageId']
- kernel_id = image.get('kernelId', FLAGS.default_kernel)
- ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
-
- # API parameters overrides of defaults
- kernel_id = kwargs.get('kernel_id', kernel_id)
- ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
-
- # make sure we have access to kernel and ramdisk
- self.image_service.show(context, kernel_id)
- self.image_service.show(context, ramdisk_id)
-
- logging.debug("Going to run %s instances...", num_instances)
- launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- key_data = None
- if 'key_name' in kwargs:
- key_pair_ref = db.key_pair_get(context,
- context.user.id,
- kwargs['key_name'])
- key_data = key_pair_ref['public_key']
-
- security_group_arg = kwargs.get('security_group', ["default"])
- if not type(security_group_arg) is list:
- security_group_arg = [security_group_arg]
-
- security_groups = []
- self._ensure_default_security_group(context)
- for security_group_name in security_group_arg:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
-
- reservation_id = utils.generate_uid('r')
- base_options = {}
- base_options['state_description'] = 'scheduling'
- base_options['image_id'] = image_id
- base_options['kernel_id'] = kernel_id
- base_options['ramdisk_id'] = ramdisk_id
- base_options['reservation_id'] = reservation_id
- base_options['key_data'] = key_data
- base_options['key_name'] = kwargs.get('key_name', None)
- base_options['user_id'] = context.user.id
- base_options['project_id'] = context.project_id
- base_options['user_data'] = kwargs.get('user_data', '')
-
- base_options['display_name'] = kwargs.get('display_name')
- base_options['display_description'] = kwargs.get('display_description')
-
- type_data = INSTANCE_TYPES[instance_type]
- base_options['instance_type'] = instance_type
- base_options['memory_mb'] = type_data['memory_mb']
- base_options['vcpus'] = type_data['vcpus']
- base_options['local_gb'] = type_data['local_gb']
- elevated = context.elevated()
-
- for num in range(num_instances):
-
- instance_ref = self.compute_manager.create_instance(context,
- security_groups,
- mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
- inst_id = instance_ref['id']
-
- internal_id = instance_ref['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
-
- self.compute_manager.update_instance(context,
- inst_id,
- hostname=ec2_id)
-
- # TODO(vish): This probably should be done in the scheduler
- # or in compute as a call. The network should be
- # allocated after the host is assigned and setup
- # can happen at the same time.
- address = self.network_manager.allocate_fixed_ip(context,
- inst_id,
- vpn)
- network_topic = self._get_network_topic(context)
- rpc.cast(elevated,
- network_topic,
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
-
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"topic": FLAGS.compute_topic,
- "instance_id": inst_id}})
- logging.debug("Casting to scheduler for %s/%s's instance %s" %
- (context.project.name, context.user.name, inst_id))
- return self._format_run_instances(context, reservation_id)
+ max_count = int(kwargs.get('max_count', 1))
+ instances = self.compute_api.create_instances(context,
+ instance_types.get_by_type(kwargs.get('instance_type', None)),
+ self.image_service,
+ kwargs['image_id'],
+ self._get_network_topic(context),
+ min_count=int(kwargs.get('min_count', max_count)),
+ max_count=max_count,
+ kernel_id=kwargs.get('kernel_id'),
+ ramdisk_id=kwargs.get('ramdisk_id'),
+ name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'),
+ user_data=kwargs.get('user_data', ''),
+ key_name=kwargs.get('key_name'),
+ security_group=kwargs.get('security_group'),
+ generate_hostname=internal_id_to_ec2_id)
+ return self._format_run_instances(context,
+ instances[0]['reservation_id'])
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
@@ -907,7 +790,7 @@ class CloudController(object):
id_str)
continue
now = datetime.datetime.utcnow()
- self.compute_manager.update_instance(context,
+ self.compute_api.update_instance(context,
instance_ref['id'],
state_description='terminating',
state=0,
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 1dd3ba770..4ca108c4e 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -25,6 +25,7 @@ import time
import logging
import routes
+import traceback
import webob.dec
import webob.exc
import webob
@@ -61,6 +62,7 @@ class API(wsgi.Middleware):
return req.get_response(self.application)
except Exception as ex:
logging.warn("Caught error: %s" % str(ex))
+ logging.debug(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 44e69b82c..e7f765c02 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
import webob
from webob import exc
@@ -27,6 +25,7 @@ from nova import wsgi
from nova import context
from nova.api import cloud
from nova.api.openstack import faults
+from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
@@ -35,16 +34,6 @@ import nova.image.service
FLAGS = flags.FLAGS
-def _filter_params(inst_dict):
- """ Extracts all updatable parameters for a server update request """
- keys = dict(name='name', admin_pass='adminPass')
- new_attrs = {}
- for k, v in keys.items():
- if v in inst_dict:
- new_attrs[k] = inst_dict[v]
- return new_attrs
-
-
def _entity_list(entities):
""" Coerces a list of servers into proper dictionary format """
return dict(servers=entities)
@@ -63,7 +52,7 @@ def _entity_detail(inst):
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='display_name', id='id')
+ flavorId='instance_type', name='display_name', id='internal_id')
for k, v in mapped_keys.iteritems():
inst_dict[k] = inst[v]
@@ -78,7 +67,7 @@ def _entity_detail(inst):
def _entity_inst(inst):
""" Filters all model attributes save for id and name """
- return dict(server=dict(id=inst['id'], name=inst['display_name']))
+ return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
class Controller(wsgi.Controller):
@@ -88,14 +77,14 @@ class Controller(wsgi.Controller):
'application/xml': {
"attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress", "progress"]}}}
+ "status", "progress"]}}}
def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
self.db_driver = utils.import_object(db_driver)
self.network_manager = utils.import_object(FLAGS.network_manager)
- self.compute_manager = utils.import_object(FLAGS.compute_manager)
+ self.compute_api = compute_api.ComputeAPI()
super(Controller, self).__init__()
def index(self, req):
@@ -140,22 +129,23 @@ class Controller(wsgi.Controller):
def create(self, req):
""" Creates a new server for a given user """
-
env = self._deserialize(req.body, req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- #try:
- inst = self._build_server_instance(req, env)
- #except Exception, e:
- # return faults.Fault(exc.HTTPUnprocessableEntity())
-
user_id = req.environ['nova.context']['user']['id']
- rpc.cast(context.RequestContext(user_id, user_id),
- FLAGS.compute_topic,
- {"method": "run_instance",
- "args": {"instance_id": inst['id']}})
- return _entity_inst(inst)
+ ctxt = context.RequestContext(user_id, user_id)
+ key_pair = self.db_driver.key_pair_get_all_by_user(None, user_id)[0]
+ instances = self.compute_api.create_instances(ctxt,
+ instance_types.get_by_flavor_id(env['server']['flavorId']),
+ utils.import_object(FLAGS.image_service),
+ env['server']['imageId'],
+ self._get_network_topic(ctxt),
+ name=env['server']['name'],
+ description=env['server']['name'],
+ key_name=key_pair['name'],
+ key_data=key_pair['public_key'])
+ return _entity_inst(instances[0])
def update(self, req, id):
""" Updates the server name or password """
@@ -171,10 +161,14 @@ class Controller(wsgi.Controller):
if not instance or instance.user_id != user_id:
return faults.Fault(exc.HTTPNotFound())
- self.db_driver.instance_update(ctxt,
- int(id),
- _filter_params(inst_dict['server']))
- return faults.Fault(exc.HTTPNoContent())
+ update_dict = {}
+ if 'adminPass' in inst_dict['server']:
+ update_dict['admin_pass'] = inst_dict['server']['adminPass']
+ if 'name' in inst_dict['server']:
+ update_dict['display_name'] = inst_dict['server']['name']
+
+ self.compute_api.update_instance(ctxt, instance['id'], update_dict)
+ return exc.HTTPNoContent()
def action(self, req, id):
""" multi-purpose method used to reboot, rebuild, and
@@ -189,80 +183,10 @@ class Controller(wsgi.Controller):
inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id))
if not inst_ref or (inst_ref and not inst_ref.user_id == user_id):
return faults.Fault(exc.HTTPUnprocessableEntity())
+ #TODO(gundlach): pass reboot_type, support soft reboot in
+ #virt driver
cloud.reboot(id)
- def _build_server_instance(self, req, env):
- """Build instance data structure and save it to the data store."""
- ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- inst = {}
-
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
-
- flavor_id = env['server']['flavorId']
-
- instance_type, flavor = [(k, v) for k, v in
- instance_types.INSTANCE_TYPES.iteritems()
- if v['flavorid'] == flavor_id][0]
-
- image_id = env['server']['imageId']
- img_service = utils.import_object(FLAGS.image_service)
-
- image = img_service.show(image_id)
-
- if not image:
- raise Exception("Image not found")
-
- inst['image_id'] = image_id
- inst['user_id'] = user_id
- inst['launch_time'] = ltime
- inst['mac_address'] = utils.generate_mac()
- inst['project_id'] = user_id
-
- inst['state_description'] = 'scheduling'
- inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel)
- inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk)
- inst['reservation_id'] = utils.generate_uid('r')
-
- inst['display_name'] = env['server']['name']
- inst['display_description'] = env['server']['name']
-
- #TODO(dietz) this may be ill advised
- key_pair_ref = self.db_driver.key_pair_get_all_by_user(
- None, user_id)[0]
-
- inst['key_data'] = key_pair_ref['public_key']
- inst['key_name'] = key_pair_ref['name']
-
- #TODO(dietz) stolen from ec2 api, see TODO there
- inst['security_group'] = 'default'
-
- # Flavor related attributes
- inst['instance_type'] = instance_type
- inst['memory_mb'] = flavor['memory_mb']
- inst['vcpus'] = flavor['vcpus']
- inst['local_gb'] = flavor['local_gb']
- inst['mac_address'] = utils.generate_mac()
- inst['launch_index'] = 0
-
- ref = self.compute_manager.create_instance(ctxt, **inst)
- inst['id'] = ref['internal_id']
-
- inst['hostname'] = str(ref['internal_id'])
- self.compute_manager.update_instance(ctxt, inst['id'], **inst)
-
- address = self.network_manager.allocate_fixed_ip(ctxt,
- inst['id'])
-
- # TODO(vish): This probably should be done in the scheduler
- # network is setup when host is assigned
- network_topic = self._get_network_topic(ctxt)
- rpc.call(ctxt,
- network_topic,
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
- return inst
-
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
network_ref = self.network_manager.get_network(context)
diff --git a/nova/compute/api.py b/nova/compute/api.py
new file mode 100644
index 000000000..929342a1e
--- /dev/null
+++ b/nova/compute/api.py
@@ -0,0 +1,212 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all API requests relating to instances (guest vms).
+"""
+
+import logging
+import time
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova import utils
+from nova.compute import instance_types
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+def generate_default_hostname(internal_id):
+ """Default function to generate a hostname given an instance reference."""
+ return str(internal_id)
+
+
+class ComputeAPI(base.Base):
+ """API for interacting with the compute manager."""
+
+ def __init__(self, **kwargs):
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ super(ComputeAPI, self).__init__(**kwargs)
+
+ # TODO(eday): network_topic arg should go away once we push network
+ # allocation into the scheduler or compute worker.
+ def create_instances(self, context, instance_type, image_service, image_id,
+ network_topic, min_count=1, max_count=1,
+ kernel_id=None, ramdisk_id=None, name='',
+ description='', user_data='', key_name=None,
+ key_data=None, security_group='default',
+ generate_hostname=generate_default_hostname):
+ """Create the number of instances requested if quote and
+ other arguments check out ok."""
+
+ num_instances = quota.allowed_instances(context, max_count,
+ instance_type)
+ if num_instances < min_count:
+ logging.warn("Quota exceeeded for %s, tried to run %s instances",
+ context.project_id, min_count)
+ raise quota.QuotaError("Instance quota exceeded. You can only "
+ "run %s more instances of this type." %
+ num_instances, "InstanceLimitExceeded")
+
+ is_vpn = image_id == FLAGS.vpn_image_id
+ if not is_vpn:
+ image = image_service.show(context, image_id)
+ if kernel_id is None:
+ kernel_id = image.get('kernelId', FLAGS.default_kernel)
+ if ramdisk_id is None:
+ ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
+
+ # Make sure we have access to kernel and ramdisk
+ image_service.show(context, kernel_id)
+ image_service.show(context, ramdisk_id)
+
+ if security_group is None:
+ security_group = ['default']
+ if not type(security_group) is list:
+ security_group = [security_group]
+
+ security_groups = []
+ self.ensure_default_security_group(context)
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
+ if key_data is None and key_name:
+ key_pair = db.key_pair_get(context, context.user_id, key_name)
+ key_data = key_pair['public_key']
+
+ type_data = instance_types.INSTANCE_TYPES[instance_type]
+ base_options = {
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': image_id,
+ 'kernel_id': kernel_id,
+ 'ramdisk_id': ramdisk_id,
+ 'state_description': 'scheduling',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': instance_type,
+ 'memory_mb': type_data['memory_mb'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ 'display_name': name,
+ 'display_description': description,
+ 'key_name': key_name,
+ 'key_data': key_data}
+
+ elevated = context.elevated()
+ instances = []
+ logging.debug("Going to run %s instances...", num_instances)
+ for num in range(num_instances):
+ instance = dict(mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ instance_ref = self.create_instance(context, security_groups,
+ **instance)
+ instance_id = instance_ref['id']
+ internal_id = instance_ref['internal_id']
+ hostname = generate_hostname(internal_id)
+ self.update_instance(context, instance_id, hostname=hostname)
+ instances.append(dict(id=instance_id, internal_id=internal_id,
+ hostname=hostname, **instance))
+
+ # TODO(vish): This probably should be done in the scheduler
+ # or in compute as a call. The network should be
+ # allocated after the host is assigned and setup
+ # can happen at the same time.
+ address = self.network_manager.allocate_fixed_ip(context,
+ instance_id,
+ is_vpn)
+ rpc.cast(elevated,
+ network_topic,
+ {"method": "setup_fixed_ip",
+ "args": {"address": address}})
+
+ logging.debug("Casting to scheduler for %s/%s's instance %s" %
+ (context.project_id, context.user_id, instance_id))
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id}})
+
+ return instances
+
+ def ensure_default_security_group(self, context):
+ try:
+ db.security_group_get_by_name(context, context.project_id,
+ 'default')
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ group = db.security_group_create(context, values)
+
+ def create_instance(self, context, security_groups=None, **kwargs):
+ """Creates the instance in the datastore and returns the
+ new instance as a mapping
+
+ :param context: The security context
+ :param security_groups: list of security group ids to
+ attach to the instance
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ created
+
+ :retval Returns a mapping of the instance information
+ that has just been created
+
+ """
+ instance_ref = self.db.instance_create(context, kwargs)
+ inst_id = instance_ref['id']
+ # Set sane defaults if not specified
+ if kwargs.get('display_name') is None:
+ display_name = "Server %s" % instance_ref['internal_id']
+ instance_ref['display_name'] = display_name
+ self.db.instance_update(context, inst_id,
+ {'display_name': display_name})
+
+ elevated = context.elevated()
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ inst_id,
+ security_group_id)
+ return instance_ref
+
+ def update_instance(self, context, instance_id, **kwargs):
+ """Updates the instance in the datastore.
+
+ :param context: The security context
+ :param instance_id: ID of the instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :retval None
+
+ """
+ self.db.instance_update(context, instance_id, kwargs)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 67ee8f8a8..a2679e0fc 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,9 +21,29 @@
The built-in instance properties.
"""
+from nova import flags
+
+FLAGS = flags.FLAGS
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+
+def get_by_type(instance_type):
+ """Build instance data structure and save it to the data store."""
+ if instance_type is None:
+ return FLAGS.default_instance_type
+ if instance_type not in INSTANCE_TYPES:
+ raise exception.ApiError("Unknown instance type: %s",
+ instance_type)
+ return instance_type
+
+
+def get_by_flavor_id(flavor_id):
+ for instance_type, details in INSTANCE_TYPES.iteritems():
+ if details['flavorid'] == flavor_id:
+ return instance_type
+ return FLAGS.default_instance_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index e826bdaa2..dd8d41129 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
-responding to calls to check it state, attaching persistent as well as
-termination.
+responding to calls to check its state, attaching persistent storage, and
+terminating it.
**Related Flags**
@@ -45,15 +45,15 @@ from nova import manager
from nova import utils
from nova.compute import power_state
-
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
- 'Driver to use for volume creation')
+ 'Driver to use for controlling virtualization')
class ComputeManager(manager.Manager):
+
"""Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs):
@@ -84,53 +84,6 @@ class ComputeManager(manager.Manager):
"""This call passes stright through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
- def create_instance(self, context, security_groups=None, **kwargs):
- """Creates the instance in the datastore and returns the
- new instance as a mapping
-
- :param context: The security context
- :param security_groups: list of security group ids to
- attach to the instance
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- created
-
- :retval Returns a mapping of the instance information
- that has just been created
-
- """
- instance_ref = self.db.instance_create(context, kwargs)
- inst_id = instance_ref['id']
- # Set sane defaults if not specified
- if 'display_name' not in kwargs:
- display_name = "Server %s" % instance_ref['internal_id']
- instance_ref['display_name'] = display_name
- self.db.instance_update(context, inst_id,
- {'display_name': display_name})
-
- elevated = context.elevated()
- if not security_groups:
- security_groups = []
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- inst_id,
- security_group_id)
- return instance_ref
-
- def update_instance(self, context, instance_id, **kwargs):
- """Updates the instance in the datastore.
-
- :param context: The security context
- :param instance_id: ID of the instance to update
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- updated
-
- :retval None
-
- """
- self.db.instance_update(context, instance_id, kwargs)
-
@defer.inlineCallbacks
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@@ -140,7 +93,6 @@ class ComputeManager(manager.Manager):
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
- project_id = instance_ref['project_id']
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@@ -182,7 +134,6 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id)
-
yield self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
diff --git a/nova/db/base.py b/nova/db/base.py
new file mode 100644
index 000000000..1d1e80866
--- /dev/null
+++ b/nova/db/base.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Base class for classes that need modular database access.
+"""
+
+from nova import utils
+from nova import flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('db_driver', 'nova.db.api',
+ 'driver to use for database access')
+
+
+class Base(object):
+ """DB driver is injected in the init method"""
+ def __init__(self, db_driver=None):
+ if not db_driver:
+ db_driver = FLAGS.db_driver
+ self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
diff --git a/nova/manager.py b/nova/manager.py
index a6efb8732..5b61f7a4c 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -53,23 +53,19 @@ This module provides Manager, a base class for managers.
from nova import utils
from nova import flags
+from nova.db import base
from twisted.internet import defer
FLAGS = flags.FLAGS
-flags.DEFINE_string('db_driver', 'nova.db.api',
- 'driver to use for volume creation')
-class Manager(object):
- """DB driver is injected in the init method"""
+class Manager(base.Base):
def __init__(self, host=None, db_driver=None):
if not host:
host = FLAGS.host
self.host = host
- if not db_driver:
- db_driver = FLAGS.db_driver
- self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
+ super(Manager, self).__init__(db_driver)
@defer.inlineCallbacks
def periodic_tasks(self, context=None):
diff --git a/nova/quota.py b/nova/quota.py
index 01dd0ecd4..f6ca9f77c 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -94,3 +94,8 @@ def allowed_floating_ips(context, num_floating_ips):
quota = get_quota(context, project_id)
allowed_floating_ips = quota['floating_ips'] - used_floating_ips
return min(num_floating_ips, allowed_floating_ips)
+
+
+class QuotaError(exception.ApiError):
+    """Quota Exceeded"""
+ pass
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 6e91ca7bb..7c0343942 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -72,7 +72,7 @@ def stub_out_key_pair_funcs(stubs):
def stub_out_image_service(stubs):
- def fake_image_show(meh, id):
+ def fake_image_show(meh, context, id):
return dict(kernelId=1, ramdiskId=1)
stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 530d06760..44ac8f342 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -43,9 +43,13 @@ def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
def stub_instance(id, user_id=1):
- return Instance(id=id, state=0, image_id=10, display_name='server%s' % id,
- user_id=user_id)
+ return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, internal_id=id)
class ServersTest(unittest.TestCase):
@@ -63,6 +67,8 @@ class ServersTest(unittest.TestCase):
return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
+ self.stubs.Set(nova.db.api, 'instance_add_security_group',
+ return_security_group)
def tearDown(self):
self.stubs.UnsetAll()
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 71a1a4457..a55449739 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -31,6 +31,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase):
self.flags(connection_type='fake',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
+ self.compute_api = compute_api.ComputeAPI()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -66,6 +68,17 @@ class ComputeTestCase(test.TrialTestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+ def test_create_instance_defaults_display_name(self):
+ """Verify that an instance cannot be created without a display_name."""
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ ref = self.compute_api.create_instance(self.context, None,
+ **instance)
+ try:
+ self.assertNotEqual(ref.display_name, None)
+ finally:
+ db.instance_destroy(self.context, ref['id'])
+
def test_create_instance_associates_security_groups(self):
"""Make sure create_instance associates security groups"""
inst = {}
@@ -76,9 +89,9 @@ class ComputeTestCase(test.TrialTestCase):
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
- ref = self.compute.create_instance(self.context,
- security_groups=[group['id']],
- **inst)
+ ref = self.compute_api.create_instance(self.context,
+ security_groups=[group['id']],
+ **inst)
# reload to get groups
instance_ref = db.instance_get(self.context, ref['id'])
try:
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index b7c1d2acc..1966b51f7 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
- instance_type='m1.small')
+ instance_type='m1.small',
+ image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
- instance_type='m1.small')
+ instance_type='m1.small',
+ image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
+ self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
@@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
- self.assertRaises(cloud.QuotaError,
+ self.assertRaises(quota.QuotaError,
self.cloud.create_volume,
self.context,
size=10)
@@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
+ self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
index 3d598c463..ece430407 100644
--- a/nova/virt/xenapi/__init__.py
+++ b/nova/virt/xenapi/__init__.py
@@ -13,3 +13,14 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+"""
+This is loaded late so that there's no need to install this library
+when not using XenAPI
+"""
+
+XenAPI = None
+global XenAPI
+
+if XenAPI is None:
+ XenAPI = __import__('XenAPI')
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index b58b9159c..8cb4cce3a 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -15,20 +15,25 @@
# under the License.
"""
-Helper methods for operations related to the management of network records and
-their attributes like bridges, PIFs, QoS, as well as their lookup functions.
+Helper methods for operations related to the management of network
+records and their attributes like bridges, PIFs, QoS, as well as
+their lookup functions.
"""
from twisted.internet import defer
class NetworkHelper():
- def __init__(self, session):
+ """
+ The class that wraps the helper methods together.
+ """
+ def __init__(self):
return
@classmethod
@defer.inlineCallbacks
- def find_network_with_bridge(self, session, bridge):
+ def find_network_with_bridge(cls, session, bridge):
+ """ Return the network on which the bridge is attached, if found """
expr = 'field "bridge" = "%s"' % bridge
networks = yield session.call_xenapi('network.get_all_records_where',
expr)
diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py
index aa3535162..65576019e 100644
--- a/nova/virt/xenapi/novadeps.py
+++ b/nova/virt/xenapi/novadeps.py
@@ -15,13 +15,17 @@
# under the License.
+"""
+It captures all the inner details of Nova classes and avoids their exposure
+to the implementation of the XenAPI module. One benefit of this is to avoid
+sprawl of code changes.
+"""
+
import re
import string
from nova import db
from nova import flags
-from nova import process
-from nova import utils
from nova import context
from nova.compute import power_state
@@ -60,23 +64,28 @@ flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix')
class Configuration(object):
+ """ Wraps Configuration details into common class """
def __init__(self):
self._flags = flags.FLAGS
@property
def xenapi_connection_url(self):
+ """ Return the connection url """
return self._flags.xenapi_connection_url
@property
def xenapi_connection_username(self):
+ """ Return the username used for the connection """
return self._flags.xenapi_connection_username
@property
def xenapi_connection_password(self):
+ """ Return the password used for the connection """
return self._flags.xenapi_connection_password
@property
def xenapi_task_poll_interval(self):
+ """ Return the poll interval for the connection """
return self._flags.xenapi_task_poll_interval
@property
@@ -96,72 +105,90 @@ config = Configuration()
class Instance(object):
+ """ Wraps up instance specifics """
@classmethod
- def get_name(self, instance):
+ def get_name(cls, instance):
+ """ The name of the instance """
return instance.name
@classmethod
- def get_type(self, instance):
+ def get_type(cls, instance):
+ """ The type of the instance """
return instance_types.INSTANCE_TYPES[instance.instance_type]
@classmethod
- def get_project(self, instance):
+ def get_project(cls, instance):
+        """ The project the instance belongs to """
return AuthManager().get_project(instance.project_id)
@classmethod
- def get_project_id(self, instance):
+ def get_project_id(cls, instance):
+        """ The id of the project the instance belongs to """
return instance.project_id
@classmethod
- def get_image_id(self, instance):
+ def get_image_id(cls, instance):
+ """ The instance's image id """
return instance.image_id
@classmethod
- def get_kernel_id(self, instance):
+ def get_kernel_id(cls, instance):
+ """ The instance's kernel id """
return instance.kernel_id
@classmethod
- def get_ramdisk_id(self, instance):
+ def get_ramdisk_id(cls, instance):
+ """ The instance's ramdisk id """
return instance.ramdisk_id
@classmethod
- def get_network(self, instance):
+ def get_network(cls, instance):
+ """ The network the instance is connected to """
        # TODO: is get_admin_context the right context to retrieve?
return db.project_get_network(context.get_admin_context(),
instance.project_id)
@classmethod
- def get_mac(self, instance):
+ def get_mac(cls, instance):
+ """ The instance's MAC address """
return instance.mac_address
@classmethod
- def get_user(self, instance):
+ def get_user(cls, instance):
+ """ The owner of the instance """
return AuthManager().get_user(instance.user_id)
class Network(object):
+ """ Wraps up network specifics """
@classmethod
- def get_bridge(self, network):
+ def get_bridge(cls, network):
+ """ the bridge for the network """
return network.bridge
class Image(object):
+ """ Wraps up image specifics """
@classmethod
- def get_url(self, image):
+ def get_url(cls, image):
+ """ the url to get the image from """
return images.image_url(image)
class User(object):
+ """ Wraps up user specifics """
@classmethod
- def get_access(self, user, project):
+ def get_access(cls, user, project):
+ """ access key """
return AuthManager().get_access_key(user, project)
@classmethod
- def get_secret(self, user):
+ def get_secret(cls, user):
+ """ access secret """
return user.secret
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6966e7b7b..e6d20f98b 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -20,6 +20,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import logging
+import XenAPI
from twisted.internet import defer
@@ -31,12 +32,15 @@ from novadeps import User
class VMHelper():
- def __init__(self, session):
+ """
+ The class that wraps the helper methods together.
+ """
+ def __init__(self):
return
@classmethod
@defer.inlineCallbacks
- def create_vm(self, session, instance, kernel, ramdisk):
+ def create_vm(cls, session, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
@@ -80,7 +84,7 @@ class VMHelper():
@classmethod
@defer.inlineCallbacks
- def create_vbd(self, session, vm_ref, vdi_ref, userdevice, bootable):
+ def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
@@ -143,7 +147,7 @@ class VMHelper():
@classmethod
@defer.inlineCallbacks
- def create_vif(self, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
@@ -165,7 +169,7 @@ class VMHelper():
@classmethod
@defer.inlineCallbacks
- def fetch_image(self, session, image, user, project, use_sr):
+ def fetch_image(cls, session, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
its kernel and ramdisk (if external kernels are being used).
@@ -173,7 +177,7 @@ class VMHelper():
url = Image.get_url(image)
access = User.get_access(user, project)
- logging.debug("Asking xapi to fetch %s as %s" % (url, access))
+ logging.debug("Asking xapi to fetch %s as %s", url, access)
fn = use_sr and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -187,11 +191,13 @@ class VMHelper():
@classmethod
@utils.deferredToThread
- def lookup(self, session, i):
+ def lookup(cls, session, i):
+        """ Look the instance i up and return it if available """
return VMHelper.lookup_blocking(session, i)
@classmethod
- def lookup_blocking(self, session, i):
+ def lookup_blocking(cls, session, i):
+ """ Synchronous lookup """
vms = session.get_xenapi().VM.get_by_name_label(i)
n = len(vms)
if n == 0:
@@ -203,11 +209,13 @@ class VMHelper():
@classmethod
@utils.deferredToThread
- def lookup_vm_vdis(self, session, vm):
+ def lookup_vm_vdis(cls, session, vm):
+ """ Look for the VDIs that are attached to the VM """
return VMHelper.lookup_vm_vdis_blocking(session, vm)
@classmethod
- def lookup_vm_vdis_blocking(self, session, vm):
+ def lookup_vm_vdis_blocking(cls, session, vm):
+ """ Synchronous lookup_vm_vdis """
# Firstly we get the VBDs, then the VDIs.
# TODO: do we leave the read-only devices?
vbds = session.get_xenapi().VM.get_VBDs(vm)
@@ -218,7 +226,8 @@ class VMHelper():
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- except Exception, exc:
+ logging.debug('VDI %s is still available', record['uuid'])
+ except XenAPI.Failure, exc:
logging.warn(exc)
else:
vdis.append(vdi)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d6ea5e7db..3db86f179 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -19,6 +19,7 @@ Management class for VM-related functions (spawn, reboot, etc).
"""
import logging
+import XenAPI
from twisted.internet import defer
@@ -31,15 +32,20 @@ from network_utils import NetworkHelper
class VMOps(object):
+ """
+ Management class for VM-related tasks
+ """
def __init__(self, session):
self._session = session
def list_instances(self):
+ """ List VM instances """
return [self._session.get_xenapi().VM.get_name_label(vm) \
for vm in self._session.get_xenapi().VM.get_all()]
@defer.inlineCallbacks
def spawn(self, instance):
+ """ Create VM instance """
vm = yield VMHelper.lookup(self._session, Instance.get_name(instance))
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
@@ -71,6 +77,7 @@ class VMOps(object):
@defer.inlineCallbacks
def reboot(self, instance):
+ """ Reboot VM instance """
instance_name = Instance.get_name(instance)
vm = yield VMHelper.lookup(self._session, instance_name)
if vm is None:
@@ -80,6 +87,7 @@ class VMOps(object):
@defer.inlineCallbacks
def destroy(self, instance):
+ """ Destroy VM instance """
vm = yield VMHelper.lookup(self._session, Instance.get_name(instance))
if vm is None:
# Don't complain, just return. This lets us clean up instances
@@ -91,7 +99,7 @@ class VMOps(object):
task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
vm)
yield self._session.wait_for_task(task)
- except Exception, exc:
+ except XenAPI.Failure, exc:
logging.warn(exc)
# Disk clean-up
if vdis:
@@ -100,15 +108,16 @@ class VMOps(object):
task = yield self._session.call_xenapi('Async.VDI.destroy',
vdi)
yield self._session.wait_for_task(task)
- except Exception, exc:
+ except XenAPI.Failure, exc:
logging.warn(exc)
try:
task = yield self._session.call_xenapi('Async.VM.destroy', vm)
yield self._session.wait_for_task(task)
- except Exception, exc:
+ except XenAPI.Failure, exc:
logging.warn(exc)
def get_info(self, instance_id):
+ """ Return data about VM instance """
vm = VMHelper.lookup_blocking(self._session, instance_id)
if vm is None:
raise Exception('instance not present %s' % instance_id)
@@ -120,4 +129,6 @@ class VMOps(object):
'cpu_time': 0}
def get_console_output(self, instance):
- return 'FAKE CONSOLE OUTPUT'
+ """ Return snapshot of console """
+ # TODO: implement this to fix pylint!
+ return 'FAKE CONSOLE OUTPUT of instance'
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index ec4343329..6c48f6491 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -18,6 +18,7 @@
Management class for Storage-related functions (attach, detach, etc).
"""
import logging
+import XenAPI
from twisted.internet import defer
@@ -28,11 +29,15 @@ from novadeps import Volume
class VolumeOps(object):
+ """
+ Management class for Volume-related tasks
+ """
def __init__(self, session):
self._session = session
@defer.inlineCallbacks
def attach_volume(self, instance_name, device_path, mountpoint):
+ """ Attach volume storage to VM instance """
# Before we start, check that the VM exists
vm_ref = yield VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
@@ -91,6 +96,7 @@ class VolumeOps(object):
@defer.inlineCallbacks
def detach_volume(self, instance_name, mountpoint):
+ """ Detach volume storage to VM instance """
# Before we start, check that the VM exists
vm_ref = yield VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index d3f66b12c..58a505d89 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -61,19 +61,14 @@ from nova import utils
from xenapi.vmops import VMOps
from xenapi.volumeops import VolumeOps
from xenapi.novadeps import Configuration
+from xenapi import XenAPI
-XenAPI = None
Config = Configuration()
def get_connection(_):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
- # This is loaded late so that there's no need to install this
- # library when not using XenAPI.
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
url = Config.xenapi_connection_url
username = Config.xenapi_connection_username
password = Config.xenapi_connection_password
@@ -86,47 +81,59 @@ def get_connection(_):
class XenAPIConnection(object):
+ """ A connection to XenServer or Xen Cloud Platform """
def __init__(self, url, user, pw):
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
def list_instances(self):
+ """ List VM instances """
return self._vmops.list_instances()
def spawn(self, instance):
+ """ Create VM instance """
self._vmops.spawn(instance)
def reboot(self, instance):
+ """ Reboot VM instance """
self._vmops.reboot(instance)
def destroy(self, instance):
+ """ Destroy VM instance """
self._vmops.destroy(instance)
def get_info(self, instance_id):
+ """ Return data about VM instance """
return self._vmops.get_info(instance_id)
def get_console_output(self, instance):
+ """ Return snapshot of console """
return self._vmops.get_console_output(instance)
def attach_volume(self, instance_name, device_path, mountpoint):
+ """ Attach volume storage to VM instance """
return self._volumeops.attach_volume(instance_name,
device_path,
mountpoint)
def detach_volume(self, instance_name, mountpoint):
+ """ Detach volume storage to VM instance """
return self._volumeops.detach_volume(instance_name, mountpoint)
class XenAPISession(object):
+ """ The session to invoke XenAPI SDK calls """
def __init__(self, url, user, pw):
self._session = XenAPI.Session(url)
self._session.login_with_password(user, pw)
def get_xenapi(self):
+ """ Return the xenapi object """
return self._session.xenapi
def get_xenapi_host(self):
+ """ Return the xenapi host """
return self._session.xenapi.session.get_this_host(self._session.handle)
@utils.deferredToThread
@@ -173,12 +180,13 @@ class XenAPISession(object):
error_info)
deferred.errback(XenAPI.Failure(error_info))
#logging.debug('Polling task %s done.', task)
- except Exception, exc:
+ except XenAPI.Failure, exc:
logging.warn(exc)
deferred.errback(exc)
def _unwrap_plugin_exceptions(func, *args, **kwargs):
+ """ Parse exception details """
try:
return func(*args, **kwargs)
except XenAPI.Failure, exc: