summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorSandy Walsh <sandy.walsh@rackspace.com>2011-06-20 10:22:53 -0700
committerSandy Walsh <sandy.walsh@rackspace.com>2011-06-20 10:22:53 -0700
commitf1b009d0ff5001a6a48ca1cfb71fa9da7139bc62 (patch)
tree916d2a23003a258ebf1f832b699f17ee0d8ac106 /nova
parent1acb699a6fb0ea7a7d84ba4598790d7c9d7abd14 (diff)
parentc2a8d0f1e2e9a25465100128bae4f60b532d16f5 (diff)
tests working again
Diffstat (limited to 'nova')
-rw-r--r--nova/api/ec2/apirequest.py78
-rw-r--r--nova/api/ec2/cloud.py61
-rw-r--r--nova/api/ec2/ec2utils.py94
-rw-r--r--nova/api/openstack/__init__.py73
-rw-r--r--nova/api/openstack/contrib/volumes.py6
-rw-r--r--nova/api/openstack/create_instance_helper.py (renamed from nova/api/openstack/create_instance_controller.py)63
-rw-r--r--nova/api/openstack/extensions.py13
-rw-r--r--nova/api/openstack/limits.py2
-rw-r--r--nova/api/openstack/notes.txt3
-rw-r--r--nova/api/openstack/server_metadata.py49
-rw-r--r--nova/api/openstack/servers.py42
-rw-r--r--nova/api/openstack/wsgi.py33
-rw-r--r--nova/api/openstack/zones.py49
-rw-r--r--nova/compute/api.py111
-rw-r--r--nova/compute/manager.py146
-rw-r--r--nova/compute/utils.py29
-rw-r--r--nova/crypto.py3
-rw-r--r--nova/db/api.py35
-rw-r--r--nova/db/sqlalchemy/api.py101
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py45
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py87
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/025_add_zone_weight_offsets.py38
-rw-r--r--nova/db/sqlalchemy/models.py40
-rw-r--r--nova/exception.py4
-rw-r--r--nova/flags.py4
-rw-r--r--nova/image/fake.py8
-rw-r--r--nova/image/local.py167
-rw-r--r--nova/scheduler/api.py5
-rw-r--r--nova/scheduler/simple.py8
-rw-r--r--nova/scheduler/zone_aware_scheduler.py15
-rw-r--r--nova/scheduler/zone_manager.py3
-rw-r--r--nova/tests/api/openstack/fakes.py1
-rw-r--r--nova/tests/api/openstack/test_api.py21
-rw-r--r--nova/tests/api/openstack/test_extensions.py13
-rw-r--r--nova/tests/api/openstack/test_images.py30
-rw-r--r--nova/tests/api/openstack/test_server_metadata.py80
-rw-r--r--nova/tests/api/openstack/test_servers.py14
-rw-r--r--nova/tests/api/openstack/test_wsgi.py20
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/integrated/api/client.py16
-rw-r--r--nova/tests/scheduler/test_host_filter.py8
-rw-r--r--nova/tests/scheduler/test_least_cost_scheduler.py8
-rw-r--r--nova/tests/scheduler/test_zone_aware_scheduler.py35
-rw-r--r--nova/tests/test_api.py2
-rw-r--r--nova/tests/test_cloud.py364
-rw-r--r--nova/tests/test_compute.py29
-rw-r--r--nova/tests/test_crypto.py83
-rw-r--r--nova/tests/test_xenapi.py20
-rw-r--r--nova/virt/driver.py2
-rw-r--r--nova/virt/fake.py6
-rw-r--r--nova/virt/hyperv.py2
-rw-r--r--nova/virt/libvirt.xml.template9
-rw-r--r--nova/virt/libvirt/connection.py76
-rw-r--r--nova/virt/vmwareapi/vmware_images.py6
-rw-r--r--nova/virt/vmwareapi_conn.py2
-rw-r--r--nova/virt/xenapi/fake.py1
-rw-r--r--nova/virt/xenapi/vm_utils.py36
-rw-r--r--nova/virt/xenapi/vmops.py21
-rw-r--r--nova/virt/xenapi_conn.py2
-rw-r--r--nova/volume/api.py14
-rw-r--r--nova/volume/driver.py8
61 files changed, 1754 insertions, 592 deletions
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 6672e60bb..7d78c5cfa 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -21,22 +21,15 @@ APIRequest class
"""
import datetime
-import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import log as logging
+from nova.api.ec2 import ec2utils
LOG = logging.getLogger("nova.api.request")
-_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
-
-
-def _camelcase_to_underscore(str):
- return _c2u.sub(r'_\1', str).lower().strip('_')
-
-
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
@@ -51,59 +44,6 @@ def _database_to_isoformat(datetimeobj):
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
-def _try_convert(value):
- """Return a non-string from a string or unicode, if possible.
-
- ============= =====================================================
- When value is returns
- ============= =====================================================
- zero-length ''
- 'None' None
- 'True' True
- 'False' False
- '0', '-0' 0
- 0xN, -0xN int from hex (postitive) (N is any number)
- 0bN, -0bN int from binary (positive) (N is any number)
- * try conversion to int, float, complex, fallback value
-
- """
- if len(value) == 0:
- return ''
- if value == 'None':
- return None
- if value == 'True':
- return True
- if value == 'False':
- return False
- valueneg = value[1:] if value[0] == '-' else value
- if valueneg == '0':
- return 0
- if valueneg == '':
- return value
- if valueneg[0] == '0':
- if valueneg[1] in 'xX':
- return int(value, 16)
- elif valueneg[1] in 'bB':
- return int(value, 2)
- else:
- try:
- return int(value, 8)
- except ValueError:
- pass
- try:
- return int(value)
- except ValueError:
- pass
- try:
- return float(value)
- except ValueError:
- pass
- try:
- return complex(value)
- except ValueError:
- return value
-
-
class APIRequest(object):
def __init__(self, controller, action, version, args):
self.controller = controller
@@ -114,7 +54,7 @@ class APIRequest(object):
def invoke(self, context):
try:
method = getattr(self.controller,
- _camelcase_to_underscore(self.action))
+ ec2utils.camelcase_to_underscore(self.action))
except AttributeError:
controller = self.controller
action = self.action
@@ -125,19 +65,7 @@ class APIRequest(object):
# and reraise as 400 error.
raise Exception(_error)
- args = {}
- for key, value in self.args.items():
- parts = key.split(".")
- key = _camelcase_to_underscore(parts[0])
- if isinstance(value, str) or isinstance(value, unicode):
- # NOTE(vish): Automatically convert strings back
- # into their respective values
- value = _try_convert(value)
- if len(parts) > 1:
- d = args.get(key, {})
- d[parts[1]] = value
- value = d
- args[key] = value
+ args = ec2utils.dict_from_dotted_str(self.args.items())
for key in args.keys():
# NOTE(vish): Turn numeric dict keys into lists
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index e1c65ae40..97875f1f5 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -909,6 +909,25 @@ class CloudController(object):
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
+ for bdm in kwargs.get('block_device_mapping', []):
+ # NOTE(yamahata)
+            # BlockDeviceMapping.<N>.DeviceName
+            # BlockDeviceMapping.<N>.Ebs.SnapshotId
+            # BlockDeviceMapping.<N>.Ebs.VolumeSize
+            # BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
+            # BlockDeviceMapping.<N>.VirtualName
+ # => remove .Ebs and allow volume id in SnapshotId
+ ebs = bdm.pop('ebs', None)
+ if ebs:
+ ec2_id = ebs.pop('snapshot_id')
+ id = ec2utils.ec2_id_to_id(ec2_id)
+ if ec2_id.startswith('snap-'):
+ bdm['snapshot_id'] = id
+ elif ec2_id.startswith('vol-'):
+ bdm['volume_id'] = id
+ ebs.setdefault('delete_on_termination', True)
+ bdm.update(ebs)
+
image = self._get_image(context, kwargs['image_id'])
if image:
@@ -933,37 +952,54 @@ class CloudController(object):
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
- 'AvailabilityZone'))
+ 'AvailabilityZone'),
+ block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context,
instances[0]['reservation_id'])
+ def _do_instance(self, action, context, ec2_id):
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ action(context, instance_id=instance_id)
+
+ def _do_instances(self, action, context, instance_id):
+ for ec2_id in instance_id:
+ self._do_instance(action, context, ec2_id)
+
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
- for ec2_id in instance_id:
- instance_id = ec2utils.ec2_id_to_id(ec2_id)
- self.compute_api.delete(context, instance_id=instance_id)
+ self._do_instances(self.compute_api.delete, context, instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
- for ec2_id in instance_id:
- instance_id = ec2utils.ec2_id_to_id(ec2_id)
- self.compute_api.reboot(context, instance_id=instance_id)
+ self._do_instances(self.compute_api.reboot, context, instance_id)
+ return True
+
+ def stop_instances(self, context, instance_id, **kwargs):
+        """Stop each instance in instance_id.
+        Here instance_id is a list of instance ids"""
+ LOG.debug(_("Going to stop instances"))
+ self._do_instances(self.compute_api.stop, context, instance_id)
+ return True
+
+ def start_instances(self, context, instance_id, **kwargs):
+        """Start each instance in instance_id.
+        Here instance_id is a list of instance ids"""
+ LOG.debug(_("Going to start instances"))
+ self._do_instances(self.compute_api.start, context, instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.rescue(context, instance_id=instance_id)
+        self._do_instance(self.compute_api.rescue, context, instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.unrescue(context, instance_id=instance_id)
+ self._do_instance(self.compute_api.unrescue, context, instance_id)
return True
def update_instance(self, context, instance_id, **kwargs):
@@ -974,7 +1010,8 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.update(context, instance_id=instance_id, **kwargs)
+ self.compute_api.update(context, instance_id=instance_id,
+ **changes)
return True
@staticmethod
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 163aa4ed2..222e1de1e 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -16,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
+
from nova import exception
@@ -30,3 +32,95 @@ def ec2_id_to_id(ec2_id):
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
+
+
+_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
+
+
+def camelcase_to_underscore(str):
+ return _c2u.sub(r'_\1', str).lower().strip('_')
+
+
+def _try_convert(value):
+ """Return a non-string from a string or unicode, if possible.
+
+ ============= =====================================================
+ When value is returns
+ ============= =====================================================
+ zero-length ''
+ 'None' None
+ 'True' True case insensitive
+ 'False' False case insensitive
+ '0', '-0' 0
+    0xN, -0xN     int from hex (positive) (N is any number)
+ 0bN, -0bN int from binary (positive) (N is any number)
+ * try conversion to int, float, complex, fallback value
+
+ """
+ if len(value) == 0:
+ return ''
+ if value == 'None':
+ return None
+ lowered_value = value.lower()
+ if lowered_value == 'true':
+ return True
+ if lowered_value == 'false':
+ return False
+ valueneg = value[1:] if value[0] == '-' else value
+ if valueneg == '0':
+ return 0
+ if valueneg == '':
+ return value
+ if valueneg[0] == '0':
+ if valueneg[1] in 'xX':
+ return int(value, 16)
+ elif valueneg[1] in 'bB':
+ return int(value, 2)
+ else:
+ try:
+ return int(value, 8)
+ except ValueError:
+ pass
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ try:
+ return float(value)
+ except ValueError:
+ pass
+ try:
+ return complex(value)
+ except ValueError:
+ return value
+
+
+def dict_from_dotted_str(items):
+ """parse multi dot-separated argument into dict.
+    EBS boot uses multiple dot-separated arguments like
+ BlockDeviceMapping.1.DeviceName=snap-id
+ Convert the above into
+ {'block_device_mapping': {'1': {'device_name': snap-id}}}
+ """
+ args = {}
+ for key, value in items:
+ parts = key.split(".")
+ key = camelcase_to_underscore(parts[0])
+ if isinstance(value, str) or isinstance(value, unicode):
+ # NOTE(vish): Automatically convert strings back
+ # into their respective values
+ value = _try_convert(value)
+
+ if len(parts) > 1:
+ d = args.get(key, {})
+ args[key] = d
+ for k in parts[1:-1]:
+ k = camelcase_to_underscore(k)
+ v = d.get(k, {})
+ d[k] = v
+ d = v
+ d[camelcase_to_underscore(parts[-1])] = value
+ else:
+ args[key] = value
+
+ return args
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index e0ae55105..f24017df0 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -81,7 +81,9 @@ class APIRouter(base_wsgi.Router):
self._setup_routes(mapper)
super(APIRouter, self).__init__(mapper)
- def _setup_routes(self, mapper, version='1.0'):
+ def _setup_routes(self, mapper, version):
+ """Routes common to all versions."""
+
server_members = self.server_members
server_members['action'] = 'POST'
if FLAGS.allow_admin_api:
@@ -98,14 +100,6 @@ class APIRouter(base_wsgi.Router):
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
- mapper.resource("zone", "zones",
- controller=zones.create_resource(version),
- collection={'detail': 'GET',
- 'info': 'GET',
- 'select': 'POST',
- 'boot': 'POST'
- })
-
mapper.resource("user", "users",
controller=users.create_resource(),
collection={'detail': 'GET'})
@@ -114,10 +108,33 @@ class APIRouter(base_wsgi.Router):
controller=accounts.create_resource(),
collection={'detail': 'GET'})
+ mapper.resource("zone", "zones",
+ controller=zones.create_resource(version),
+ collection={'detail': 'GET',
+ 'info': 'GET',
+ 'select': 'POST',
+ 'boot': 'POST'})
+
mapper.resource("console", "consoles",
- controller=consoles.create_resource(),
- parent_resource=dict(member_name='server',
- collection_name='servers'))
+ controller=consoles.create_resource(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
+
+ mapper.resource("server", "servers",
+ controller=servers.create_resource(version),
+ collection={'detail': 'GET'},
+ member=self.server_members)
+
+ mapper.resource("image", "images",
+ controller=images.create_resource(version),
+ collection={'detail': 'GET'})
+
+ mapper.resource("limit", "limits",
+ controller=limits.create_resource(version))
+
+ mapper.resource("flavor", "flavors",
+ controller=flavors.create_resource(version),
+ collection={'detail': 'GET'})
super(APIRouter, self).__init__(mapper)
@@ -126,20 +143,11 @@ class APIRouterV10(APIRouter):
"""Define routes specific to OpenStack API V1.0."""
def _setup_routes(self, mapper):
- super(APIRouterV10, self)._setup_routes(mapper, version='1.0')
- mapper.resource("server", "servers",
- controller=servers.create_resource('1.0'),
- collection={'detail': 'GET'},
- member=self.server_members)
-
+ super(APIRouterV10, self)._setup_routes(mapper, '1.0')
mapper.resource("image", "images",
controller=images.create_resource('1.0'),
collection={'detail': 'GET'})
- mapper.resource("flavor", "flavors",
- controller=flavors.create_resource('1.0'),
- collection={'detail': 'GET'})
-
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
controller=shared_ip_groups.create_resource())
@@ -149,9 +157,6 @@ class APIRouterV10(APIRouter):
parent_resource=dict(member_name='server',
collection_name='servers'))
- mapper.resource("limit", "limits",
- controller=limits.create_resource('1.0'))
-
mapper.resource("ip", "ips", controller=ips.create_resource(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
@@ -162,16 +167,7 @@ class APIRouterV11(APIRouter):
"""Define routes specific to OpenStack API V1.1."""
def _setup_routes(self, mapper):
- super(APIRouterV11, self)._setup_routes(mapper, version='1.1')
- mapper.resource("server", "servers",
- controller=servers.create_resource('1.1'),
- collection={'detail': 'GET'},
- member=self.server_members)
-
- mapper.resource("image", "images",
- controller=images.create_resource('1.1'),
- collection={'detail': 'GET'})
-
+ super(APIRouterV11, self)._setup_routes(mapper, '1.1')
mapper.resource("image_meta", "meta",
controller=image_metadata.create_resource(),
parent_resource=dict(member_name='image',
@@ -181,10 +177,3 @@ class APIRouterV11(APIRouter):
controller=server_metadata.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
-
- mapper.resource("flavor", "flavors",
- controller=flavors.create_resource('1.1'),
- collection={'detail': 'GET'})
-
- mapper.resource("limit", "limits",
- controller=limits.create_resource('1.1'))
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index feabdce89..e5e2c5b50 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -301,7 +301,7 @@ class Volumes(extensions.ExtensionDescriptor):
return "Volumes"
def get_alias(self):
- return "VOLUMES"
+ return "os-volumes"
def get_description(self):
return "Volumes support"
@@ -317,12 +317,12 @@ class Volumes(extensions.ExtensionDescriptor):
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
- res = extensions.ResourceExtension('volumes',
+ res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
- res = extensions.ResourceExtension('volume_attachments',
+ res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
diff --git a/nova/api/openstack/create_instance_controller.py b/nova/api/openstack/create_instance_helper.py
index 90f2542d9..436e524c1 100644
--- a/nova/api/openstack/create_instance_controller.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,7 +33,7 @@ from nova.api.openstack import wsgi
from nova.auth import manager as auth_manager
-LOG = logging.getLogger('nova.api.openstack.create_instance_controller')
+LOG = logging.getLogger('nova.api.openstack.create_instance_helper')
FLAGS = flags.FLAGS
@@ -47,7 +45,7 @@ class CreateFault(exception.NovaException):
super(CreateFault, self).__init__()
-class OpenstackCreateInstanceController(object):
+class CreateInstanceHelper(object):
"""This is the base class for OS API Controllers that
are capable of creating instances (currently Servers and Zones).
@@ -55,22 +53,11 @@ class OpenstackCreateInstanceController(object):
to move this code back into servers.py
"""
- def __init__(self):
+ def __init__(self, controller):
"""We need the image service to create an instance."""
+ self.controller = controller
self._image_service = utils.import_object(FLAGS.image_service)
- super(OpenstackCreateInstanceController, self).__init__()
-
- # Default to the 1.0 naming scheme.
-
- def _image_ref_from_req_data(self, data):
- return data['server']['imageId']
-
- def _flavor_id_from_req_data(self, data):
- return data['server']['flavorId']
-
- def _get_server_admin_password(self, server):
- """ Determine the admin password for a server on creation """
- return utils.generate_password(16)
+ super(CreateInstanceHelper, self).__init__()
def create_instance(self, req, body, create_method):
"""Creates a new server for the given user. The approach
@@ -87,7 +74,7 @@ class OpenstackCreateInstanceController(object):
context = req.environ['nova.context']
- password = self._get_server_admin_password(body['server'])
+ password = self.controller._get_server_admin_password(body['server'])
key_name = None
key_data = None
@@ -97,7 +84,7 @@ class OpenstackCreateInstanceController(object):
key_name = key_pair['name']
key_data = key_pair['public_key']
- image_href = self._image_ref_from_req_data(body)
+ image_href = self.controller._image_ref_from_req_data(body)
try:
image_service, image_id = nova.image.get_image_service(image_href)
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
@@ -107,7 +94,7 @@ class OpenstackCreateInstanceController(object):
except Exception, e:
msg = _("Cannot find requested image %(image_href)s: %(e)s" %
locals())
- raise faults.Fault(exc.HTTPBadRequest(msg))
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
personality = body['server'].get('personality')
@@ -115,11 +102,11 @@ class OpenstackCreateInstanceController(object):
if personality:
injected_files = self._get_injected_files(personality)
- flavor_id = self._flavor_id_from_req_data(body)
+ flavor_id = self.controller._flavor_id_from_req_data(body)
if not 'name' in body['server']:
msg = _("Server name is not defined")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
zone_blob = body['server'].get('blob')
name = body['server']['name']
@@ -134,8 +121,7 @@ class OpenstackCreateInstanceController(object):
extra_values = {
'instance_type': inst_type,
'image_ref': image_href,
- 'password': password
- }
+ 'password': password}
return (extra_values,
create_method(context,
@@ -151,14 +137,12 @@ class OpenstackCreateInstanceController(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
- reservation_id=reservation_id
- )
- )
+ reservation_id=reservation_id))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
- raise faults.Fault(exc.HTTPBadRequest(msg))
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
# Let the caller deal with unhandled exceptions.
@@ -193,11 +177,11 @@ class OpenstackCreateInstanceController(object):
def _validate_server_name(self, value):
if not isinstance(value, basestring):
msg = _("Server name is not a string or unicode")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
if value.strip() == '':
msg = _("Server name is an empty string")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
def _get_kernel_ramdisk_from_image(self, req, image_id):
"""Fetch an image from the ImageService, then if present, return the
@@ -266,6 +250,21 @@ class OpenstackCreateInstanceController(object):
injected_files.append((path, contents))
return injected_files
+ def _get_server_admin_password_old_style(self, server):
+ """ Determine the admin password for a server on creation """
+ return utils.generate_password(16)
+
+ def _get_server_admin_password_new_style(self, server):
+ """ Determine the admin password for a server on creation """
+ password = server.get('adminPass')
+
+ if password is None:
+ return utils.generate_password(16)
+ if not isinstance(password, basestring) or password == '':
+ msg = _("Invalid adminPass")
+ raise exc.HTTPBadRequest(explanation=msg)
+ return password
+
class ServerXMLDeserializer(wsgi.XMLDeserializer):
"""
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 54e17e23d..da06ecd15 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -374,6 +374,8 @@ class ExtensionManager(object):
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+ return False
+ return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
@@ -412,15 +414,16 @@ class ExtensionManager(object):
'file': ext_path})
continue
new_ext = new_ext_class()
- self._check_extension(new_ext)
- self._add_extension(new_ext)
+ self.add_extension(new_ext)
+
+ def add_extension(self, ext):
+ # Do nothing if the extension doesn't check out
+ if not self._check_extension(ext):
+ return
- def _add_extension(self, ext):
alias = ext.get_alias()
LOG.audit(_('Loaded extension: %s'), alias)
- self._check_extension(ext)
-
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index dc2bc6bbc..fede96e33 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -99,7 +99,7 @@ def create_resource(version='1.0'):
serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
- metadata=metadata)
+ metadata=metadata),
}
return wsgi.Resource(controller, serializers=serializers)
diff --git a/nova/api/openstack/notes.txt b/nova/api/openstack/notes.txt
index 2330f1002..4e95bffc8 100644
--- a/nova/api/openstack/notes.txt
+++ b/nova/api/openstack/notes.txt
@@ -7,9 +7,6 @@ image ids.
GlanceImageService(ImageService):
image ids are URIs.
-LocalImageService(ImageService):
-image ids are random strings.
-
OpenstackAPITranslationStore:
translates RS server/images/flavor/etc ids into formats required
by a given ImageService strategy.
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index b38b84a2a..8a314de22 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -18,9 +18,10 @@
from webob import exc
from nova import compute
-from nova import quota
from nova.api.openstack import faults
from nova.api.openstack import wsgi
+from nova import exception
+from nova import quota
class Controller(object):
@@ -37,23 +38,39 @@ class Controller(object):
meta_dict[key] = value
return dict(metadata=meta_dict)
+ def _check_body(self, body):
+ if body == None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
- return self._get_metadata(context, server_id)
+ try:
+ return self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def create(self, req, server_id, body):
+ self._check_body(body)
context = req.environ['nova.context']
metadata = body.get('metadata')
try:
self.compute_api.update_or_create_instance_metadata(context,
server_id,
metadata)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
- return req.body
+
+ return body
def update(self, req, server_id, id, body):
+ self._check_body(body)
context = req.environ['nova.context']
if not id in body:
expl = _('Request body and URI mismatch')
@@ -65,24 +82,38 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
body)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
- return req.body
+ return body
def show(self, req, server_id, id):
""" Return a single metadata item """
context = req.environ['nova.context']
- data = self._get_metadata(context, server_id)
- if id in data['metadata']:
+ try:
+ data = self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
+ try:
return {id: data['metadata'][id]}
- else:
- return faults.Fault(exc.HTTPNotFound())
+ except KeyError:
+ msg = _("metadata item %s was not found" % (id))
+ raise exc.HTTPNotFound(explanation=msg)
def delete(self, req, server_id, id):
""" Deletes an existing metadata """
context = req.environ['nova.context']
- self.compute_api.delete_instance_metadata(context, server_id, id)
+ try:
+ self.compute_api.delete_instance_metadata(context, server_id, id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 1b18c4ecb..b82a6de19 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -24,7 +24,7 @@ from nova import flags
from nova import log as logging
from nova import utils
from nova.api.openstack import common
-from nova.api.openstack import create_instance_controller as base_controller
+from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import faults
import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
@@ -39,19 +39,19 @@ LOG = logging.getLogger('nova.api.openstack.servers')
FLAGS = flags.FLAGS
-class Controller(base_controller.OpenstackCreateInstanceController):
+class Controller(object):
""" The Server API controller for the OpenStack API """
def __init__(self):
self.compute_api = compute.API()
- super(Controller, self).__init__()
+ self.helper = helper.CreateInstanceHelper(self)
def index(self, req):
""" Returns a list of server names and ids for a given user """
try:
servers = self._items(req, is_detail=False)
except exception.Invalid as err:
- return exc.HTTPBadRequest(str(err))
+ return exc.HTTPBadRequest(explanation=str(err))
return servers
def detail(self, req):
@@ -59,7 +59,7 @@ class Controller(base_controller.OpenstackCreateInstanceController):
try:
servers = self._items(req, is_detail=True)
except exception.Invalid as err:
- return exc.HTTPBadRequest(str(err))
+ return exc.HTTPBadRequest(explanation=str(err))
return servers
def _get_view_builder(self, req):
@@ -111,8 +111,8 @@ class Controller(base_controller.OpenstackCreateInstanceController):
extra_values = None
result = None
try:
- extra_values, result = \
- self.create_instance(req, body, self.compute_api.create)
+ extra_values, result = self.helper.create_instance(
+ req, body, self.compute_api.create)
except faults.Fault, f:
return f
@@ -141,7 +141,7 @@ class Controller(base_controller.OpenstackCreateInstanceController):
if 'name' in body['server']:
name = body['server']['name']
- self._validate_server_name(name)
+ self.helper._validate_server_name(name)
update_dict['display_name'] = name.strip()
self._parse_update(ctxt, id, body, update_dict)
@@ -403,6 +403,13 @@ class Controller(base_controller.OpenstackCreateInstanceController):
class ControllerV10(Controller):
+
+ def _image_ref_from_req_data(self, data):
+ return data['server']['imageId']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorId']
+
def _get_view_builder(self, req):
addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV10()
return nova.api.openstack.views.servers.ViewBuilderV10(
@@ -453,6 +460,10 @@ class ControllerV10(Controller):
response.empty_body = True
return response
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_old_style(server)
+
class ControllerV11(Controller):
def _image_ref_from_req_data(self, data):
@@ -477,11 +488,11 @@ class ControllerV11(Controller):
if (not 'changePassword' in input_dict
or not 'adminPass' in input_dict['changePassword']):
msg = _("No adminPass was specified")
- return exc.HTTPBadRequest(msg)
+ return exc.HTTPBadRequest(explanation=msg)
password = input_dict['changePassword']['adminPass']
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
- return exc.HTTPBadRequest(msg)
+ return exc.HTTPBadRequest(explanation=msg)
self.compute_api.set_admin_password(context, id, password)
return exc.HTTPAccepted()
@@ -567,14 +578,7 @@ class ControllerV11(Controller):
def _get_server_admin_password(self, server):
""" Determine the admin password for a server on creation """
- password = server.get('adminPass')
-
- if password is None:
- return utils.generate_password(16)
- if not isinstance(password, basestring) or password == '':
- msg = _("Invalid adminPass")
- raise exc.HTTPBadRequest(msg)
- return password
+ return self.helper._get_server_admin_password_new_style(server)
def create_resource(version='1.0'):
@@ -610,7 +614,7 @@ def create_resource(version='1.0'):
}
deserializers = {
- 'application/xml': base_controller.ServerXMLDeserializer(),
+ 'application/xml': helper.ServerXMLDeserializer(),
}
return wsgi.Resource(controller, serializers=serializers,
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 33da915ee..a57b7f72b 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -2,7 +2,9 @@
import json
import webob
from xml.dom import minidom
+from xml.parsers import expat
+import faults
from nova import exception
from nova import log as logging
from nova import utils
@@ -60,7 +62,7 @@ class TextDeserializer(object):
def deserialize(self, datastring, action='default'):
"""Find local deserialization method and parse request body."""
- action_method = getattr(self, action, self.default)
+ action_method = getattr(self, str(action), self.default)
return action_method(datastring)
def default(self, datastring):
@@ -71,7 +73,11 @@ class TextDeserializer(object):
class JSONDeserializer(TextDeserializer):
def default(self, datastring):
- return utils.loads(datastring)
+ try:
+ return utils.loads(datastring)
+ except ValueError:
+ raise exception.MalformedRequestBody(
+ reason=_("malformed JSON in request body"))
class XMLDeserializer(TextDeserializer):
@@ -86,8 +92,13 @@ class XMLDeserializer(TextDeserializer):
def default(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
- node = minidom.parseString(datastring).childNodes[0]
- return {node.nodeName: self._from_xml_node(node, plurals)}
+
+ try:
+ node = minidom.parseString(datastring).childNodes[0]
+ return {node.nodeName: self._from_xml_node(node, plurals)}
+ except expat.ExpatError:
+ raise exception.MalformedRequestBody(
+ reason=_("malformed XML in request body"))
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
@@ -189,7 +200,7 @@ class DictSerializer(object):
def serialize(self, data, action='default'):
"""Find local serialization method and encode response body."""
- action_method = getattr(self, action, self.default)
+ action_method = getattr(self, str(action), self.default)
return action_method(data)
def default(self, data):
@@ -296,7 +307,7 @@ class ResponseSerializer(object):
}
self.serializers.update(serializers or {})
- def serialize(self, response_data, content_type):
+ def serialize(self, response_data, content_type, action='default'):
"""Serialize a dict into a string and wrap in a wsgi.Request object.
:param response_data: dict produced by the Controller
@@ -307,7 +318,7 @@ class ResponseSerializer(object):
response.headers['Content-Type'] = content_type
serializer = self.get_serializer(content_type)
- response.body = serializer.serialize(response_data)
+ response.body = serializer.serialize(response_data, action)
return response
@@ -352,13 +363,17 @@ class Resource(wsgi.Application):
action, action_args, accept = self.deserializer.deserialize(
request)
except exception.InvalidContentType:
- return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
+ msg = _("Unsupported Content-Type")
+ return webob.exc.HTTPBadRequest(explanation=msg)
+ except exception.MalformedRequestBody:
+ msg = _("Malformed request body")
+ return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
action_result = self.dispatch(request, action, action_args)
#TODO(bcwaldon): find a more elegant way to pass through non-dict types
if type(action_result) is dict:
- response = self.serializer.serialize(action_result, accept)
+ response = self.serializer.serialize(action_result, accept, action)
else:
response = action_result
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index 7ccb8555b..8864f825b 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -25,8 +25,9 @@ from nova import log as logging
from nova.compute import api as compute
from nova.scheduler import api
-from nova.api.openstack import create_instance_controller as controller
+from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import common
+from nova.api.openstack import faults
from nova.api.openstack import wsgi
@@ -62,17 +63,12 @@ def check_encryption_key(func):
return wrapped
-class Controller(controller.OpenstackCreateInstanceController):
- """Controller for Zone resources. Since we can also create instances
- via /zone/boot, this controller is derived from
- OpenstackCreateInstanceController, which contains all the logic for
- doing that (shared with Servers).
- """
+class Controller(object):
+ """Controller for Zone resources."""
- def __init__(self, version):
+ def __init__(self):
self.compute_api = compute.API()
- self.version = version
- super(Controller, self).__init__()
+ self.helper = helper.CreateInstanceHelper(self)
def index(self, req):
"""Return all zones in brief"""
@@ -132,7 +128,7 @@ class Controller(controller.OpenstackCreateInstanceController):
"""
result = None
try:
- extra_values, result = self.create_instance(req, body,
+ extra_values, result = self.helper.create_instance(req, body,
self.compute_api.create_all_at_once)
except faults.Fault, f:
return f
@@ -164,17 +160,36 @@ class Controller(controller.OpenstackCreateInstanceController):
return cooked
def _image_ref_from_req_data(self, data):
- if self.version == '1.0':
- return data['server']['imageId']
+ return data['server']['imageId']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorId']
+
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_old_style(server)
+
+
+class ControllerV11(object):
+ """Controller for 1.1 Zone resources."""
+
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_new_style(server)
+
+ def _image_ref_from_req_data(self, data):
return data['server']['imageRef']
def _flavor_id_from_req_data(self, data):
- if self.version == '1.0':
- return data['server']['flavorId']
return data['server']['flavorRef']
def create_resource(version):
+ controller = {
+ '1.0': Controller,
+ '1.1': ControllerV11,
+ }[version]()
+
metadata = {
"attributes": {
"zone": ["id", "api_url", "name", "capabilities"],
@@ -187,8 +202,8 @@ def create_resource(version):
}
deserializers = {
- 'application/xml': controller.ServerXMLDeserializer(),
+ 'application/xml': helper.ServerXMLDeserializer(),
}
- return wsgi.Resource(Controller(version), serializers=serializers,
+ return wsgi.Resource(controller, serializers=serializers,
deserializers=deserializers)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c7db167c1..b81aecb70 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -34,6 +34,7 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
return str(instance_id)
+def _is_able_to_shutdown(instance, instance_id):
+ states = {'terminating': "Instance %s is already being terminated",
+ 'migrating': "Instance %s is being migrated",
+ 'stopping': "Instance %s is being stopped"}
+ msg = states.get(instance['state_description'])
+ if msg:
+ LOG.warning(_(msg), instance_id)
+ return False
+
+ return True
+
+
class API(base.Base):
"""API for interacting with the compute manager."""
@@ -165,6 +178,9 @@ class API(base.Base):
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
+ vm_mode = None
+ if 'properties' in image and 'vm_mode' in image['properties']:
+ vm_mode = image['properties']['vm_mode']
if kernel_id is None:
kernel_id = image['properties'].get('kernel_id', None)
@@ -226,12 +242,13 @@ class API(base.Base):
'locked': False,
'metadata': metadata,
'availability_zone': availability_zone,
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'vm_mode': vm_mode}
return (num_instances, base_options, security_groups)
def create_db_entry_for_new_instance(self, context, base_options,
- security_groups, num=1):
+ security_groups, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security
groups, MAC address, etc). This will called by create()
@@ -251,6 +268,23 @@ class API(base.Base):
instance_id,
security_group_id)
+ # NOTE(yamahata)
+ # tell vm driver to attach volume at boot time by updating
+ # BlockDeviceMapping
+ for bdm in block_device_mapping:
+ LOG.debug(_('bdm %s'), bdm)
+ assert 'device_name' in bdm
+ values = {
+ 'instance_id': instance_id,
+ 'device_name': bdm['device_name'],
+ 'delete_on_termination': bdm.get('delete_on_termination'),
+ 'virtual_name': bdm.get('virtual_name'),
+ 'snapshot_id': bdm.get('snapshot_id'),
+ 'volume_id': bdm.get('volume_id'),
+ 'volume_size': bdm.get('volume_size'),
+ 'no_device': bdm.get('no_device')}
+ self.db.block_device_mapping_create(elevated, values)
+
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
@@ -285,7 +319,7 @@ class API(base.Base):
'instance_type': instance_type,
'filter': filter_class,
'blob': zone_blob,
- 'num_instances': num_instances
+ 'num_instances': num_instances,
}
rpc.cast(context,
@@ -335,7 +369,7 @@ class API(base.Base):
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, block_device_mapping=None):
"""
Provision the instances by sending off a series of single
instance requests to the Schedulers. This is fine for trival
@@ -356,11 +390,13 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
+ block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = self.create_db_entry_for_new_instance(context,
- base_options, security_groups, num=num)
+ base_options, security_groups,
+ block_device_mapping, num=num)
instances.append(instance)
instance_id = instance['id']
@@ -470,24 +506,22 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
- @scheduler_api.reroute_compute("delete")
- def delete(self, context, instance_id):
- """Terminate an instance."""
- LOG.debug(_("Going to try to terminate %s"), instance_id)
+ def _get_instance(self, context, instance_id, action_str):
try:
- instance = self.get(context, instance_id)
+ return self.get(context, instance_id)
except exception.NotFound:
- LOG.warning(_("Instance %s was not found during terminate"),
- instance_id)
+ LOG.warning(_("Instance %(instance_id)s was not found during "
+ "%(action_str)s") %
+ {'instance_id': instance_id, 'action_str': action_str})
raise
- if instance['state_description'] == 'terminating':
- LOG.warning(_("Instance %s is already being terminated"),
- instance_id)
- return
+ @scheduler_api.reroute_compute("delete")
+ def delete(self, context, instance_id):
+ """Terminate an instance."""
+ LOG.debug(_("Going to try to terminate %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'terminating')
- if instance['state_description'] == 'migrating':
- LOG.warning(_("Instance %s is being migrated"), instance_id)
+ if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
@@ -501,8 +535,48 @@ class API(base.Base):
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
+ terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id)
+ @scheduler_api.reroute_compute("stop")
+ def stop(self, context, instance_id):
+ """Stop an instance."""
+ LOG.debug(_("Going to try to stop %s"), instance_id)
+
+ instance = self._get_instance(context, instance_id, 'stopping')
+ if not _is_able_to_shutdown(instance, instance_id):
+ return
+
+ self.update(context,
+ instance['id'],
+ state_description='stopping',
+ state=power_state.NOSTATE,
+ terminated_at=utils.utcnow())
+
+ host = instance['host']
+ if host:
+ self._cast_compute_message('stop_instance', context,
+ instance_id, host)
+
+ def start(self, context, instance_id):
+ """Start an instance."""
+ LOG.debug(_("Going to try to start %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'starting')
+ if instance['state_description'] != 'stopped':
+ _state_description = instance['state_description']
+        LOG.warning(_("Instance %(instance_id)s is not stopped "
+                      "(current state: %(_state_description)s)") % locals())
+ return
+
+ # TODO(yamahata): injected_files isn't supported right now.
+ # It is used only for osapi. not for ec2 api.
+ # availability_zone isn't used by run_instance.
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "start_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id}})
+
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
rv = self.db.instance_get(context, instance_id)
@@ -522,6 +596,7 @@ class API(base.Base):
"""Get all instances with this reservation_id, across
all available Zones (if any).
"""
+ context = context.elevated()
instances = self.db.instance_get_all_by_reservation(
context, reservation_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 245958de7..8ab744855 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -53,6 +53,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -214,8 +215,63 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
- @exception.wrap_exception
- def run_instance(self, context, instance_id, **kwargs):
+ def _setup_block_device_mapping(self, context, instance_id):
+ """setup volumes for block device mapping"""
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'block_device_mapping')
+
+ volume_api = volume.API()
+ block_device_mapping = []
+ for bdm in self.db.block_device_mapping_get_all_by_instance(
+ context, instance_id):
+ LOG.debug(_("setting up bdm %s"), bdm)
+ if ((bdm['snapshot_id'] is not None) and
+ (bdm['volume_id'] is None)):
+ # TODO(yamahata): default name and description
+ vol = volume_api.create(context, bdm['volume_size'],
+ bdm['snapshot_id'], '', '')
+ # TODO(yamahata): creating volume simultaneously
+ # reduces creation time?
+ volume_api.wait_creation(context, vol['id'])
+ self.db.block_device_mapping_update(
+ context, bdm['id'], {'volume_id': vol['id']})
+ bdm['volume_id'] = vol['id']
+
+ if not ((bdm['snapshot_id'] is None) or
+ (bdm['volume_id'] is not None)):
+ LOG.error(_('corrupted state of block device mapping '
+ 'id: %(id)s '
+                'snapshot: %(snapshot_id)s volume: %(volume_id)s') %
+                {'id': bdm['id'],
+                 'snapshot_id': bdm['snapshot_id'],
+ 'volume_id': bdm['volume_id']})
+ raise exception.ApiError(_('broken block device mapping %d') %
+ bdm['id'])
+
+ if bdm['volume_id'] is not None:
+ volume_api.check_attach(context,
+ volume_id=bdm['volume_id'])
+ dev_path = self._attach_volume_boot(context, instance_id,
+ bdm['volume_id'],
+ bdm['device_name'])
+ block_device_mapping.append({'device_path': dev_path,
+ 'mount_device':
+ bdm['device_name']})
+ elif bdm['virtual_name'] is not None:
+ # TODO(yamahata): ephemeral/swap device support
+ LOG.debug(_('block_device_mapping: '
+ 'ephemeral device is not supported yet'))
+ else:
+ # TODO(yamahata): NoDevice support
+ assert bdm['no_device']
+ LOG.debug(_('block_device_mapping: '
+ 'no device is not supported yet'))
+
+ return block_device_mapping
+
+ def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -249,11 +305,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager.setup_compute_network(context,
instance_id)
+ block_device_mapping = self._setup_block_device_mapping(context,
+ instance_id)
+
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
- self.driver.spawn(instance_ref)
+ self.driver.spawn(instance_ref,
+ block_device_mapping=block_device_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
@@ -277,12 +337,24 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
@exception.wrap_exception
+ def run_instance(self, context, instance_id, **kwargs):
+ self._run_instance(context, instance_id, **kwargs)
+
+ @exception.wrap_exception
@checks_instance_lock
- def terminate_instance(self, context, instance_id):
- """Terminate an instance on this host."""
+ def start_instance(self, context, instance_id):
+ """Starting an instance on this host."""
+ # TODO(yamahata): injected_files isn't supported.
+ # Anyway OSAPI doesn't support stop/start yet
+ self._run_instance(context, instance_id)
+
+ def _shutdown_instance(self, context, instance_id, action_str):
+ """Shutdown an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("Terminating instance %s"), instance_id, context=context)
+ LOG.audit(_("%(action_str)s instance %(instance_id)s") %
+ {'action_str': action_str, 'instance_id': instance_id},
+ context=context)
fixed_ip = instance_ref.get('fixed_ip')
if not FLAGS.stub_network and fixed_ip:
@@ -318,18 +390,36 @@ class ComputeManager(manager.SchedulerDependentManager):
volumes = instance_ref.get('volumes') or []
for volume in volumes:
- self.detach_volume(context, instance_id, volume['id'])
- if instance_ref['state'] == power_state.SHUTOFF:
+ self._detach_volume(context, instance_id, volume['id'], False)
+
+ if (instance_ref['state'] == power_state.SHUTOFF and
+ instance_ref['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
+ if action_str == 'Terminating':
+ terminate_volumes(self.db, context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def terminate_instance(self, context, instance_id):
+ """Terminate an instance on this host."""
+ self._shutdown_instance(context, instance_id, 'Terminating')
+
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
+ def stop_instance(self, context, instance_id):
+ """Stopping an instance on this host."""
+ self._shutdown_instance(context, instance_id, 'Stopping')
+ # instance state will be updated to stopped by _poll_instance_states()
+
+ @exception.wrap_exception
+ @checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -800,6 +890,22 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref)
+ def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
+        """Attach a volume to an instance at boot time; the actual
+        attach is performed as part of instance creation."""
+
+ # TODO(yamahata):
+ # should move check_attach to volume manager?
+ volume.API().check_attach(context, volume_id)
+
+ context = context.elevated()
+ LOG.audit(_("instance %(instance_id)s: booting with "
+ "volume %(volume_id)s at %(mountpoint)s") %
+ locals(), context=context)
+ dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
+ self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+ return dev_path
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -817,6 +923,16 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id,
instance_id,
mountpoint)
+ values = {
+ 'instance_id': instance_id,
+ 'device_name': mountpoint,
+ 'delete_on_termination': False,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': volume_id,
+ 'volume_size': None,
+ 'no_device': None}
+ self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
@@ -831,7 +947,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
- def detach_volume(self, context, instance_id, volume_id):
+ def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -847,8 +963,15 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
+ if destroy_bdm:
+ self.db.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance_id, volume_id)
return True
+ def detach_volume(self, context, instance_id, volume_id):
+ """Detach a volume from an instance."""
+ return self._detach_volume(context, instance_id, volume_id, True)
+
def remove_volume(self, context, volume_id):
"""Remove volume on compute host.
@@ -1174,11 +1297,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"State=%(db_state)s, so setting state to "
"shutoff.") % locals())
vm_state = power_state.SHUTOFF
+ if db_instance['state_description'] == 'stopping':
+ self.db.instance_stop(context, db_instance['id'])
+ continue
else:
vm_state = vm_instance.state
vms_not_found_in_db.remove(name)
- if db_instance['state_description'] == 'migrating':
+ if (db_instance['state_description'] in ['migrating', 'stopping']):
# A situation which db record exists, but no instance"
# sometimes occurs while live-migration at src compute,
# this case should be ignored.
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
new file mode 100644
index 000000000..c8cb9bab8
--- /dev/null
+++ b/nova/compute/utils.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 VA Linux Systems Japan K.K
+# Copyright (c) 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import volume
+
+
+def terminate_volumes(db, context, instance_id):
+    """Delete volumes flagged delete_on_termination=True in the block device mapping."""
+ volume_api = volume.API()
+ for bdm in db.block_device_mapping_get_all_by_instance(context,
+ instance_id):
+ #LOG.debug(_("terminating bdm %s") % bdm)
+ if bdm['volume_id'] and bdm['delete_on_termination']:
+ volume_api.delete(context, bdm['volume_id'])
+ db.block_device_mapping_destroy(context, bdm['id'])
diff --git a/nova/crypto.py b/nova/crypto.py
index bdc32482a..8d535f426 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -176,7 +176,8 @@ def revoke_certs_by_project(project_id):
def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project."""
admin = context.get_admin_context()
- for cert in db.certificate_get_all_by_user(admin, user_id, project_id):
+ for cert in db.certificate_get_all_by_user_and_project(admin,
+ user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
diff --git a/nova/db/api.py b/nova/db/api.py
index 4e0aa60a2..117c235ea 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -414,6 +414,11 @@ def instance_destroy(context, instance_id):
return IMPL.instance_destroy(context, instance_id)
+def instance_stop(context, instance_id):
+ """Stop the instance or raise if it does not exist."""
+ return IMPL.instance_stop(context, instance_id)
+
+
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
@@ -920,6 +925,36 @@ def snapshot_update(context, snapshot_id, values):
####################
+def block_device_mapping_create(context, values):
+ """Create an entry of block device mapping"""
+ return IMPL.block_device_mapping_create(context, values)
+
+
+def block_device_mapping_update(context, bdm_id, values):
+    """Update an entry of block device mapping"""
+ return IMPL.block_device_mapping_update(context, bdm_id, values)
+
+
+def block_device_mapping_get_all_by_instance(context, instance_id):
+    """Get all block device mappings belonging to an instance"""
+ return IMPL.block_device_mapping_get_all_by_instance(context, instance_id)
+
+
+def block_device_mapping_destroy(context, bdm_id):
+ """Destroy the block device mapping."""
+ return IMPL.block_device_mapping_destroy(context, bdm_id)
+
+
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+ volume_id):
+ """Destroy the block device mapping or raise if it does not exist."""
+ return IMPL.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance_id, volume_id)
+
+
+####################
+
+
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 73870d2f3..81569e98c 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -18,7 +18,7 @@
"""
Implementation of SQLAlchemy backend.
"""
-
+import traceback
import warnings
from nova import db
@@ -840,6 +840,25 @@ def instance_destroy(context, instance_id):
@require_context
+def instance_stop(context, instance_id):
+ session = get_session()
+ with session.begin():
+ from nova.compute import power_state
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'host': None,
+ 'state': power_state.SHUTOFF,
+ 'state_description': 'stopped',
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'updated_at': literal_column('updated_at')})
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ update({'updated_at': literal_column('updated_at')})
+
+
+@require_context
def instance_get(context, instance_id, session=None):
if not session:
session = get_session()
@@ -907,6 +926,7 @@ def instance_get_all_by_host(context, host):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
@@ -922,6 +942,7 @@ def instance_get_all_by_project(context, project_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -937,6 +958,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -946,6 +968,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
@@ -959,6 +982,8 @@ def instance_get_project_vpn(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(image_ref=str(FLAGS.vpn_image_id)).\
@@ -1877,6 +1902,66 @@ def snapshot_update(context, snapshot_id, values):
@require_context
+def block_device_mapping_create(context, values):
+ bdm_ref = models.BlockDeviceMapping()
+ bdm_ref.update(values)
+
+ session = get_session()
+ with session.begin():
+ bdm_ref.save(session=session)
+
+
+@require_context
+def block_device_mapping_update(context, bdm_id, values):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(id=bdm_id).\
+ filter_by(deleted=False).\
+ update(values)
+
+
+@require_context
+def block_device_mapping_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.BlockDeviceMapping).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ return []
+ return result
+
+
+@require_context
+def block_device_mapping_destroy(context, bdm_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(id=bdm_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+ volume_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(instance_id=instance_id).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+###################
+
+
+@require_context
def security_group_get_all(context):
session = get_session()
return session.query(models.SecurityGroup).\
@@ -2609,7 +2694,17 @@ def zone_get_all(context):
####################
+
+def require_instance_exists(func):
+ def new_func(context, instance_id, *args, **kwargs):
+ db.api.instance_get(context, instance_id)
+ return func(context, instance_id, *args, **kwargs)
+ new_func.__name__ = func.__name__
+ return new_func
+
+
@require_context
+@require_instance_exists
def instance_metadata_get(context, instance_id):
session = get_session()
@@ -2625,6 +2720,7 @@ def instance_metadata_get(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2637,6 +2733,7 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_delete_all(context, instance_id):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2648,6 +2745,7 @@ def instance_metadata_delete_all(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_get_item(context, instance_id, key):
session = get_session()
@@ -2664,6 +2762,7 @@ def instance_metadata_get_item(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_update_or_create(context, instance_id, metadata):
session = get_session()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py
new file mode 100644
index 000000000..0c587f569
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+meta = MetaData()
+
+instances_vm_mode = Column('vm_mode',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ instances.create_column(instances_vm_mode)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ instances.drop_column('vm_mode')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
new file mode 100644
index 000000000..6e9b806cb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
@@ -0,0 +1,87 @@
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy import DateTime, Boolean, Integer, String
+from sqlalchemy import ForeignKey
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+snapshots = Table('snapshots', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+block_device_mapping = Table('block_device_mapping', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, autoincrement=True),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ Column('device_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ Column('delete_on_termination',
+ Boolean(create_constraint=True, name=None),
+ default=False),
+ Column('virtual_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=True),
+ Column('snapshot_id',
+ Integer(),
+ ForeignKey('snapshots.id'),
+ nullable=True),
+ Column('volume_id', Integer(), ForeignKey('volumes.id'),
+ nullable=True),
+ Column('volume_size', Integer(), nullable=True),
+ Column('no_device',
+ Boolean(create_constraint=True, name=None),
+ nullable=True),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ try:
+ block_device_mapping.create()
+ except Exception:
+ logging.info(repr(block_device_mapping))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[block_device_mapping])
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ block_device_mapping.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_zone_weight_offsets.py
new file mode 100644
index 000000000..1b7871e5f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_zone_weight_offsets.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Float, Integer, MetaData, Table
+
+meta = MetaData()
+
+zones = Table('zones', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+weight_offset = Column('weight_offset', Float(), default=0.0)
+weight_scale = Column('weight_scale', Float(), default=1.0)
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.create_column(weight_offset)
+ zones.create_column(weight_scale)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.drop_column(weight_offset)
+ zones.drop_column(weight_scale)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index f28fb0778..2121afa40 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -232,6 +232,7 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
os_type = Column(String(255))
+ vm_mode = Column(String(255))
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
@@ -356,6 +357,45 @@ class Snapshot(BASE, NovaBase):
display_description = Column(String(255))
+class BlockDeviceMapping(BASE, NovaBase):
+    """Represents block device mapping that is defined by EC2"""
+    __tablename__ = "block_device_mapping"
+    id = Column(Integer, primary_key=True, autoincrement=True)
+
+    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+    instance = relationship(Instance,
+                            backref=backref('block_device_mapping'),
+                            foreign_keys=instance_id,
+                            primaryjoin='and_(BlockDeviceMapping.instance_id=='
+                                        'Instance.id,'
+                                        'BlockDeviceMapping.deleted=='
+                                        'False)')
+    device_name = Column(String(255), nullable=False)
+
+    # default=False for compatibility of the existing code.
+    # With EC2 API,
+    # default True for ami specified device.
+    # default False for created with other timing.
+    delete_on_termination = Column(Boolean, default=False)
+
+    # for ephemeral device
+    virtual_name = Column(String(255), nullable=True)
+
+    # for snapshot or volume
+    snapshot_id = Column(Integer, ForeignKey('snapshots.id'), nullable=True)
+    # outer join
+    snapshot = relationship(Snapshot,
+                            foreign_keys=snapshot_id)
+
+    volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+    volume = relationship(Volume,
+                          foreign_keys=volume_id)
+    volume_size = Column(Integer, nullable=True)
+
+    # for no device to suppress devices.
+    no_device = Column(Boolean, nullable=True)
+
+
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'
diff --git a/nova/exception.py b/nova/exception.py
index 1571dd032..f3a452228 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -585,3 +585,7 @@ class InstanceExists(Duplicate):
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
+
+
+class MalformedRequestBody(NovaException):
+ message = _("Malformed message body: %(reason)s")
diff --git a/nova/flags.py b/nova/flags.py
index acfcf8d68..57a4ecf2f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
- ['127.0.0.1:9292'],
+ ['%s:9292' % _get_my_ip()],
'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
@@ -364,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.local.LocalImageService',
+DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 70a5f0e22..c4b3d5fd6 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -120,6 +120,14 @@ class _FakeImageService(service.BaseImageService):
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
+ def show_by_name(self, context, name):
+ """Returns a dict containing image data for the given name."""
+ images = copy.deepcopy(self.images.values())
+ for image in images:
+ if name == image.get('name'):
+ return image
+ raise exception.ImageNotFound(image_id=name)
+
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
diff --git a/nova/image/local.py b/nova/image/local.py
deleted file mode 100644
index c7dee4573..000000000
--- a/nova/image/local.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import os.path
-import random
-import shutil
-
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.image import service
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('images_path', '$state_path/images',
- 'path to decrypted images')
-
-
-LOG = logging.getLogger('nova.image.local')
-
-
-class LocalImageService(service.BaseImageService):
- """Image service storing images to local disk.
-
- It assumes that image_ids are integers.
-
- """
-
- def __init__(self):
- self._path = FLAGS.images_path
-
- def _path_to(self, image_id, fname='info.json'):
- if fname:
- return os.path.join(self._path, '%08x' % int(image_id), fname)
- return os.path.join(self._path, '%08x' % int(image_id))
-
- def _ids(self):
- """The list of all image ids."""
- images = []
- for image_dir in os.listdir(self._path):
- try:
- unhexed_image_id = int(image_dir, 16)
- except ValueError:
- LOG.error(_('%s is not in correct directory naming format')
- % image_dir)
- else:
- images.append(unhexed_image_id)
- return images
-
- def index(self, context, filters=None, marker=None, limit=None):
- # TODO(blamar): Make use of filters, marker, and limit
- filtered = []
- image_metas = self.detail(context)
- for image_meta in image_metas:
- meta = utils.subset_dict(image_meta, ('id', 'name'))
- filtered.append(meta)
- return filtered
-
- def detail(self, context, filters=None, marker=None, limit=None):
- # TODO(blamar): Make use of filters, marker, and limit
- images = []
- for image_id in self._ids():
- try:
- image = self.show(context, image_id)
- images.append(image)
- except exception.NotFound:
- continue
- return images
-
- def show(self, context, image_id):
- try:
- with open(self._path_to(image_id)) as metadata_file:
- image_meta = json.load(metadata_file)
- if not self._is_image_available(context, image_meta):
- raise exception.ImageNotFound(image_id=image_id)
- return image_meta
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
-
- def show_by_name(self, context, name):
- """Returns a dict containing image data for the given name."""
- # NOTE(vish): Not very efficient, but the local image service
- # is for testing so it should be fine.
- images = self.detail(context)
- image = None
- for cantidate in images:
- if name == cantidate.get('name'):
- image = cantidate
- break
- if image is None:
- raise exception.ImageNotFound(image_id=name)
- return image
-
- def get(self, context, image_id, data):
- """Get image and metadata."""
- try:
- with open(self._path_to(image_id)) as metadata_file:
- metadata = json.load(metadata_file)
- with open(self._path_to(image_id, 'image')) as image_file:
- shutil.copyfileobj(image_file, data)
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
- return metadata
-
- def create(self, context, metadata, data=None):
- """Store the image data and return the new image."""
- image_id = random.randint(0, 2 ** 31 - 1)
- image_path = self._path_to(image_id, None)
- if not os.path.exists(image_path):
- os.mkdir(image_path)
- return self._store(context, image_id, metadata, data)
-
- def update(self, context, image_id, metadata, data=None):
- """Replace the contents of the given image with the new data."""
- # NOTE(vish): show is to check if image is available
- self.show(context, image_id)
- return self._store(context, image_id, metadata, data)
-
- def _store(self, context, image_id, metadata, data=None):
- metadata['id'] = image_id
- try:
- if data:
- location = self._path_to(image_id, 'image')
- with open(location, 'w') as image_file:
- shutil.copyfileobj(data, image_file)
- # NOTE(vish): update metadata similarly to glance
- metadata['status'] = 'active'
- metadata['location'] = location
- with open(self._path_to(image_id), 'w') as metadata_file:
- json.dump(metadata, metadata_file)
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
- return metadata
-
- def delete(self, context, image_id):
- """Delete the given image.
-
- :raises: ImageNotFound if the image does not exist.
-
- """
- # NOTE(vish): show is to check if image is available
- self.show(context, image_id)
- try:
- shutil.rmtree(self._path_to(image_id, None))
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
-
- def delete_all(self):
- """Clears out all images in local directory."""
- for image_id in self._ids():
- shutil.rmtree(self._path_to(image_id, None))
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index f966528f0..dbd244a7f 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -106,7 +106,8 @@ def _wrap_method(function, self):
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
- nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
+ zone.api_url)
nova.authenticate()
return func(nova, zone)
@@ -124,7 +125,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
zones = db.zone_get_all(context)
for zone in zones:
try:
- nova = novaclient.OpenStack(zone.username, zone.password,
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 87cdef11d..fc1b3142a 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
- def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
+ def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
@@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
" for this request. Is the appropriate"
" service running?"))
+ def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
+ return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+
+ def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
+ return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index b23a1a7c1..2f4ea95da 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -88,7 +88,7 @@ class ZoneAwareScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
- image_id = instance_properties['image_id']
+ image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
@@ -105,13 +105,14 @@ class ZoneAwareScheduler(driver.Scheduler):
% locals())
nova = None
try:
- nova = novaclient.OpenStack(zone.username, zone.password, url)
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
+ url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
- nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
+ nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
def _provision_resource_from_blob(self, context, item, instance_id,
@@ -170,7 +171,7 @@ class ZoneAwareScheduler(driver.Scheduler):
continue
for zone_rec in zones:
- if zone_rec['url'] != zone:
+ if zone_rec['api_url'] != zone:
continue
try:
@@ -208,7 +209,11 @@ class ZoneAwareScheduler(driver.Scheduler):
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
- for item in build_plan:
+ for num in xrange(request_spec['num_instances']):
+ if not build_plan:
+ break
+
+ item = build_plan.pop(0)
self._provision_resource(context, item, instance_id, request_spec,
kwargs)
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 3f483adff..ba7403c15 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -89,7 +89,8 @@ class ZoneState(object):
def _call_novaclient(zone):
"""Call novaclient. Broken out for testing purposes."""
- client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ client = novaclient.OpenStack(zone.username, zone.password, None,
+ zone.api_url)
return client.zones.info()._info
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index a10fb7433..f8d158ddd 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -39,7 +39,6 @@ from nova.api.openstack import limits
from nova.auth.manager import User, Project
import nova.image.fake
from nova.image import glance
-from nova.image import local
from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index c63431a45..7321c329f 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+
import webob.exc
import webob.dec
@@ -23,6 +25,7 @@ from webob import Request
from nova import test
from nova.api import openstack
from nova.api.openstack import faults
+from nova.tests.api.openstack import fakes
class APITest(test.TestCase):
@@ -31,6 +34,24 @@ class APITest(test.TestCase):
# simpler version of the app than fakes.wsgi_app
return openstack.FaultWrapper(inner_app)
+ def test_malformed_json(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '{'
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_malformed_xml(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '<hi im not xml>'
+ req.headers["content-type"] = "application/xml"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_exceptions_are_converted_to_faults(self):
@webob.dec.wsgify
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 60914c0a3..697c62e5c 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -128,6 +128,11 @@ class ResourceExtensionTest(unittest.TestCase):
self.assertEqual(response_body, response.body)
+class InvalidExtension(object):
+ def get_alias(self):
+ return "THIRD"
+
+
class ExtensionManagerTest(unittest.TestCase):
response_body = "Try to say this Mr. Knox, sir..."
@@ -144,6 +149,14 @@ class ExtensionManagerTest(unittest.TestCase):
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
+ def test_invalid_extensions(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ ext_mgr = ext_midware.ext_mgr
+ ext_mgr.add_extension(InvalidExtension())
+ self.assertTrue('FOXNSOX' in ext_mgr.extensions)
+ self.assertTrue('THIRD' not in ext_mgr.extensions)
+
class ActionExtensionTest(unittest.TestCase):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index be777df9b..e4204809f 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -135,36 +135,6 @@ class _BaseImageServiceTests(test.TestCase):
return fixture
-class LocalImageServiceTest(_BaseImageServiceTests):
-
- """Tests the local image service"""
-
- def setUp(self):
- super(LocalImageServiceTest, self).setUp()
- self.tempdir = tempfile.mkdtemp()
- self.flags(images_path=self.tempdir)
- self.stubs = stubout.StubOutForTesting()
- service_class = 'nova.image.local.LocalImageService'
- self.service = utils.import_object(service_class)
- self.context = context.RequestContext(None, None)
-
- def tearDown(self):
- shutil.rmtree(self.tempdir)
- self.stubs.UnsetAll()
- super(LocalImageServiceTest, self).tearDown()
-
- def test_get_all_ids_with_incorrect_directory_formats(self):
- # create some old-style image directories (starting with 'ami-')
- for x in [1, 2, 3]:
- tempfile.mkstemp(prefix='ami-', dir=self.tempdir)
- # create some valid image directories names
- for x in ["1485baed", "1a60f0ee", "3123a73d"]:
- os.makedirs(os.path.join(self.tempdir, x))
- found_image_ids = self.service._ids()
- self.assertEqual(True, isinstance(found_image_ids, list))
- self.assertEqual(3, len(found_image_ids), len(found_image_ids))
-
-
class GlanceImageServiceTest(_BaseImageServiceTests):
"""Tests the Glance image service, in particular that metadata translation
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index c4d1d4fd8..0431e68d2 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -21,6 +21,7 @@ import unittest
import webob
+from nova import exception
from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
@@ -67,6 +68,14 @@ def stub_max_server_metadata():
return metadata
+def return_server(context, server_id):
+ return {'id': server_id}
+
+
+def return_server_nonexistant(context, server_id):
+ raise exception.InstanceNotFound()
+
+
class ServerMetaDataTest(unittest.TestCase):
def setUp(self):
@@ -76,6 +85,7 @@ class ServerMetaDataTest(unittest.TestCase):
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
def tearDown(self):
self.stubs.UnsetAll()
@@ -89,8 +99,16 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
+ def test_index_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
@@ -99,6 +117,7 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual(0, len(res_dict['metadata']))
def test_show(self):
@@ -109,15 +128,22 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
+ def test_show_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
@@ -129,6 +155,14 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
+ def test_delete_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -138,10 +172,31 @@ class ServerMetaDataTest(unittest.TestCase):
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_create_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/100/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -152,9 +207,30 @@ class ServerMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
+    def test_update_item_nonexistant_server(self):
+        self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+        req = webob.Request.blank('/v1.1/servers/100/meta/key1')
+        req.environ['api.version'] = '1.1'
+        req.method = 'PUT'
+        req.body = '{"key1": "value1"}'
+        req.headers["content-type"] = "application/json"
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(404, res.status_int)
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 529ca83c5..8357df594 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -31,7 +31,7 @@ from nova import test
from nova import utils
import nova.api.openstack
from nova.api.openstack import servers
-from nova.api.openstack import create_instance_controller
+from nova.api.openstack import create_instance_helper
import nova.compute.api
from nova.compute import instance_types
from nova.compute import power_state
@@ -570,8 +570,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.stubs.Set(
- nova.api.openstack.create_instance_controller.\
- OpenstackCreateInstanceController,
+ nova.api.openstack.create_instance_helper.CreateInstanceHelper,
"_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
@@ -1531,7 +1530,7 @@ class ServersTest(test.TestCase):
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
def setUp(self):
- self.deserializer = create_instance_controller.ServerXMLDeserializer()
+ self.deserializer = create_instance_helper.ServerXMLDeserializer()
def test_minimal_request(self):
serial_request = """
@@ -1863,7 +1862,8 @@ class TestServerInstanceCreation(test.TestCase):
compute_api = MockComputeAPI()
self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
- self.stubs.Set(nova.api.openstack.servers.Controller,
+ self.stubs.Set(
+ nova.api.openstack.create_instance_helper.CreateInstanceHelper,
'_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
return compute_api
@@ -2119,6 +2119,6 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
@staticmethod
def _get_k_r(image_meta):
"""Rebinding function to a shorter name for convenience"""
- kernel_id, ramdisk_id = \
- servers.Controller._do_get_kernel_ramdisk_from_image(image_meta)
+ kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. \
+ _do_get_kernel_ramdisk_from_image(image_meta)
return kernel_id, ramdisk_id
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index ebbdc9409..2fa50ac9b 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -89,6 +89,12 @@ class DictSerializerTest(test.TestCase):
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
+ def test_dispatch_action_None(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, None), 'trousers')
+
class XMLDictSerializerTest(test.TestCase):
def test_xml(self):
@@ -123,6 +129,12 @@ class TextDeserializerTest(test.TestCase):
deserializer.default = lambda x: 'trousers'
self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
+ def test_dispatch_action_None(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, None), 'trousers')
+
class JSONDeserializerTest(test.TestCase):
def test_json(self):
@@ -171,11 +183,11 @@ class XMLDeserializerTest(test.TestCase):
class ResponseSerializerTest(test.TestCase):
def setUp(self):
class JSONSerializer(object):
- def serialize(self, data):
+ def serialize(self, data, action='default'):
return 'pew_json'
class XMLSerializer(object):
- def serialize(self, data):
+ def serialize(self, data, action='default'):
return 'pew_xml'
self.serializers = {
@@ -211,11 +223,11 @@ class ResponseSerializerTest(test.TestCase):
class RequestDeserializerTest(test.TestCase):
def setUp(self):
class JSONDeserializer(object):
- def deserialize(self, data):
+ def deserialize(self, data, action='default'):
return 'pew_json'
class XMLDeserializer(object):
- def deserialize(self, data):
+ def deserialize(self, data, action='default'):
return 'pew_xml'
self.deserializers = {
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index ecefc464a..2297d2f0e 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
-FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
+FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index eb9a3056e..76c03c5fa 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -221,30 +221,30 @@ class TestOpenStackClient(object):
return self.api_delete('/flavors/%s' % flavor_id)
def get_volume(self, volume_id):
- return self.api_get('/volumes/%s' % volume_id)['volume']
+ return self.api_get('/os-volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
- rel_url = '/volumes/detail' if detail else '/volumes'
+ rel_url = '/os-volumes/detail' if detail else '/os-volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
- return self.api_post('/volumes', volume)['volume']
+ return self.api_post('/os-volumes', volume)['volume']
def delete_volume(self, volume_id):
- return self.api_delete('/volumes/%s' % volume_id)
+ return self.api_delete('/os-volumes/%s' % volume_id)
def get_server_volume(self, server_id, attachment_id):
- return self.api_get('/servers/%s/volume_attachments/%s' %
+ return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))['volumeAttachment']
def get_server_volumes(self, server_id):
- return self.api_get('/servers/%s/volume_attachments' %
+ return self.api_get('/servers/%s/os-volume_attachments' %
(server_id))['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
- return self.api_post('/servers/%s/volume_attachments' %
+ return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment)['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
- return self.api_delete('/servers/%s/volume_attachments/%s' %
+ return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 07817cc5a..10eafde08 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -133,11 +133,11 @@ class HostFilterTestCase(test.TestCase):
raw = ['or',
['and',
['<', '$compute.host_memory_free', 30],
- ['<', '$compute.disk_available', 300]
+ ['<', '$compute.disk_available', 300],
],
['and',
['>', '$compute.host_memory_free', 70],
- ['>', '$compute.disk_available', 700]
+ ['>', '$compute.disk_available', 700],
]
]
cooked = json.dumps(raw)
@@ -183,12 +183,12 @@ class HostFilterTestCase(test.TestCase):
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
- ['not', True, False, True, False]
+ ['not', True, False, True, False],
)))
try:
hf.filter_hosts(self.zone_manager, json.dumps(
- 'not', True, False, True, False
+ 'not', True, False, True, False,
))
self.fail("Should give KeyError")
except KeyError, e:
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index 506fa62fb..9a5318aee 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
hosts = [
FakeHost(1, 512 * MB, 100),
FakeHost(2, 256 * MB, 400),
- FakeHost(3, 512 * MB, 100)
+ FakeHost(3, 512 * MB, 100),
]
weighted_fns = [
@@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_noop_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.noop_cost_fn'
+ 'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 1
@@ -110,7 +110,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_cost_fn_weights(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.noop_cost_fn'
+ 'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 2
@@ -124,7 +124,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.fill_first_cost_fn'
+ 'nova.scheduler.least_cost.fill_first_cost_fn',
]
FLAGS.fill_first_cost_fn_weight = 1
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index 1cbc914ef..11f55c11f 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -16,7 +16,10 @@
Tests For Zone Aware Scheduler.
"""
-from nova import db
+import stubout
+
+import nova.db
+
from nova import exception
from nova import test
from nova.scheduler import driver
@@ -88,7 +91,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager):
self.service_states = {}
-def fake_empty_call_zone_method(context, method, specs):
+def fake_empty_call_zone_method(context, method, specs, zones):
return []
@@ -127,7 +130,7 @@ def fake_decrypt_blob_returns_child_info(blob):
'child_blob': True} # values aren't important. Keys are.
-def fake_call_zone_method(context, method, specs):
+def fake_call_zone_method(context, method, specs, zones):
return [
('zone1', [
dict(weight=1, blob='AAAAAAA'),
@@ -150,9 +153,31 @@ def fake_call_zone_method(context, method, specs):
]
+def fake_zone_get_all(context):
+ return [
+ dict(id=1, api_url='zone1',
+ username='admin', password='password',
+ weight_offset=0.0, weight_scale=1.0),
+ dict(id=2, api_url='zone2',
+ username='admin', password='password',
+ weight_offset=1000.0, weight_scale=1.0),
+ dict(id=3, api_url='zone3',
+ username='admin', password='password',
+ weight_offset=2000.0, weight_scale=1.0),
+ ]
+
+
class ZoneAwareSchedulerTestCase(test.TestCase):
"""Test case for Zone Aware Scheduler."""
+ def setUp(self):
+ super(ZoneAwareSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(ZoneAwareSchedulerTestCase, self).tearDown()
+
def test_zone_aware_scheduler(self):
"""
Create a nested set of FakeZones, ensure that a select call returns the
@@ -160,6 +185,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeZoneManager()
sched.set_zone_manager(zm)
@@ -179,6 +205,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)
@@ -202,7 +229,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
'instance_properties': {},
'instance_type': {},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
- 'blob': "Non-None blob data"
+ 'blob': "Non-None blob data",
}
result = sched.schedule_run_instance(None, 1, request_spec)
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 7c0331eff..20b20fcbf 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
- conv = apirequest._try_convert
+ conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('False'), False)
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 13046f861..6327734f5 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -35,7 +35,7 @@ from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
-from nova.image import local
+from nova.image import fake
FLAGS = flags.FLAGS
@@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
+ self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
@@ -69,8 +70,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
@@ -303,7 +304,7 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
- self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
@@ -317,8 +318,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual(2, len(result3['imagesSet']))
# provide an non-existing image_id
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
@@ -329,8 +330,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
@@ -345,9 +346,9 @@ class CloudTestCase(test.TestCase):
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
- self.stubs.Set(local.LocalImageService, 'update', fake_update)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
@@ -359,7 +360,7 @@ class CloudTestCase(test.TestCase):
def fake_delete(self, context, id):
return None
- self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertEqual(result['imageId'], 'ami-00000001')
@@ -369,18 +370,25 @@ class CloudTestCase(test.TestCase):
def fake_detail_empty(self, context):
return []
- self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
- def test_console_output(self):
- instance_type = FLAGS.default_instance_type
- max_count = 1
- kwargs = {'image_id': 'ami-1',
- 'instance_type': instance_type,
- 'max_count': max_count}
+ def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def _run_instance_wait(self, **kwargs):
+ ec2_instance_id = self._run_instance(**kwargs)
+ self._wait_for_running(ec2_instance_id)
+ return ec2_instance_id
+
+ def test_console_output(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=FLAGS.default_instance_type,
+ max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
@@ -389,9 +397,7 @@ class CloudTestCase(test.TestCase):
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self):
- kwargs = {'image_id': 'ami-1'}
- rv = self.cloud.run_instances(self.context, **kwargs)
- instance_id = rv['instancesSet'][0]['instanceId']
+ instance_id = self._run_instance(image_id='ami-1')
output = self.cloud.get_ajax_console(context=self.context,
instance_id=[instance_id])
self.assertEquals(output['url'],
@@ -457,6 +463,12 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
+ # stub out the rpc call
+ def stub_cast(*args, **kwargs):
+ pass
+
+ self.stubs.Set(rpc, 'cast', stub_cast)
+
kwargs = {'image_id': FLAGS.default_image,
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
@@ -466,7 +478,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['displayName'], 'Server 1')
self.assertEqual(instance['instanceId'], 'i-00000001')
- self.assertEqual(instance['instanceState']['name'], 'networking')
+ self.assertEqual(instance['instanceState']['name'], 'scheduling')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_image_state_none(self):
@@ -480,7 +492,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -495,7 +507,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -509,7 +521,7 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'}
- self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
@@ -538,7 +550,9 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
- self.cloud.update_instance(self.context, inst['id'],
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
+ self.cloud.update_instance(self.context, ec2_id,
+ display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF')
inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
@@ -561,3 +575,299 @@ class CloudTestCase(test.TestCase):
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
db.volume_destroy(self.context, vol['id'])
+
+ def _restart_compute_service(self, periodic_interval=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval:
+ self.compute = self.start_service(
+ 'compute', periodic_interval=periodic_interval)
+ else:
+ self.compute = self.start_service('compute')
+
+ def _wait_for_state(self, ctxt, instance_id, predicate):
+ """Wait for a stopping instance to reach a given state"""
+ id = ec2utils.ec2_id_to_id(instance_id)
+ while True:
+ info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
+ LOG.debug(info)
+ if predicate(info):
+ break
+ greenthread.sleep(1)
+
+ def _wait_for_running(self, instance_id):
+ def is_running(info):
+ return info['state_description'] == 'running'
+ self._wait_for_state(self.context, instance_id, is_running)
+
+ def _wait_for_stopped(self, instance_id):
+ def is_stopped(info):
+ return info['state_description'] == 'stopped'
+ self._wait_for_state(self.context, instance_id, is_stopped)
+
+ def _wait_for_terminate(self, instance_id):
+ def is_deleted(info):
+ return info['deleted']
+ elevated = self.context.elevated(read_deleted=True)
+ self._wait_for_state(elevated, instance_id, is_deleted)
+
+ def test_stop_start_instance(self):
+ """Makes sure stop/start instance works"""
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance_wait(**kwargs)
+
+ # a running instance can't be started. It is just ignored.
+ result = self.cloud.start_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_stopped(instance_id)
+
+ result = self.cloud.start_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_running(instance_id)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_stopped(instance_id)
+
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+
+ self._restart_compute_service()
+
+ def _volume_create(self):
+ kwargs = {'status': 'available',
+ 'host': self.volume.host,
+ 'size': 1,
+ 'attach_status': 'detached', }
+ return db.volume_create(self.context, kwargs)
+
+ def _assert_volume_attached(self, vol, instance_id, mountpoint):
+ self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ def _assert_volume_detached(self, vol):
+ self.assertEqual(vol['instance_id'], None)
+ self.assertEqual(vol['mountpoint'], None)
+ self.assertEqual(vol['status'], "available")
+ self.assertEqual(vol['attach_status'], "detached")
+
+ def test_stop_start_with_volume(self):
+ """Make sure run instance with block device mapping works"""
+
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ vol1 = self._volume_create()
+ vol2 = self._volume_create()
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'volume_id': vol1['id'],
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'volume_id': vol2['id'],
+ 'delete_on_termination': True, },
+ ]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdb')
+
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+ self._wait_for_stopped(ec2_instance_id)
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ self._wait_for_running(ec2_instance_id)
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
+ self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
+ vol['mountpoint'] == '/dev/vdc')
+ self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+
+ admin_ctxt = context.get_admin_context(read_deleted=False)
+ vol = db.volume_get(admin_ctxt, vol1['id'])
+ self.assertFalse(vol['deleted'])
+ db.volume_destroy(self.context, vol1['id'])
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=True)
+ vol = db.volume_get(admin_ctxt, vol2['id'])
+ self.assertTrue(vol['deleted'])
+
+ self._restart_compute_service()
+
+ def test_stop_with_attached_volume(self):
+ """Make sure attach info is reflected to block device mapping"""
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ vol1 = self._volume_create()
+ vol2 = self._volume_create()
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'volume_id': vol1['id'],
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol1['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdb')
+
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.compute_api.attach_volume(self.context,
+ instance_id=instance_id,
+ volume_id=vol2['id'],
+ device='/dev/vdc')
+ greenthread.sleep(0.3)
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ self.cloud.compute_api.detach_volume(self.context,
+ volume_id=vol1['id'])
+ greenthread.sleep(0.3)
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+ self._wait_for_stopped(ec2_instance_id)
+
+ for vol_id in (vol1['id'], vol2['id']):
+ vol = db.volume_get(self.context, vol_id)
+ self._assert_volume_detached(vol)
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ self._wait_for_running(ec2_instance_id)
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+
+ for vol_id in (vol1['id'], vol2['id']):
+ vol = db.volume_get(self.context, vol_id)
+ self.assertEqual(vol['id'], vol_id)
+ self._assert_volume_detached(vol)
+ db.volume_destroy(self.context, vol_id)
+
+ self._restart_compute_service()
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ greenthread.sleep(0.3)
+ return result['snapshotId']
+
+ def test_run_with_snapshot(self):
+ """Makes sure run/stop/start instance with snapshot works."""
+ vol = self._volume_create()
+ ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+ ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
+ snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
+ ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
+ snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'snapshot_id': snapshot1_id,
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'snapshot_id': snapshot2_id,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ vol1_id = None
+ vol2_id = None
+ for vol in vols:
+ snapshot_id = vol['snapshot_id']
+ if snapshot_id == snapshot1_id:
+ vol1_id = vol['id']
+ mountpoint = '/dev/vdb'
+ elif snapshot_id == snapshot2_id:
+ vol2_id = vol['id']
+ mountpoint = '/dev/vdc'
+ else:
+ self.fail()
+
+ self._assert_volume_attached(vol, instance_id, mountpoint)
+
+ self.assertTrue(vol1_id)
+ self.assertTrue(vol2_id)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+ self._wait_for_terminate(ec2_instance_id)
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=False)
+ vol = db.volume_get(admin_ctxt, vol1_id)
+ self._assert_volume_detached(vol)
+ self.assertFalse(vol['deleted'])
+ db.volume_destroy(self.context, vol1_id)
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=True)
+ vol = db.volume_get(admin_ctxt, vol2_id)
+ self.assertTrue(vol['deleted'])
+
+ for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
+ self.cloud.delete_snapshot(self.context, snapshot_id)
+ greenthread.sleep(0.3)
+ db.volume_destroy(self.context, vol['id'])
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b4ac2dbc4..2fa4c7278 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -22,21 +22,21 @@ Tests For Compute
import mox
import stubout
+from nova.auth import manager
from nova import compute
+from nova.compute import instance_types
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
from nova import context
from nova import db
+from nova.db.sqlalchemy import models
from nova import exception
from nova import flags
+import nova.image.fake
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
-from nova.auth import manager
-from nova.compute import instance_types
-from nova.compute import manager as compute_manager
-from nova.compute import power_state
-from nova.db.sqlalchemy import models
-from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
+ self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
@@ -228,6 +228,21 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
+ def test_stop(self):
+ """Ensure instance can be stopped"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.stop_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_start(self):
+ """Ensure instance can be started"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.stop_instance(self.context, instance_id)
+ self.compute.start_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_pause(self):
"""Ensure instance can be paused"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index 945d78794..6c25b396e 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -16,7 +16,11 @@
Tests for Crypto module.
"""
+import mox
+import stubout
+
from nova import crypto
+from nova import db
from nova import test
@@ -46,3 +50,82 @@ class SymmetricKeyTestCase(test.TestCase):
plain = decrypt(cipher_text)
self.assertEquals(plain_text, plain)
+
+
+class RevokeCertsTest(test.TestCase):
+
+ def setUp(self):
+ super(RevokeCertsTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(RevokeCertsTest, self).tearDown()
+
+ def test_revoke_certs_by_user_and_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user_and_project(context,
+ user_id,
+ project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
+ mock_certificate_get_all_by_user_and_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, file_name)
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user_and_project(user_id, project_id)
+
+ self.mox.VerifyAll()
+
+ def test_revoke_certs_by_user(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user(context, user_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user',
+ mock_certificate_get_all_by_user)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user(user_id)
+
+ self.mox.VerifyAll()
+
+ def test_revoke_certs_by_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_project(context, project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_project',
+ mock_certificate_get_all_by_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_project(project_id)
+
+ self.mox.VerifyAll()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 3a175b106..c0213250a 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -33,6 +33,7 @@ from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
+from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
@@ -228,6 +229,23 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
+ def test_instance_snapshot_fails_with_no_primary_vdi(self):
+ def create_bad_vbd(vm_ref, vdi_ref):
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'userdevice': 'fake',
+ 'currently_attached': False}
+ vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
+ xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+ self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
+ stubs.stubout_instance_snapshot(self.stubs)
+ instance = self._create_instance()
+
+ name = "MySnapshot"
+ self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
+
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
@@ -331,7 +349,7 @@ class XenAPIVMTestCase(test.TestCase):
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
- self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index eb9626d08..6341e81d2 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -61,7 +61,7 @@ class ComputeDriver(object):
"""Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError()
- def spawn(self, instance, network_info=None):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 0225797d7..3a65fec8b 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -237,6 +237,10 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ pass
+
def migrate_disk_and_power_off(self, instance, dest):
"""
Transfers the disk of a running instance in multiple phases, turning
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 05b4775c1..772e7eb59 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 20986d4d5..e1a683da8 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -67,11 +67,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
+ #if not ($getVar('ebs_root', False))
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
+ #end if
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
@@ -79,6 +81,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
+ #for $vol in $volumes
+ <disk type='block'>
+ <driver type='raw'/>
+ <source dev='${vol.device_path}'/>
+ <target dev='${vol.mount_device}' bus='${disk_bus}'/>
+ </disk>
+ #end for
#end if
#end if
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 98cdff311..96ef92825 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -40,6 +40,7 @@ import hashlib
import multiprocessing
import os
import random
+import re
import shutil
import subprocess
import sys
@@ -148,6 +149,10 @@ def _late_load_cheetah():
Template = t.Template
+def _strip_dev(mount_path):
+ return re.sub(r'^/dev/', '', mount_path)
+
+
class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
@@ -575,11 +580,14 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception
- def spawn(self, instance, network_info=None):
- xml = self.to_xml(instance, False, network_info)
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
+ xml = self.to_xml(instance, False, network_info=network_info,
+ block_device_mapping=block_device_mapping)
+ block_device_mapping = block_device_mapping or []
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info=network_info)
+ self._create_image(instance, xml, network_info=network_info,
+ block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -761,7 +769,8 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
- network_info=None):
+ network_info=None, block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
if not network_info:
network_info = netutils.get_network_info(inst)
@@ -824,16 +833,19 @@ class LibvirtConnection(driver.ComputeDriver):
size = None
root_fname += "_sm"
- self._cache_image(fn=self._fetch_image,
- target=basepath('disk'),
- fname=root_fname,
- cow=FLAGS.use_cow_images,
- image_id=disk_images['image_id'],
- user=user,
- project=project,
- size=size)
+ if not self._volume_in_mapping(self.root_mount_device,
+ block_device_mapping):
+ self._cache_image(fn=self._fetch_image,
+ target=basepath('disk'),
+ fname=root_fname,
+ cow=FLAGS.use_cow_images,
+ image_id=disk_images['image_id'],
+ user=user,
+ project=project,
+ size=size)
- if inst_type['local_gb']:
+ if inst_type['local_gb'] and not self._volume_in_mapping(
+ self.local_mount_device, block_device_mapping):
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
fname="local_%s" % inst_type['local_gb'],
@@ -948,7 +960,20 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def _prepare_xml_info(self, instance, rescue=False, network_info=None):
+ root_mount_device = 'vda' # FIXME for now. it's hard coded.
+ local_mount_device = 'vdb' # FIXME for now. it's hard coded.
+
+ def _volume_in_mapping(self, mount_device, block_device_mapping):
+ mount_device_ = _strip_dev(mount_device)
+ for vol in block_device_mapping:
+ vol_mount_device = _strip_dev(vol['mount_device'])
+ if vol_mount_device == mount_device_:
+ return True
+ return False
+
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+ block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -966,6 +991,16 @@ class LibvirtConnection(driver.ComputeDriver):
else:
driver_type = 'raw'
+ for vol in block_device_mapping:
+ vol['mount_device'] = _strip_dev(vol['mount_device'])
+ ebs_root = self._volume_in_mapping(self.root_mount_device,
+ block_device_mapping)
+ if self._volume_in_mapping(self.local_mount_device,
+ block_device_mapping):
+ local_gb = False
+ else:
+ local_gb = inst_type['local_gb']
+
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -973,9 +1008,11 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
- 'local': inst_type['local_gb'],
+ 'local': local_gb,
'driver_type': driver_type,
- 'nics': nics}
+ 'nics': nics,
+ 'ebs_root': ebs_root,
+ 'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
@@ -991,10 +1028,13 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info
- def to_xml(self, instance, rescue=False, network_info=None):
+ def to_xml(self, instance, rescue=False, network_info=None,
+ block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
+ xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ block_device_mapping)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 48edc5384..70adba74f 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -90,8 +90,6 @@ def fetch_image(image, instance, **kwargs):
func = _get_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _get_s3_image
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- func = _get_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@@ -105,8 +103,6 @@ def upload_image(image, instance, **kwargs):
func = _put_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _put_s3_image
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- func = _put_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@@ -192,8 +188,6 @@ def get_vmdk_size_and_properties(image, instance):
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- raise NotImplementedError
LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
locals())
return size, properties
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 1c6d2572d..3c6345ec8 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(instance)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 113198689..d5ac39473 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -146,6 +146,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
def create_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
+ 'userdevice': '0',
'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index b9d4346e4..f91958c57 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -157,7 +157,6 @@ class VMHelper(HelperBase):
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
- rec['PV_args'] = 'clocksource=jiffies'
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
@@ -284,19 +283,16 @@ class VMHelper(HelperBase):
@classmethod
def get_vdi_for_vm_safely(cls, session, vm_ref):
- vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
- if vdi_refs is None:
- raise Exception(_("No VDIs found for VM %s") % vm_ref)
- else:
- num_vdis = len(vdi_refs)
- if num_vdis != 1:
- raise Exception(
- _("Unexpected number of VDIs (%(num_vdis)s) found"
- " for VM %(vm_ref)s") % locals())
-
- vdi_ref = vdi_refs[0]
- vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
- return vdi_ref, vdi_rec
+ """Retrieves the primary VDI for a VM"""
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ for vbd in vbd_refs:
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ # Convention dictates the primary VDI will be userdevice 0
+ if vbd_rec['userdevice'] == '0':
+ vdi_rec = session.get_xenapi().VDI.get_record(vbd_rec['VDI'])
+ return vbd_rec['VDI'], vdi_rec
+ raise exception.Error(_("No primary VDI found for "
+ "%(vm_ref)s") % locals())
@classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
@@ -330,12 +326,6 @@ class VMHelper(HelperBase):
return template_vm_ref, template_vdi_uuids
@classmethod
- def get_sr(cls, session, sr_label='slices'):
- """Finds the SR named by the given name label and returns
- the UUID"""
- return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
-
- @classmethod
def get_sr_path(cls, session):
"""Return the path to our storage repository
@@ -790,8 +780,7 @@ class VMHelper(HelperBase):
@classmethod
def scan_default_sr(cls, session):
"""Looks for the system default SR and triggers a re-scan"""
- #FIXME(sirp/mdietz): refactor scan_default_sr in there
- sr_ref = cls.get_sr(session)
+ sr_ref = find_sr(session)
session.call_xenapi('SR.scan', sr_ref)
@@ -883,7 +872,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ raise exception.Error(_("Unexpected number of VDIs"
+ " (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c6d2b0936..d105cf300 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -160,9 +160,24 @@ class VMOps(object):
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdis[0]['vdi_uuid'])
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
+
+ vm_mode = instance.vm_mode and instance.vm_mode.lower()
+ if vm_mode == 'pv':
+ use_pv_kernel = True
+ elif vm_mode in ('hv', 'hvm'):
+ use_pv_kernel = False
+ vm_mode = 'hvm' # Normalize
+ else:
+ use_pv_kernel = VMHelper.determine_is_pv(self._session,
+ instance.id, first_vdi_ref, disk_image_type,
+ instance.os_type)
+ vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+ if instance.vm_mode != vm_mode:
+ # Update database with normalized (or determined) value
+ db.instance_update(context.get_admin_context(),
+ instance['id'], {'vm_mode': vm_mode})
+
vm_ref = VMHelper.create_vm(self._session, instance,
kernel, ramdisk, use_pv_kernel)
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 6d828e109..5fcec1715 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -194,7 +194,7 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance"""
self._vmops.spawn(instance)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index b07f2e94b..7d27abff9 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -21,6 +21,9 @@ Handles all requests relating to volumes.
"""
+from eventlet import greenthread
+
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -44,7 +47,8 @@ class API(base.Base):
if snapshot['status'] != "available":
raise exception.ApiError(
_("Snapshot status must be available"))
- size = snapshot['volume_size']
+ if not size:
+ size = snapshot['volume_size']
if quota.allowed_volumes(context, 1, size) < 1:
pid = context.project_id
@@ -73,6 +77,14 @@ class API(base.Base):
"snapshot_id": snapshot_id}})
return volume
+ # TODO(yamahata): eliminate dumb polling
+ def wait_creation(self, context, volume_id):
+ while True:
+ volume = self.get(context, volume_id)
+ if volume['status'] != 'creating':
+ return
+ greenthread.sleep(1)
+
def delete(self, context, volume_id):
volume = self.get(context, volume_id)
if volume['status'] != "available":
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 87e13277f..23e845deb 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -582,6 +582,14 @@ class FakeISCSIDriver(ISCSIDriver):
"""No setup necessary in fake mode."""
pass
+ def discover_volume(self, context, volume):
+ """Discover volume on a remote host."""
+ return "/dev/disk/by-path/volume-id-%d" % volume['id']
+
+ def undiscover_volume(self, volume):
+ """Undiscover volume on a remote host."""
+ pass
+
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""