author     Lorin Hochstein <lorin@isi.edu>    2011-06-22 23:34:56 -0400
committer  Lorin Hochstein <lorin@isi.edu>    2011-06-22 23:34:56 -0400
commit     df44068a801aba373e8896bba235f2abca4e4c8a (patch)
tree       942f208195d995b678a151986ad3d25932f1c2b1
parent     6afcabac7442aa2e3944a3fef3d3452c189c1901 (diff)
parent     6d6720e9b7e52461238ece684c9acc7183673bb8 (diff)
Merged from trunk
-rwxr-xr-x  bin/nova-manage  78
-rw-r--r--  nova/api/ec2/apirequest.py  78
-rw-r--r--  nova/api/ec2/cloud.py  61
-rw-r--r--  nova/api/ec2/ec2utils.py  94
-rw-r--r--  nova/api/openstack/__init__.py  3
-rw-r--r--  nova/api/openstack/contrib/volumes.py  6
-rw-r--r--  nova/api/openstack/create_instance_helper.py  19
-rw-r--r--  nova/api/openstack/extensions.py  13
-rw-r--r--  nova/api/openstack/ips.py  19
-rw-r--r--  nova/api/openstack/notes.txt  3
-rw-r--r--  nova/api/openstack/server_metadata.py  38
-rw-r--r--  nova/api/openstack/servers.py  8
-rw-r--r--  nova/api/openstack/views/servers.py  3
-rw-r--r--  nova/api/openstack/wsgi.py  8
-rw-r--r--  nova/compute/api.py  118
-rw-r--r--  nova/compute/manager.py  248
-rw-r--r--  nova/compute/utils.py  29
-rw-r--r--  nova/db/api.py  70
-rw-r--r--  nova/db/sqlalchemy/api.py  211
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py  87
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py  43
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py  73
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py)  0
-rw-r--r--  nova/db/sqlalchemy/models.py  55
-rw-r--r--  nova/flags.py  4
-rw-r--r--  nova/image/fake.py  8
-rw-r--r--  nova/image/local.py  167
-rw-r--r--  nova/scheduler/api.py  103
-rw-r--r--  nova/scheduler/simple.py  8
-rw-r--r--  nova/tests/api/openstack/fakes.py  1
-rw-r--r--  nova/tests/api/openstack/test_extensions.py  13
-rw-r--r--  nova/tests/api/openstack/test_images.py  30
-rw-r--r--  nova/tests/api/openstack/test_limits.py  3
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py  55
-rw-r--r--  nova/tests/api/openstack/test_servers.py  76
-rw-r--r--  nova/tests/api/openstack/test_wsgi.py  4
-rw-r--r--  nova/tests/fake_flags.py  2
-rw-r--r--  nova/tests/integrated/api/client.py  16
-rw-r--r--  nova/tests/scheduler/test_scheduler.py  32
-rw-r--r--  nova/tests/test_api.py  2
-rw-r--r--  nova/tests/test_cloud.py  356
-rw-r--r--  nova/tests/test_compute.py  37
-rw-r--r--  nova/tests/test_utils.py  18
-rw-r--r--  nova/tests/test_xenapi.py  74
-rw-r--r--  nova/utils.py  15
-rw-r--r--  nova/virt/driver.py  6
-rw-r--r--  nova/virt/fake.py  21
-rw-r--r--  nova/virt/hyperv.py  2
-rw-r--r--  nova/virt/libvirt.xml.template  9
-rw-r--r--  nova/virt/libvirt/connection.py  76
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py  6
-rw-r--r--  nova/virt/vmwareapi_conn.py  2
-rw-r--r--  nova/virt/xenapi/fake.py  1
-rw-r--r--  nova/virt/xenapi/vm_utils.py  35
-rw-r--r--  nova/virt/xenapi/vmops.py  72
-rw-r--r--  nova/virt/xenapi_conn.py  2
-rw-r--r--  nova/volume/api.py  14
-rw-r--r--  nova/volume/driver.py  8
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/agent  36
-rw-r--r--  run_tests.py  6
-rw-r--r--  tools/pip-requires  2
61 files changed, 2092 insertions, 595 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 0147ae21b..000e834f0 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -97,7 +97,6 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('gateway_v6', 'nova.network.manager')
-flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
@@ -874,7 +873,7 @@ class InstanceTypeCommands(object):
try:
instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap)
- except exception.InvalidInputException:
+ except exception.InvalidInput, e:
print "Must supply valid parameters to create instance_type"
print e
sys.exit(1)
@@ -1056,16 +1055,6 @@ class ImageCommands(object):
machine_images = {}
other_images = {}
directory = os.path.abspath(directory)
- # NOTE(vish): If we're importing from the images path dir, attempt
- # to move the files out of the way before importing
- # so we aren't writing to the same directory. This
- # may fail if the dir was a mointpoint.
- if (FLAGS.image_service == 'nova.image.local.LocalImageService'
- and directory == os.path.abspath(FLAGS.images_path)):
- new_dir = "%s_bak" % directory
- os.rename(directory, new_dir)
- os.mkdir(directory)
- directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory):
try:
image_path = os.path.join(fn.rpartition('/')[0], 'image')
@@ -1082,6 +1071,70 @@ class ImageCommands(object):
self._convert_images(machine_images)
+class AgentBuildCommands(object):
+ """Class for managing agent builds."""
+
+ def create(self, os, architecture, version, url, md5hash,
+ hypervisor='xen'):
+ """Creates a new agent build.
+ arguments: os architecture version url md5hash [hypervisor='xen']"""
+ ctxt = context.get_admin_context()
+ agent_build = db.agent_build_create(ctxt,
+ {'hypervisor': hypervisor,
+ 'os': os,
+ 'architecture': architecture,
+ 'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+
+ def delete(self, os, architecture, hypervisor='xen'):
+ """Deletes an existing agent build.
+ arguments: os architecture [hypervisor='xen']"""
+ ctxt = context.get_admin_context()
+ agent_build_ref = db.agent_build_get_by_triple(ctxt,
+ hypervisor, os, architecture)
+ db.agent_build_destroy(ctxt, agent_build_ref['id'])
+
+ def list(self, hypervisor=None):
+ """Lists all agent builds.
+ arguments: [hypervisor]"""
+ fmt = "%-10s %-8s %12s %s"
+ ctxt = context.get_admin_context()
+ by_hypervisor = {}
+ for agent_build in db.agent_build_get_all(ctxt):
+ buildlist = by_hypervisor.get(agent_build.hypervisor)
+ if not buildlist:
+ buildlist = by_hypervisor[agent_build.hypervisor] = []
+
+ buildlist.append(agent_build)
+
+ for key, buildlist in by_hypervisor.iteritems():
+ if hypervisor and key != hypervisor:
+ continue
+
+ print "Hypervisor: %s" % key
+ print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32)
+ for agent_build in buildlist:
+ print fmt % (agent_build.os, agent_build.architecture,
+ agent_build.version, agent_build.md5hash)
+ print ' %s' % agent_build.url
+
+ print
+
+ def modify(self, os, architecture, version, url, md5hash,
+ hypervisor='xen'):
+ """Update an existing agent build.
+ arguments: os architecture version url md5hash [hypervisor='xen']
+ """
+ ctxt = context.get_admin_context()
+ agent_build_ref = db.agent_build_get_by_triple(ctxt,
+ hypervisor, os, architecture)
+ db.agent_build_update(ctxt, agent_build_ref['id'],
+ {'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+
+
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
@@ -1094,6 +1147,7 @@ class ConfigCommands(object):
CATEGORIES = [
('account', AccountCommands),
+ ('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
('fixed', FixedIpCommands),
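The new AgentBuildCommands class is wired into nova-manage through the 'agent' category above, so each method becomes a subcommand taking the positional arguments listed in its docstring. Below is a minimal standalone sketch of that dispatch; the stubbed command class and argument values are invented for illustration and nothing here touches the nova database.

class StubAgentBuildCommands(object):
    """Stand-in for AgentBuildCommands; prints instead of writing to the DB."""
    def create(self, os, architecture, version, url, md5hash, hypervisor='xen'):
        print 'create agent build: %s %s %s %s (%s)' % (
            os, architecture, version, url, hypervisor)

CATEGORIES = [('agent', StubAgentBuildCommands)]

# e.g. "nova-manage agent create linux x86-64 1.0 http://example.com/agent-1.0.tgz <md5>"
argv = ['agent', 'create', 'linux', 'x86-64', '1.0',
        'http://example.com/agent-1.0.tgz', 'd41d8cd98f00b204e9800998ecf8427e']
command_class = dict(CATEGORIES)[argv[0]]
getattr(command_class(), argv[1])(*argv[2:])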
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 6672e60bb..7d78c5cfa 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -21,22 +21,15 @@ APIRequest class
"""
import datetime
-import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import log as logging
+from nova.api.ec2 import ec2utils
LOG = logging.getLogger("nova.api.request")
-_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
-
-
-def _camelcase_to_underscore(str):
- return _c2u.sub(r'_\1', str).lower().strip('_')
-
-
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
@@ -51,59 +44,6 @@ def _database_to_isoformat(datetimeobj):
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
-def _try_convert(value):
- """Return a non-string from a string or unicode, if possible.
-
- ============= =====================================================
- When value is returns
- ============= =====================================================
- zero-length ''
- 'None' None
- 'True' True
- 'False' False
- '0', '-0' 0
- 0xN, -0xN int from hex (postitive) (N is any number)
- 0bN, -0bN int from binary (positive) (N is any number)
- * try conversion to int, float, complex, fallback value
-
- """
- if len(value) == 0:
- return ''
- if value == 'None':
- return None
- if value == 'True':
- return True
- if value == 'False':
- return False
- valueneg = value[1:] if value[0] == '-' else value
- if valueneg == '0':
- return 0
- if valueneg == '':
- return value
- if valueneg[0] == '0':
- if valueneg[1] in 'xX':
- return int(value, 16)
- elif valueneg[1] in 'bB':
- return int(value, 2)
- else:
- try:
- return int(value, 8)
- except ValueError:
- pass
- try:
- return int(value)
- except ValueError:
- pass
- try:
- return float(value)
- except ValueError:
- pass
- try:
- return complex(value)
- except ValueError:
- return value
-
-
class APIRequest(object):
def __init__(self, controller, action, version, args):
self.controller = controller
@@ -114,7 +54,7 @@ class APIRequest(object):
def invoke(self, context):
try:
method = getattr(self.controller,
- _camelcase_to_underscore(self.action))
+ ec2utils.camelcase_to_underscore(self.action))
except AttributeError:
controller = self.controller
action = self.action
@@ -125,19 +65,7 @@ class APIRequest(object):
# and reraise as 400 error.
raise Exception(_error)
- args = {}
- for key, value in self.args.items():
- parts = key.split(".")
- key = _camelcase_to_underscore(parts[0])
- if isinstance(value, str) or isinstance(value, unicode):
- # NOTE(vish): Automatically convert strings back
- # into their respective values
- value = _try_convert(value)
- if len(parts) > 1:
- d = args.get(key, {})
- d[parts[1]] = value
- value = d
- args[key] = value
+ args = ec2utils.dict_from_dotted_str(self.args.items())
for key in args.keys():
# NOTE(vish): Turn numeric dict keys into lists
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index e1c65ae40..97875f1f5 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -909,6 +909,25 @@ class CloudController(object):
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
+ for bdm in kwargs.get('block_device_mapping', []):
+ # NOTE(yamahata)
+ # BlockDeviceMapping.<N>.DeviceName
+ # BlockDeviceMapping.<N>.Ebs.SnapshotId
+ # BlockDeviceMapping.<N>.Ebs.VolumeSize
+ # BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
+ # BlockDeviceMapping.<N>.VirtualName
+ # => remove .Ebs and allow volume id in SnapshotId
+ ebs = bdm.pop('ebs', None)
+ if ebs:
+ ec2_id = ebs.pop('snapshot_id')
+ id = ec2utils.ec2_id_to_id(ec2_id)
+ if ec2_id.startswith('snap-'):
+ bdm['snapshot_id'] = id
+ elif ec2_id.startswith('vol-'):
+ bdm['volume_id'] = id
+ ebs.setdefault('delete_on_termination', True)
+ bdm.update(ebs)
+
image = self._get_image(context, kwargs['image_id'])
if image:
@@ -933,37 +952,54 @@ class CloudController(object):
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
- 'AvailabilityZone'))
+ 'AvailabilityZone'),
+ block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context,
instances[0]['reservation_id'])
+ def _do_instance(self, action, context, ec2_id):
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ action(context, instance_id=instance_id)
+
+ def _do_instances(self, action, context, instance_id):
+ for ec2_id in instance_id:
+ self._do_instance(action, context, ec2_id)
+
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
- for ec2_id in instance_id:
- instance_id = ec2utils.ec2_id_to_id(ec2_id)
- self.compute_api.delete(context, instance_id=instance_id)
+ self._do_instances(self.compute_api.delete, context, instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
- for ec2_id in instance_id:
- instance_id = ec2utils.ec2_id_to_id(ec2_id)
- self.compute_api.reboot(context, instance_id=instance_id)
+ self._do_instances(self.compute_api.reboot, context, instance_id)
+ return True
+
+ def stop_instances(self, context, instance_id, **kwargs):
+ """Stop each instances in instance_id.
+ Here instance_id is a list of instance ids"""
+ LOG.debug(_("Going to stop instances"))
+ self._do_instances(self.compute_api.stop, context, instance_id)
+ return True
+
+ def start_instances(self, context, instance_id, **kwargs):
+ """Start each instances in instance_id.
+ Here instance_id is a list of instance ids"""
+ LOG.debug(_("Going to start instances"))
+ self._do_instances(self.compute_api.start, context, instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.rescue(context, instance_id=instance_id)
+ self._do_instance(self.compute_api.rescue, context, instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.unrescue(context, instance_id=instance_id)
+ self._do_instance(self.compute_api.unrescue, context, instance_id)
return True
def update_instance(self, context, instance_id, **kwargs):
@@ -974,7 +1010,8 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
instance_id = ec2utils.ec2_id_to_id(instance_id)
- self.compute_api.update(context, instance_id=instance_id, **kwargs)
+ self.compute_api.update(context, instance_id=instance_id,
+ **changes)
return True
@staticmethod
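The NOTE(yamahata) block earlier in this hunk collapses the EC2-style BlockDeviceMapping.<N>.Ebs.* parameters into the flat dict the compute API expects. A self-contained sketch of that transformation follows; the ec2_id_to_id stand-in only mimics the real helper (hex suffix to int) and the ids are invented.

def fake_ec2_id_to_id(ec2_id):
    """Rough stand-in for ec2utils.ec2_id_to_id: 'snap-0000000c' -> 12."""
    return int(ec2_id.split('-')[-1], 16)

def flatten_ebs(bdm):
    """Mirror of the loop in run_instances: drop 'ebs', keep snapshot/volume id."""
    ebs = bdm.pop('ebs', None)
    if ebs:
        ec2_id = ebs.pop('snapshot_id')
        internal_id = fake_ec2_id_to_id(ec2_id)
        if ec2_id.startswith('snap-'):
            bdm['snapshot_id'] = internal_id
        elif ec2_id.startswith('vol-'):
            bdm['volume_id'] = internal_id
        ebs.setdefault('delete_on_termination', True)
        bdm.update(ebs)
    return bdm

print flatten_ebs({'device_name': '/dev/sdb1',
                   'ebs': {'snapshot_id': 'snap-0000000c', 'volume_size': 10}})
# -> {'device_name': '/dev/sdb1', 'snapshot_id': 12,
#     'volume_size': 10, 'delete_on_termination': True}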
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 163aa4ed2..222e1de1e 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -16,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
+
from nova import exception
@@ -30,3 +32,95 @@ def ec2_id_to_id(ec2_id):
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
+
+
+_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
+
+
+def camelcase_to_underscore(str):
+ return _c2u.sub(r'_\1', str).lower().strip('_')
+
+
+def _try_convert(value):
+ """Return a non-string from a string or unicode, if possible.
+
+ ============= =====================================================
+ When value is returns
+ ============= =====================================================
+ zero-length ''
+ 'None' None
+ 'True' True case insensitive
+ 'False' False case insensitive
+ '0', '-0' 0
+ 0xN, -0xN int from hex (positive) (N is any number)
+ 0bN, -0bN int from binary (positive) (N is any number)
+ * try conversion to int, float, complex, fallback value
+
+ """
+ if len(value) == 0:
+ return ''
+ if value == 'None':
+ return None
+ lowered_value = value.lower()
+ if lowered_value == 'true':
+ return True
+ if lowered_value == 'false':
+ return False
+ valueneg = value[1:] if value[0] == '-' else value
+ if valueneg == '0':
+ return 0
+ if valueneg == '':
+ return value
+ if valueneg[0] == '0':
+ if valueneg[1] in 'xX':
+ return int(value, 16)
+ elif valueneg[1] in 'bB':
+ return int(value, 2)
+ else:
+ try:
+ return int(value, 8)
+ except ValueError:
+ pass
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ try:
+ return float(value)
+ except ValueError:
+ pass
+ try:
+ return complex(value)
+ except ValueError:
+ return value
+
+
+def dict_from_dotted_str(items):
+ """parse multi dot-separated argument into dict.
+ EBS boot uses multi dot-separeted arguments like
+ BlockDeviceMapping.1.DeviceName=snap-id
+ Convert the above into
+ {'block_device_mapping': {'1': {'device_name': snap-id}}}
+ """
+ args = {}
+ for key, value in items:
+ parts = key.split(".")
+ key = camelcase_to_underscore(parts[0])
+ if isinstance(value, str) or isinstance(value, unicode):
+ # NOTE(vish): Automatically convert strings back
+ # into their respective values
+ value = _try_convert(value)
+
+ if len(parts) > 1:
+ d = args.get(key, {})
+ args[key] = d
+ for k in parts[1:-1]:
+ k = camelcase_to_underscore(k)
+ v = d.get(k, {})
+ d[k] = v
+ d = v
+ d[camelcase_to_underscore(parts[-1])] = value
+ else:
+ args[key] = value
+
+ return args
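Assuming the nova source tree is importable, the following short illustration shows what dict_from_dotted_str does to the dotted query parameters the EC2 API receives; the parameter values are invented.

from nova.api.ec2 import ec2utils

params = {
    'InstanceType': 'm1.small',
    'BlockDeviceMapping.1.DeviceName': '/dev/sdb1',
    'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678',
    'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'False',
}
print ec2utils.dict_from_dotted_str(params.items())
# {'instance_type': 'm1.small',
#  'block_device_mapping': {'1': {'device_name': '/dev/sdb1',
#                                 'ebs': {'snapshot_id': 'snap-12345678',
#                                         'delete_on_termination': False}}}}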
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 464ba1a9a..857a5431b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -114,8 +114,7 @@ class APIRouter(base_wsgi.Router):
collection={'detail': 'GET',
'info': 'GET',
'select': 'POST',
- 'boot': 'POST'
- })
+ 'boot': 'POST'})
mapper.resource("console", "consoles",
controller=consoles.create_resource(),
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index feabdce89..e5e2c5b50 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -301,7 +301,7 @@ class Volumes(extensions.ExtensionDescriptor):
return "Volumes"
def get_alias(self):
- return "VOLUMES"
+ return "os-volumes"
def get_description(self):
return "Volumes support"
@@ -317,12 +317,12 @@ class Volumes(extensions.ExtensionDescriptor):
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
- res = extensions.ResourceExtension('volumes',
+ res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
- res = extensions.ResourceExtension('volume_attachments',
+ res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index fbc6318ef..436e524c1 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -94,7 +94,7 @@ class CreateInstanceHelper(object):
except Exception, e:
msg = _("Cannot find requested image %(image_href)s: %(e)s" %
locals())
- raise faults.Fault(exc.HTTPBadRequest(msg))
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
personality = body['server'].get('personality')
@@ -106,7 +106,7 @@ class CreateInstanceHelper(object):
if not 'name' in body['server']:
msg = _("Server name is not defined")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
zone_blob = body['server'].get('blob')
name = body['server']['name']
@@ -121,8 +121,7 @@ class CreateInstanceHelper(object):
extra_values = {
'instance_type': inst_type,
'image_ref': image_href,
- 'password': password
- }
+ 'password': password}
return (extra_values,
create_method(context,
@@ -138,14 +137,12 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
- reservation_id=reservation_id
- )
- )
+ reservation_id=reservation_id))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
- raise faults.Fault(exc.HTTPBadRequest(msg))
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
# Let the caller deal with unhandled exceptions.
@@ -180,11 +177,11 @@ class CreateInstanceHelper(object):
def _validate_server_name(self, value):
if not isinstance(value, basestring):
msg = _("Server name is not a string or unicode")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
if value.strip() == '':
msg = _("Server name is an empty string")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
def _get_kernel_ramdisk_from_image(self, req, image_id):
"""Fetch an image from the ImageService, then if present, return the
@@ -265,7 +262,7 @@ class CreateInstanceHelper(object):
return utils.generate_password(16)
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
- raise exc.HTTPBadRequest(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
return password
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 54e17e23d..da06ecd15 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -374,6 +374,8 @@ class ExtensionManager(object):
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+ return False
+ return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
@@ -412,15 +414,16 @@ class ExtensionManager(object):
'file': ext_path})
continue
new_ext = new_ext_class()
- self._check_extension(new_ext)
- self._add_extension(new_ext)
+ self.add_extension(new_ext)
+
+ def add_extension(self, ext):
+ # Do nothing if the extension doesn't check out
+ if not self._check_extension(ext):
+ return
- def _add_extension(self, ext):
alias = ext.get_alias()
LOG.audit(_('Loaded extension: %s'), alias)
- self._check_extension(ext)
-
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py
index abea71830..71646b6d3 100644
--- a/nova/api/openstack/ips.py
+++ b/nova/api/openstack/ips.py
@@ -32,25 +32,24 @@ class Controller(object):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
- def index(self, req, server_id):
+ def _get_instance(self, req, server_id):
try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
+ instance = self.compute_api.get(
+ req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+ return instance
+
+ def index(self, req, server_id):
+ instance = self._get_instance(req, server_id)
return {'addresses': self.builder.build(instance)}
def public(self, req, server_id):
- try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- except nova.exception.NotFound:
- return faults.Fault(exc.HTTPNotFound())
+ instance = self._get_instance(req, server_id)
return {'public': self.builder.build_public_parts(instance)}
def private(self, req, server_id):
- try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- except nova.exception.NotFound:
- return faults.Fault(exc.HTTPNotFound())
+ instance = self._get_instance(req, server_id)
return {'private': self.builder.build_private_parts(instance)}
def show(self, req, server_id, id):
diff --git a/nova/api/openstack/notes.txt b/nova/api/openstack/notes.txt
index 2330f1002..4e95bffc8 100644
--- a/nova/api/openstack/notes.txt
+++ b/nova/api/openstack/notes.txt
@@ -7,9 +7,6 @@ image ids.
GlanceImageService(ImageService):
image ids are URIs.
-LocalImageService(ImageService):
-image ids are random strings.
-
OpenstackAPITranslationStore:
translates RS server/images/flavor/etc ids into formats required
by a given ImageService strategy.
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index 57666f6b7..8a314de22 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -18,9 +18,10 @@
from webob import exc
from nova import compute
-from nova import quota
from nova.api.openstack import faults
from nova.api.openstack import wsgi
+from nova import exception
+from nova import quota
class Controller(object):
@@ -45,7 +46,11 @@ class Controller(object):
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
- return self._get_metadata(context, server_id)
+ try:
+ return self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def create(self, req, server_id, body):
self._check_body(body)
@@ -55,8 +60,13 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
metadata)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
+
return body
def update(self, req, server_id, id, body):
@@ -72,6 +82,10 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
body)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
@@ -80,16 +94,26 @@ class Controller(object):
def show(self, req, server_id, id):
""" Return a single metadata item """
context = req.environ['nova.context']
- data = self._get_metadata(context, server_id)
- if id in data['metadata']:
+ try:
+ data = self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
+ try:
return {id: data['metadata'][id]}
- else:
- return faults.Fault(exc.HTTPNotFound())
+ except KeyError:
+ msg = _("metadata item %s was not found" % (id))
+ raise exc.HTTPNotFound(explanation=msg)
def delete(self, req, server_id, id):
""" Deletes an existing metadata """
context = req.environ['nova.context']
- self.compute_api.delete_instance_metadata(context, server_id, id)
+ try:
+ self.compute_api.delete_instance_metadata(context, server_id, id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 798fdd7f7..b82a6de19 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -51,7 +51,7 @@ class Controller(object):
try:
servers = self._items(req, is_detail=False)
except exception.Invalid as err:
- return exc.HTTPBadRequest(str(err))
+ return exc.HTTPBadRequest(explanation=str(err))
return servers
def detail(self, req):
@@ -59,7 +59,7 @@ class Controller(object):
try:
servers = self._items(req, is_detail=True)
except exception.Invalid as err:
- return exc.HTTPBadRequest(str(err))
+ return exc.HTTPBadRequest(explanation=str(err))
return servers
def _get_view_builder(self, req):
@@ -488,11 +488,11 @@ class ControllerV11(Controller):
if (not 'changePassword' in input_dict
or not 'adminPass' in input_dict['changePassword']):
msg = _("No adminPass was specified")
- return exc.HTTPBadRequest(msg)
+ return exc.HTTPBadRequest(explanation=msg)
password = input_dict['changePassword']['adminPass']
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
- return exc.HTTPBadRequest(msg)
+ return exc.HTTPBadRequest(explanation=msg)
self.compute_api.set_admin_password(context, id, password)
return exc.HTTPAccepted()
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 245d0e3fa..cbfa5aae7 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -75,7 +75,7 @@ class ViewBuilder(object):
}
inst_dict = {
- 'id': int(inst['id']),
+ 'id': inst['id'],
'name': inst['display_name'],
'addresses': self.addresses_builder.build(inst),
'status': power_mapping[inst.get('state')]}
@@ -99,6 +99,7 @@ class ViewBuilder(object):
self._build_image(inst_dict, inst)
self._build_flavor(inst_dict, inst)
+ inst_dict['uuid'] = inst['uuid']
return dict(server=inst_dict)
def _build_image(self, response, inst):
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 3f8acf339..a57b7f72b 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -363,11 +363,11 @@ class Resource(wsgi.Application):
action, action_args, accept = self.deserializer.deserialize(
request)
except exception.InvalidContentType:
- return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
+ msg = _("Unsupported Content-Type")
+ return webob.exc.HTTPBadRequest(explanation=msg)
except exception.MalformedRequestBody:
- explanation = _("Malformed request body")
- return faults.Fault(webob.exc.HTTPBadRequest(
- explanation=explanation))
+ msg = _("Malformed request body")
+ return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
action_result = self.dispatch(request, action, action_args)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e2c4cf8d7..a7ea88d51 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -34,6 +34,7 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
return str(instance_id)
+def _is_able_to_shutdown(instance, instance_id):
+ states = {'terminating': "Instance %s is already being terminated",
+ 'migrating': "Instance %s is being migrated",
+ 'stopping': "Instance %s is being stopped"}
+ msg = states.get(instance['state_description'])
+ if msg:
+ LOG.warning(_(msg), instance_id)
+ return False
+
+ return True
+
+
class API(base.Base):
"""API for interacting with the compute manager."""
@@ -165,6 +178,9 @@ class API(base.Base):
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
+ architecture = None
+ if 'properties' in image and 'arch' in image['properties']:
+ architecture = image['properties']['arch']
vm_mode = None
if 'properties' in image and 'vm_mode' in image['properties']:
vm_mode = image['properties']['vm_mode']
@@ -230,12 +246,13 @@ class API(base.Base):
'metadata': metadata,
'availability_zone': availability_zone,
'os_type': os_type,
+ 'architecture': architecture,
'vm_mode': vm_mode}
return (num_instances, base_options, security_groups)
def create_db_entry_for_new_instance(self, context, base_options,
- security_groups, num=1):
+ security_groups, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security
groups, MAC address, etc). This will called by create()
@@ -255,6 +272,23 @@ class API(base.Base):
instance_id,
security_group_id)
+ # NOTE(yamahata)
+ # tell vm driver to attach volume at boot time by updating
+ # BlockDeviceMapping
+ for bdm in block_device_mapping:
+ LOG.debug(_('bdm %s'), bdm)
+ assert 'device_name' in bdm
+ values = {
+ 'instance_id': instance_id,
+ 'device_name': bdm['device_name'],
+ 'delete_on_termination': bdm.get('delete_on_termination'),
+ 'virtual_name': bdm.get('virtual_name'),
+ 'snapshot_id': bdm.get('snapshot_id'),
+ 'volume_id': bdm.get('volume_id'),
+ 'volume_size': bdm.get('volume_size'),
+ 'no_device': bdm.get('no_device')}
+ self.db.block_device_mapping_create(elevated, values)
+
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
@@ -339,7 +373,7 @@ class API(base.Base):
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, block_device_mapping=None):
"""
Provision the instances by sending off a series of single
instance requests to the Schedulers. This is fine for trivial
@@ -360,11 +394,13 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
+ block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = self.create_db_entry_for_new_instance(context,
- base_options, security_groups, num=num)
+ base_options, security_groups,
+ block_device_mapping, num=num)
instances.append(instance)
instance_id = instance['id']
@@ -474,24 +510,22 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
- @scheduler_api.reroute_compute("delete")
- def delete(self, context, instance_id):
- """Terminate an instance."""
- LOG.debug(_("Going to try to terminate %s"), instance_id)
+ def _get_instance(self, context, instance_id, action_str):
try:
- instance = self.get(context, instance_id)
+ return self.get(context, instance_id)
except exception.NotFound:
- LOG.warning(_("Instance %s was not found during terminate"),
- instance_id)
+ LOG.warning(_("Instance %(instance_id)s was not found during "
+ "%(action_str)s") %
+ {'instance_id': instance_id, 'action_str': action_str})
raise
- if instance['state_description'] == 'terminating':
- LOG.warning(_("Instance %s is already being terminated"),
- instance_id)
- return
+ @scheduler_api.reroute_compute("delete")
+ def delete(self, context, instance_id):
+ """Terminate an instance."""
+ LOG.debug(_("Going to try to terminate %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'terminating')
- if instance['state_description'] == 'migrating':
- LOG.warning(_("Instance %s is being migrated"), instance_id)
+ if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
@@ -505,12 +539,59 @@ class API(base.Base):
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
+ terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id)
+ @scheduler_api.reroute_compute("stop")
+ def stop(self, context, instance_id):
+ """Stop an instance."""
+ LOG.debug(_("Going to try to stop %s"), instance_id)
+
+ instance = self._get_instance(context, instance_id, 'stopping')
+ if not _is_able_to_shutdown(instance, instance_id):
+ return
+
+ self.update(context,
+ instance['id'],
+ state_description='stopping',
+ state=power_state.NOSTATE,
+ terminated_at=utils.utcnow())
+
+ host = instance['host']
+ if host:
+ self._cast_compute_message('stop_instance', context,
+ instance_id, host)
+
+ def start(self, context, instance_id):
+ """Start an instance."""
+ LOG.debug(_("Going to try to start %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'starting')
+ if instance['state_description'] != 'stopped':
+ _state_description = instance['state_description']
+ LOG.warning(_("Instance %(instance_id)s is not "
+ "stopped(%(_state_description)s)") % locals())
+ return
+
+ # TODO(yamahata): injected_files isn't supported right now.
+ # It is used only for osapi. not for ec2 api.
+ # availability_zone isn't used by run_instance.
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "start_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id}})
+
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
- rv = self.db.instance_get(context, instance_id)
- return dict(rv.iteritems())
+ # NOTE(sirp): id used to be exclusively integer IDs; now we're
+ # accepting both UUIDs and integer IDs. The handling of this
+ # is done in db/sqlalchemy/api/instance_get
+ if utils.is_uuid_like(instance_id):
+ uuid = instance_id
+ instance = self.db.instance_get_by_uuid(context, uuid)
+ else:
+ instance = self.db.instance_get(context, instance_id)
+ return dict(instance.iteritems())
@scheduler_api.reroute_compute("get")
def routing_get(self, context, instance_id):
@@ -526,6 +607,7 @@ class API(base.Base):
"""Get all instances with this reservation_id, across
all available Zones (if any).
"""
+ context = context.elevated()
instances = self.db.instance_get_all_by_reservation(
context, reservation_id)
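The new stop/start paths in compute.api reuse the same guard as delete: _is_able_to_shutdown() refuses to act while an instance is already terminating, migrating, or stopping. A dependency-free sketch of that check, using invented instance records, is below.

def is_able_to_shutdown(instance, instance_id):
    """Mirror of compute.api._is_able_to_shutdown without the logging machinery."""
    states = {'terminating': "Instance %s is already being terminated",
              'migrating': "Instance %s is being migrated",
              'stopping': "Instance %s is being stopped"}
    msg = states.get(instance['state_description'])
    if msg:
        print msg % instance_id
        return False
    return True

print is_able_to_shutdown({'state_description': 'stopping'}, 42)   # False
print is_able_to_shutdown({'state_description': 'running'}, 42)    # True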
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 245958de7..54eaf2082 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -53,6 +53,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -214,8 +215,63 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
- @exception.wrap_exception
- def run_instance(self, context, instance_id, **kwargs):
+ def _setup_block_device_mapping(self, context, instance_id):
+ """setup volumes for block device mapping"""
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'block_device_mapping')
+
+ volume_api = volume.API()
+ block_device_mapping = []
+ for bdm in self.db.block_device_mapping_get_all_by_instance(
+ context, instance_id):
+ LOG.debug(_("setting up bdm %s"), bdm)
+ if ((bdm['snapshot_id'] is not None) and
+ (bdm['volume_id'] is None)):
+ # TODO(yamahata): default name and description
+ vol = volume_api.create(context, bdm['volume_size'],
+ bdm['snapshot_id'], '', '')
+ # TODO(yamahata): creating volume simultaneously
+ # reduces creation time?
+ volume_api.wait_creation(context, vol['id'])
+ self.db.block_device_mapping_update(
+ context, bdm['id'], {'volume_id': vol['id']})
+ bdm['volume_id'] = vol['id']
+
+ if not ((bdm['snapshot_id'] is None) or
+ (bdm['volume_id'] is not None)):
+ LOG.error(_('corrupted state of block device mapping '
+ 'id: %(id)s '
+ 'snapshot: %(snapshot_id)s volume: %(volume_id)s') %
+ {'id': bdm['id'],
+ 'snapshot_id': bdm['snapshot_id'],
+ 'volume_id': bdm['volume_id']})
+ raise exception.ApiError(_('broken block device mapping %d') %
+ bdm['id'])
+
+ if bdm['volume_id'] is not None:
+ volume_api.check_attach(context,
+ volume_id=bdm['volume_id'])
+ dev_path = self._attach_volume_boot(context, instance_id,
+ bdm['volume_id'],
+ bdm['device_name'])
+ block_device_mapping.append({'device_path': dev_path,
+ 'mount_device':
+ bdm['device_name']})
+ elif bdm['virtual_name'] is not None:
+ # TODO(yamahata): ephemeral/swap device support
+ LOG.debug(_('block_device_mapping: '
+ 'ephemeral device is not supported yet'))
+ else:
+ # TODO(yamahata): NoDevice support
+ assert bdm['no_device']
+ LOG.debug(_('block_device_mapping: '
+ 'no device is not supported yet'))
+
+ return block_device_mapping
+
+ def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -235,54 +291,79 @@ class ComputeManager(manager.SchedulerDependentManager):
'networking')
is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
- # NOTE(vish): This could be a cast because we don't do anything
- # with the address currently, but I'm leaving it as
- # a call to ensure that network setup completes. We
- # will eventually also need to save the address here.
- if not FLAGS.stub_network:
- address = rpc.call(context,
- self.get_network_topic(context),
- {"method": "allocate_fixed_ip",
- "args": {"instance_id": instance_id,
- "vpn": is_vpn}})
-
- self.network_manager.setup_compute_network(context,
- instance_id)
-
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
-
try:
- self.driver.spawn(instance_ref)
- except Exception as ex: # pylint: disable=W0702
- msg = _("Instance '%(instance_id)s' failed to spawn. Is "
- "virtualization enabled in the BIOS? Details: "
- "%(ex)s") % locals()
- LOG.exception(msg)
-
- if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
- public_ip = self.network_api.allocate_floating_ip(context)
-
- self.db.floating_ip_set_auto_assigned(context, public_ip)
- fixed_ip = self.db.fixed_ip_get_by_address(context, address)
- floating_ip = self.db.floating_ip_get_by_address(context,
- public_ip)
-
- self.network_api.associate_floating_ip(context,
- floating_ip,
- fixed_ip,
- affect_auto_assigned=True)
+ # NOTE(vish): This could be a cast because we don't do anything
+ # with the address currently, but I'm leaving it as
+ # a call to ensure that network setup completes. We
+ # will eventually also need to save the address here.
+ if not FLAGS.stub_network:
+ address = rpc.call(context,
+ self.get_network_topic(context),
+ {"method": "allocate_fixed_ip",
+ "args": {"instance_id": instance_id,
+ "vpn": is_vpn}})
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
+ self.network_manager.setup_compute_network(context,
+ instance_id)
+
+ block_device_mapping = self._setup_block_device_mapping(
+ context,
+ instance_id)
+
+ # TODO(vish) check to make sure the availability zone matches
+ self._update_state(context, instance_id, power_state.BUILDING)
+
+ try:
+ self.driver.spawn(instance_ref,
+ block_device_mapping=block_device_mapping)
+ except Exception as ex: # pylint: disable=W0702
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
+
+ if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
+ public_ip = self.network_api.allocate_floating_ip(context)
+
+ self.db.floating_ip_set_auto_assigned(context, public_ip)
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ public_ip)
+
+ self.network_api.associate_floating_ip(
+ context,
+ floating_ip,
+ fixed_ip,
+ affect_auto_assigned=True)
+
+ self._update_launched_at(context, instance_id)
+ self._update_state(context, instance_id)
+ except exception.InstanceNotFound:
+ # FIXME(wwolf): We are just ignoring InstanceNotFound
+ # exceptions here in case the instance was immediately
+ # deleted before it actually got created. This should
+ # be fixed once we have no-db-messaging
+ pass
+
+ @exception.wrap_exception
+ def run_instance(self, context, instance_id, **kwargs):
+ self._run_instance(context, instance_id, **kwargs)
@exception.wrap_exception
@checks_instance_lock
- def terminate_instance(self, context, instance_id):
- """Terminate an instance on this host."""
+ def start_instance(self, context, instance_id):
+ """Starting an instance on this host."""
+ # TODO(yamahata): injected_files isn't supported.
+ # Anyway OSAPI doesn't support stop/start yet
+ self._run_instance(context, instance_id)
+
+ def _shutdown_instance(self, context, instance_id, action_str):
+ """Shutdown an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("Terminating instance %s"), instance_id, context=context)
+ LOG.audit(_("%(action_str)s instance %(instance_id)s") %
+ {'action_str': action_str, 'instance_id': instance_id},
+ context=context)
fixed_ip = instance_ref.get('fixed_ip')
if not FLAGS.stub_network and fixed_ip:
@@ -318,18 +399,36 @@ class ComputeManager(manager.SchedulerDependentManager):
volumes = instance_ref.get('volumes') or []
for volume in volumes:
- self.detach_volume(context, instance_id, volume['id'])
- if instance_ref['state'] == power_state.SHUTOFF:
+ self._detach_volume(context, instance_id, volume['id'], False)
+
+ if (instance_ref['state'] == power_state.SHUTOFF and
+ instance_ref['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
+ if action_str == 'Terminating':
+ terminate_volumes(self.db, context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def terminate_instance(self, context, instance_id):
+ """Terminate an instance on this host."""
+ self._shutdown_instance(context, instance_id, 'Terminating')
+
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
+ def stop_instance(self, context, instance_id):
+ """Stopping an instance on this host."""
+ self._shutdown_instance(context, instance_id, 'Stopping')
+ # instance state will be updated to stopped by _poll_instance_states()
+
+ @exception.wrap_exception
+ @checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -472,6 +571,24 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
+ def agent_update(self, context, instance_id, url, md5hash):
+ """Update agent running on an instance on this host."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to update agent on a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ nm = instance_ref['name']
+ msg = _('instance %(nm)s: updating agent to %(url)s') % locals()
+ LOG.audit(msg)
+ self.driver.agent_update(instance_ref, url, md5hash)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
context = context.elevated()
@@ -693,7 +810,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
-
if instance_ref["state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
@@ -800,6 +916,22 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref)
+ def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
+ """Attach a volume to an instance at boot time. So actual attach
+ is done by instance creation"""
+
+ # TODO(yamahata):
+ # should move check_attach to volume manager?
+ volume.API().check_attach(context, volume_id)
+
+ context = context.elevated()
+ LOG.audit(_("instance %(instance_id)s: booting with "
+ "volume %(volume_id)s at %(mountpoint)s") %
+ locals(), context=context)
+ dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
+ self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+ return dev_path
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -817,6 +949,16 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id,
instance_id,
mountpoint)
+ values = {
+ 'instance_id': instance_id,
+ 'device_name': mountpoint,
+ 'delete_on_termination': False,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': volume_id,
+ 'volume_size': None,
+ 'no_device': None}
+ self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
@@ -831,7 +973,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
- def detach_volume(self, context, instance_id, volume_id):
+ def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -847,8 +989,15 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
+ if destroy_bdm:
+ self.db.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance_id, volume_id)
return True
+ def detach_volume(self, context, instance_id, volume_id):
+ """Detach a volume from an instance."""
+ return self._detach_volume(context, instance_id, volume_id, True)
+
def remove_volume(self, context, volume_id):
"""Remove volume on compute host.
@@ -1174,11 +1323,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"State=%(db_state)s, so setting state to "
"shutoff.") % locals())
vm_state = power_state.SHUTOFF
+ if db_instance['state_description'] == 'stopping':
+ self.db.instance_stop(context, db_instance['id'])
+ continue
else:
vm_state = vm_instance.state
vms_not_found_in_db.remove(name)
- if db_instance['state_description'] == 'migrating':
+ if (db_instance['state_description'] in ['migrating', 'stopping']):
# A situation which db record exists, but no instance"
# sometimes occurs while live-migration at src compute,
# this case should be ignored.
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
new file mode 100644
index 000000000..c8cb9bab8
--- /dev/null
+++ b/nova/compute/utils.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 VA Linux Systems Japan K.K
+# Copyright (c) 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import volume
+
+
+def terminate_volumes(db, context, instance_id):
+ """delete volumes of delete_on_termination=True in block device mapping"""
+ volume_api = volume.API()
+ for bdm in db.block_device_mapping_get_all_by_instance(context,
+ instance_id):
+ #LOG.debug(_("terminating bdm %s") % bdm)
+ if bdm['volume_id'] and bdm['delete_on_termination']:
+ volume_api.delete(context, bdm['volume_id'])
+ db.block_device_mapping_destroy(context, bdm['id'])
diff --git a/nova/db/api.py b/nova/db/api.py
index a1a434d3d..0c9f45db6 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -414,6 +414,16 @@ def instance_destroy(context, instance_id):
return IMPL.instance_destroy(context, instance_id)
+def instance_stop(context, instance_id):
+ """Stop the instance or raise if it does not exist."""
+ return IMPL.instance_stop(context, instance_id)
+
+
+def instance_get_by_uuid(context, uuid):
+ """Get an instance or raise if it does not exist."""
+ return IMPL.instance_get_by_uuid(context, uuid)
+
+
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
@@ -920,6 +930,36 @@ def snapshot_update(context, snapshot_id, values):
####################
+def block_device_mapping_create(context, values):
+ """Create an entry of block device mapping"""
+ return IMPL.block_device_mapping_create(context, values)
+
+
+def block_device_mapping_update(context, bdm_id, values):
+ """Create an entry of block device mapping"""
+ return IMPL.block_device_mapping_update(context, bdm_id, values)
+
+
+def block_device_mapping_get_all_by_instance(context, instance_id):
+ """Get all block device mapping belonging to a instance"""
+ return IMPL.block_device_mapping_get_all_by_instance(context, instance_id)
+
+
+def block_device_mapping_destroy(context, bdm_id):
+ """Destroy the block device mapping."""
+ return IMPL.block_device_mapping_destroy(context, bdm_id)
+
+
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+ volume_id):
+ """Destroy the block device mapping or raise if it does not exist."""
+ return IMPL.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance_id, volume_id)
+
+
+####################
+
+
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
@@ -1248,6 +1288,36 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
"""Create or update instance metadata."""
IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
+
+####################
+
+
+def agent_build_get_all(context):
+ """Get all agent builds."""
+ return IMPL.agent_build_get_all(context)
+
+
+def agent_build_destroy(context, agent_update_id):
+ """Destroy agent build entry."""
+ IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+ """Update agent build entry."""
+ IMPL.agent_build_update(context, agent_build_id, values)
+
+
+def agent_build_create(context, values):
+ """Create a new agent build entry."""
+ return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+ """Get agent build by hypervisor/OS/architecture triple."""
+ return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+ architecture)
+
+
####################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 478837f7f..0692bc917 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -18,7 +18,7 @@
"""
Implementation of SQLAlchemy backend.
"""
-
+import traceback
import warnings
from nova import db
@@ -797,6 +797,8 @@ def instance_create(context, values):
values['metadata'] = _metadata_refs(values.get('metadata'))
instance_ref = models.Instance()
+ instance_ref['uuid'] = str(utils.gen_uuid())
+
instance_ref.update(values)
session = get_session()
@@ -840,37 +842,65 @@ def instance_destroy(context, instance_id):
@require_context
+def instance_stop(context, instance_id):
+ session = get_session()
+ with session.begin():
+ from nova.compute import power_state
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'host': None,
+ 'state': power_state.SHUTOFF,
+ 'state_description': 'stopped',
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'updated_at': literal_column('updated_at')})
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ update({'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_get_by_uuid(context, uuid, session=None):
+ partial = _build_instance_get(context, session=session)
+ result = partial.filter_by(uuid=uuid)
+ result = result.first()
+ if not result:
+ # FIXME(sirp): it would be nice if InstanceNotFound would accept a
+ # uuid parameter as well
+ raise exception.InstanceNotFound(instance_id=uuid)
+ return result
+
+
+@require_context
def instance_get(context, instance_id, session=None):
+ partial = _build_instance_get(context, session=session)
+ result = partial.filter_by(id=instance_id)
+ result = result.first()
+ if not result:
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ return result
+
+
+@require_context
+def _build_instance_get(context, session=None):
if not session:
session = get_session()
- result = None
+
+ partial = session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('security_groups.rules')).\
+ options(joinedload('volumes')).\
+ options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
+ options(joinedload('instance_type'))
if is_admin_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload_all('security_groups.rules')).\
- options(joinedload('volumes')).\
- options(joinedload_all('fixed_ip.network')).\
- options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
- filter_by(id=instance_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
+ partial = partial.filter_by(deleted=can_read_deleted(context))
elif is_user_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload_all('security_groups.rules')).\
- options(joinedload('volumes')).\
- options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
- filter_by(project_id=context.project_id).\
- filter_by(id=instance_id).\
- filter_by(deleted=False).\
- first()
- if not result:
- raise exception.InstanceNotFound(instance_id=instance_id)
-
- return result
+ partial = partial.filter_by(project_id=context.project_id).\
+ filter_by(deleted=False)
+ return partial
@require_admin_context
@@ -1883,6 +1913,66 @@ def snapshot_update(context, snapshot_id, values):
@require_context
+def block_device_mapping_create(context, values):
+ bdm_ref = models.BlockDeviceMapping()
+ bdm_ref.update(values)
+
+ session = get_session()
+ with session.begin():
+ bdm_ref.save(session=session)
+
+
+@require_context
+def block_device_mapping_update(context, bdm_id, values):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(id=bdm_id).\
+ filter_by(deleted=False).\
+ update(values)
+
+
+@require_context
+def block_device_mapping_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.BlockDeviceMapping).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ return []
+ return result
+
+
+@require_context
+def block_device_mapping_destroy(context, bdm_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(id=bdm_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+ volume_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.BlockDeviceMapping).\
+ filter_by(instance_id=instance_id).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+###################
+
+
+@require_context
def security_group_get_all(context):
session = get_session()
return session.query(models.SecurityGroup).\
@@ -2630,7 +2720,17 @@ def zone_get_all(context):
####################
+
+def require_instance_exists(func):
+ def new_func(context, instance_id, *args, **kwargs):
+ db.api.instance_get(context, instance_id)
+ return func(context, instance_id, *args, **kwargs)
+ new_func.__name__ = func.__name__
+ return new_func
+
+
@require_context
+@require_instance_exists
def instance_metadata_get(context, instance_id):
session = get_session()
@@ -2646,6 +2746,7 @@ def instance_metadata_get(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2658,6 +2759,7 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_delete_all(context, instance_id):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2669,6 +2771,7 @@ def instance_metadata_delete_all(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_get_item(context, instance_id, key):
session = get_session()
@@ -2685,6 +2788,7 @@ def instance_metadata_get_item(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_update_or_create(context, instance_id, metadata):
session = get_session()
@@ -2704,6 +2808,61 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
return metadata
+
+####################
+
+
+@require_admin_context
+def agent_build_create(context, values):
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ agent_build_ref.save()
+ return agent_build_ref
+
+
+@require_admin_context
+def agent_build_get_by_triple(context, hypervisor, os, architecture,
+ session=None):
+ if not session:
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(hypervisor=hypervisor).\
+ filter_by(os=os).\
+ filter_by(architecture=architecture).\
+ filter_by(deleted=False).\
+ first()
+
+
+@require_admin_context
+def agent_build_get_all(context):
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(deleted=False).\
+ all()
+
+
+@require_admin_context
+def agent_build_destroy(context, agent_build_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def agent_build_update(context, agent_build_id, values):
+ session = get_session()
+ with session.begin():
+ agent_build_ref = session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id). \
+ first()
+ agent_build_ref.update(values)
+ agent_build_ref.save(session=session)
+
+
####################
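The instance_get/instance_get_by_uuid split above shares one eagerly-loaded query via _build_instance_get, and require_instance_exists wraps the metadata calls so they fail fast on missing instances. A minimal standalone sketch of that decorator pattern follows; the INSTANCES dict and the local InstanceNotFound class are illustrative stand-ins for the nova db layer, not part of this change:

class InstanceNotFound(Exception):
    pass


# Stand-in for the database: one known instance id.
INSTANCES = {1: {'id': 1, 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}}


def instance_get(context, instance_id):
    try:
        return INSTANCES[instance_id]
    except KeyError:
        raise InstanceNotFound(instance_id)


def require_instance_exists(func):
    # Look the instance up first so callers get InstanceNotFound instead of
    # silently operating on metadata for an instance that does not exist.
    def new_func(context, instance_id, *args, **kwargs):
        instance_get(context, instance_id)
        return func(context, instance_id, *args, **kwargs)
    new_func.__name__ = func.__name__
    return new_func


@require_instance_exists
def instance_metadata_get(context, instance_id):
    return {}


instance_metadata_get(None, 1)     # returns {}
# instance_metadata_get(None, 99)  # would raise InstanceNotFound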
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
new file mode 100644
index 000000000..6e9b806cb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
@@ -0,0 +1,87 @@
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy import DateTime, Boolean, Integer, String
+from sqlalchemy import ForeignKey
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of the instances, volumes, or snapshots tables.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+snapshots = Table('snapshots', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+block_device_mapping = Table('block_device_mapping', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, autoincrement=True),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ Column('device_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ Column('delete_on_termination',
+ Boolean(create_constraint=True, name=None),
+ default=False),
+ Column('virtual_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=True),
+ Column('snapshot_id',
+ Integer(),
+ ForeignKey('snapshots.id'),
+ nullable=True),
+ Column('volume_id', Integer(), ForeignKey('volumes.id'),
+ nullable=True),
+ Column('volume_size', Integer(), nullable=True),
+ Column('no_device',
+ Boolean(create_constraint=True, name=None),
+ nullable=True),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ try:
+ block_device_mapping.create()
+ except Exception:
+ logging.info(repr(block_device_mapping))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[block_device_mapping])
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ block_device_mapping.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
new file mode 100644
index 000000000..27f30d536
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
@@ -0,0 +1,43 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from nova import utils
+
+
+meta = MetaData()
+
+instances = Table("instances", meta,
+ Column("id", Integer(), primary_key=True, nullable=False))
+uuid_column = Column("uuid", String(36))
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.create_column(uuid_column)
+
+ rows = migrate_engine.execute(instances.select())
+ for row in rows:
+ instance_uuid = str(utils.gen_uuid())
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.id == row[0])\
+ .values(uuid=instance_uuid))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.drop_column(uuid_column)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
new file mode 100644
index 000000000..640e96138
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
@@ -0,0 +1,73 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+builds = Table('agent_builds', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('hypervisor',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('os',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('architecture',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('version',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('md5hash',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# New Column
+#
+
+architecture = Column('architecture', String(length=255))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (builds, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ # Add columns to existing tables
+ instances.create_column(architecture)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py
index f26ad6d2c..f26ad6d2c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 96f85b4e5..4390f1be3 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -232,7 +232,9 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
os_type = Column(String(255))
+ architecture = Column(String(255))
vm_mode = Column(String(255))
+ uuid = Column(String(36))
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
@@ -357,6 +359,45 @@ class Snapshot(BASE, NovaBase):
display_description = Column(String(255))
+class BlockDeviceMapping(BASE, NovaBase):
+ """Represents a block device mapping as defined by the EC2 API."""
+ __tablename__ = "block_device_mapping"
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance = relationship(Instance,
+ backref=backref('block_device_mapping'),
+ foreign_keys=instance_id,
+ primaryjoin='and_(BlockDeviceMapping.instance_id=='
+ 'Instance.id,'
+ 'BlockDeviceMapping.deleted=='
+ 'False)')
+ device_name = Column(String(255), nullable=False)
+
+ # default=False for compatibility with existing code.
+ # With the EC2 API, this defaults to True for devices specified by the
+ # AMI and to False for devices attached at any other time.
+ delete_on_termination = Column(Boolean, default=False)
+
+ # for ephemeral device
+ virtual_name = Column(String(255), nullable=True)
+
+ # for snapshot or volume
+ snapshot_id = Column(Integer, ForeignKey('snapshots.id'), nullable=True)
+ # outer join
+ snapshot = relationship(Snapshot,
+ foreign_keys=snapshot_id)
+
+ volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+ volume = relationship(Volume,
+ foreign_keys=volume_id)
+ volume_size = Column(Integer, nullable=True)
+
+ # no_device is set to suppress a device that would otherwise be created.
+ no_device = Column(Boolean, nullable=True)
+
+
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'
@@ -688,6 +729,18 @@ class Zone(BASE, NovaBase):
password = Column(String(255))
+class AgentBuild(BASE, NovaBase):
+ """Represents an agent build."""
+ __tablename__ = 'agent_builds'
+ id = Column(Integer, primary_key=True)
+ hypervisor = Column(String(255))
+ os = Column(String(255))
+ architecture = Column(String(255))
+ version = Column(String(255))
+ url = Column(String(255))
+ md5hash = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -701,7 +754,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- InstanceMetadata, InstanceTypeExtraSpecs, Migration)
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
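The BlockDeviceMapping model added above mirrors one EC2 block device mapping entry per device name. As a rough sketch of the values dict that block_device_mapping_create would receive for a volume-backed device (the helper name, and the assumption that only one of virtual_name/snapshot_id/volume_id/no_device is meaningful per entry, are illustrative rather than taken from this patch):

def example_volume_bdm(instance_id, volume_id, device_name='/dev/vdb'):
    # Keys mirror the BlockDeviceMapping columns defined above; the
    # delete_on_termination default of False matches the model default.
    return {'instance_id': instance_id,
            'device_name': device_name,
            'delete_on_termination': False,
            'virtual_name': None,   # would be set for ephemeral devices
            'snapshot_id': None,    # would be set for snapshot-backed devices
            'volume_id': volume_id,
            'volume_size': None,
            'no_device': None}      # would be True to suppress a device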
diff --git a/nova/flags.py b/nova/flags.py
index acfcf8d68..57a4ecf2f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
- ['127.0.0.1:9292'],
+ ['%s:9292' % _get_my_ip()],
'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
@@ -364,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.local.LocalImageService',
+DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 70a5f0e22..c4b3d5fd6 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -120,6 +120,14 @@ class _FakeImageService(service.BaseImageService):
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
+ def show_by_name(self, context, name):
+ """Returns a dict containing image data for the given name."""
+ images = copy.deepcopy(self.images.values())
+ for image in images:
+ if name == image.get('name'):
+ return image
+ raise exception.ImageNotFound(image_id=name)
+
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
diff --git a/nova/image/local.py b/nova/image/local.py
deleted file mode 100644
index c7dee4573..000000000
--- a/nova/image/local.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import os.path
-import random
-import shutil
-
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.image import service
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('images_path', '$state_path/images',
- 'path to decrypted images')
-
-
-LOG = logging.getLogger('nova.image.local')
-
-
-class LocalImageService(service.BaseImageService):
- """Image service storing images to local disk.
-
- It assumes that image_ids are integers.
-
- """
-
- def __init__(self):
- self._path = FLAGS.images_path
-
- def _path_to(self, image_id, fname='info.json'):
- if fname:
- return os.path.join(self._path, '%08x' % int(image_id), fname)
- return os.path.join(self._path, '%08x' % int(image_id))
-
- def _ids(self):
- """The list of all image ids."""
- images = []
- for image_dir in os.listdir(self._path):
- try:
- unhexed_image_id = int(image_dir, 16)
- except ValueError:
- LOG.error(_('%s is not in correct directory naming format')
- % image_dir)
- else:
- images.append(unhexed_image_id)
- return images
-
- def index(self, context, filters=None, marker=None, limit=None):
- # TODO(blamar): Make use of filters, marker, and limit
- filtered = []
- image_metas = self.detail(context)
- for image_meta in image_metas:
- meta = utils.subset_dict(image_meta, ('id', 'name'))
- filtered.append(meta)
- return filtered
-
- def detail(self, context, filters=None, marker=None, limit=None):
- # TODO(blamar): Make use of filters, marker, and limit
- images = []
- for image_id in self._ids():
- try:
- image = self.show(context, image_id)
- images.append(image)
- except exception.NotFound:
- continue
- return images
-
- def show(self, context, image_id):
- try:
- with open(self._path_to(image_id)) as metadata_file:
- image_meta = json.load(metadata_file)
- if not self._is_image_available(context, image_meta):
- raise exception.ImageNotFound(image_id=image_id)
- return image_meta
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
-
- def show_by_name(self, context, name):
- """Returns a dict containing image data for the given name."""
- # NOTE(vish): Not very efficient, but the local image service
- # is for testing so it should be fine.
- images = self.detail(context)
- image = None
- for cantidate in images:
- if name == cantidate.get('name'):
- image = cantidate
- break
- if image is None:
- raise exception.ImageNotFound(image_id=name)
- return image
-
- def get(self, context, image_id, data):
- """Get image and metadata."""
- try:
- with open(self._path_to(image_id)) as metadata_file:
- metadata = json.load(metadata_file)
- with open(self._path_to(image_id, 'image')) as image_file:
- shutil.copyfileobj(image_file, data)
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
- return metadata
-
- def create(self, context, metadata, data=None):
- """Store the image data and return the new image."""
- image_id = random.randint(0, 2 ** 31 - 1)
- image_path = self._path_to(image_id, None)
- if not os.path.exists(image_path):
- os.mkdir(image_path)
- return self._store(context, image_id, metadata, data)
-
- def update(self, context, image_id, metadata, data=None):
- """Replace the contents of the given image with the new data."""
- # NOTE(vish): show is to check if image is available
- self.show(context, image_id)
- return self._store(context, image_id, metadata, data)
-
- def _store(self, context, image_id, metadata, data=None):
- metadata['id'] = image_id
- try:
- if data:
- location = self._path_to(image_id, 'image')
- with open(location, 'w') as image_file:
- shutil.copyfileobj(data, image_file)
- # NOTE(vish): update metadata similarly to glance
- metadata['status'] = 'active'
- metadata['location'] = location
- with open(self._path_to(image_id), 'w') as metadata_file:
- json.dump(metadata, metadata_file)
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
- return metadata
-
- def delete(self, context, image_id):
- """Delete the given image.
-
- :raises: ImageNotFound if the image does not exist.
-
- """
- # NOTE(vish): show is to check if image is available
- self.show(context, image_id)
- try:
- shutil.rmtree(self._path_to(image_id, None))
- except (IOError, ValueError):
- raise exception.ImageNotFound(image_id=image_id)
-
- def delete_all(self):
- """Clears out all images in local directory."""
- for image_id in self._ids():
- shutil.rmtree(self._path_to(image_id, None))
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 3b3195c2e..1bb047e2e 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -24,6 +24,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
+from nova import utils
from eventlet import greenpool
@@ -201,38 +202,78 @@ class RedirectResult(exception.Error):
class reroute_compute(object):
- """Decorator used to indicate that the method should
- delegate the call the child zones if the db query
- can't find anything."""
+ """
+ reroute_compute is responsible for trying to look up a resource in the
+ current zone and if it's not found there, delegating the call to the
+ child zones.
+
+ Since reroute_compute will be making 'cross-zone' calls, the ID for the
+ object must come in as a UUID; if we receive an integer ID, we bail.
+
+ The steps involved are:
+
+ 1. Validate that item_id is UUID like
+
+ 2. Lookup item by UUID in the zone local database
+
+ 3. If the item was found, then extract integer ID, and pass that to
+ the wrapped method. (This ensures that zone-local code can
+ continue to use integer IDs).
+
+ 4. If the item was not found, we delegate the call to a child zone
+ using the UUID.
+ """
def __init__(self, method_name):
self.method_name = method_name
+ def _route_to_child_zones(self, context, collection, item_uuid):
+ if not FLAGS.enable_zone_routing:
+ raise exception.InstanceNotFound(instance_id=item_uuid)
+
+ zones = db.zone_get_all(context)
+ if not zones:
+ raise exception.InstanceNotFound(instance_id=item_uuid)
+
+ # Ask the children to provide an answer ...
+ LOG.debug(_("Asking child zones ..."))
+ result = self._call_child_zones(zones,
+ wrap_novaclient_function(_issue_novaclient_command,
+ collection, self.method_name, item_uuid))
+ # Scrub the results and raise another exception
+ # so the API layers can bail out gracefully ...
+ raise RedirectResult(self.unmarshall_result(result))
+
def __call__(self, f):
def wrapped_f(*args, **kwargs):
- collection, context, item_id = \
+ collection, context, item_id_or_uuid = \
self.get_collection_context_and_id(args, kwargs)
- try:
- # Call the original function ...
+
+ attempt_reroute = False
+ if utils.is_uuid_like(item_id_or_uuid):
+ item_uuid = item_id_or_uuid
+ try:
+ instance = db.instance_get_by_uuid(context, item_uuid)
+ except exception.InstanceNotFound, e:
+ # NOTE(sirp): since a UUID was passed in, we can attempt
+ # to reroute to a child zone
+ attempt_reroute = True
+ LOG.debug(_("Instance %(item_uuid)s not found "
+ "locally: '%(e)s'" % locals()))
+ else:
+ # NOTE(sirp): since we're not re-routing in this case, and
+ # we were passed a UUID, we need to replace that UUID
+ # with an integer ID in the argument list so that the
+ # zone-local code can continue to use integer IDs.
+ item_id = instance['id']
+ args = list(args) # needs to be mutable to replace
+ self.replace_uuid_with_id(args, kwargs, item_id)
+
+ if attempt_reroute:
+ return self._route_to_child_zones(context, collection,
+ item_uuid)
+ else:
return f(*args, **kwargs)
- except exception.InstanceNotFound, e:
- LOG.debug(_("Instance %(item_id)s not found "
- "locally: '%(e)s'" % locals()))
-
- if not FLAGS.enable_zone_routing:
- raise
-
- zones = db.zone_get_all(context)
- if not zones:
- raise
-
- # Ask the children to provide an answer ...
- LOG.debug(_("Asking child zones ..."))
- result = self._call_child_zones(zones,
- wrap_novaclient_function(_issue_novaclient_command,
- collection, self.method_name, item_id))
- # Scrub the results and raise another exception
- # so the API layers can bail out gracefully ...
- raise RedirectResult(self.unmarshall_result(result))
+
return wrapped_f
def _call_child_zones(self, zones, function):
@@ -251,6 +292,18 @@ class reroute_compute(object):
instance_id = args[2]
return ("servers", context, instance_id)
+ @staticmethod
+ def replace_uuid_with_id(args, kwargs, replacement_id):
+ """
+ Extracts the UUID parameter from the arg or kwarg list and replaces
+ it with an integer ID.
+ """
+ if 'instance_id' in kwargs:
+ kwargs['instance_id'] = replacement_id
+ elif len(args) > 1:
+ args.pop(2)
+ args.insert(2, replacement_id)
+
def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone.
Each decorator derivation is responsible to turning this
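A condensed standalone sketch of the routing decision described in the reroute_compute docstring above; is_uuid_like, the local lookup, and the child-zone call below are stand-ins for utils.is_uuid_like, db.instance_get_by_uuid, and the novaclient delegation, not the real implementations:

import re

UUID_RE = re.compile(r'^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')
LOCAL_INSTANCES = {'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': 1}


def is_uuid_like(value):
    # Rough stand-in for utils.is_uuid_like.
    return bool(UUID_RE.match(str(value).lower()))


def lookup_local_by_id(instance_id):
    return {'id': instance_id}


def ask_child_zones(uuid):
    return {'rerouted': uuid}


def routing_get(context, item_id_or_uuid):
    if not is_uuid_like(item_id_or_uuid):
        # Integer IDs are zone-local; rerouting is never attempted.
        return lookup_local_by_id(item_id_or_uuid)
    local_id = LOCAL_INSTANCES.get(item_id_or_uuid)
    if local_id is not None:
        # Found locally: swap the UUID for the integer ID and stay local.
        return lookup_local_by_id(local_id)
    # Not found locally: delegate the UUID lookup to the child zones.
    return ask_child_zones(item_id_or_uuid)


routing_get(None, 1)                                       # zone-local by ID
routing_get(None, 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')  # local UUID hit
routing_get(None, 'ffffffff-ffff-ffff-ffff-ffffffffffff')  # delegated to children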
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 87cdef11d..fc1b3142a 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
- def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
+ def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
@@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
" for this request. Is the appropriate"
" service running?"))
+ def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
+ return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+
+ def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
+ return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index a10fb7433..f8d158ddd 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -39,7 +39,6 @@ from nova.api.openstack import limits
from nova.auth.manager import User, Project
import nova.image.fake
from nova.image import glance
-from nova.image import local
from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 60914c0a3..697c62e5c 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -128,6 +128,11 @@ class ResourceExtensionTest(unittest.TestCase):
self.assertEqual(response_body, response.body)
+class InvalidExtension(object):
+ def get_alias(self):
+ return "THIRD"
+
+
class ExtensionManagerTest(unittest.TestCase):
response_body = "Try to say this Mr. Knox, sir..."
@@ -144,6 +149,14 @@ class ExtensionManagerTest(unittest.TestCase):
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
+ def test_invalid_extensions(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ ext_mgr = ext_midware.ext_mgr
+ ext_mgr.add_extension(InvalidExtension())
+ self.assertTrue('FOXNSOX' in ext_mgr.extensions)
+ self.assertTrue('THIRD' not in ext_mgr.extensions)
+
class ActionExtensionTest(unittest.TestCase):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index be777df9b..e4204809f 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -135,36 +135,6 @@ class _BaseImageServiceTests(test.TestCase):
return fixture
-class LocalImageServiceTest(_BaseImageServiceTests):
-
- """Tests the local image service"""
-
- def setUp(self):
- super(LocalImageServiceTest, self).setUp()
- self.tempdir = tempfile.mkdtemp()
- self.flags(images_path=self.tempdir)
- self.stubs = stubout.StubOutForTesting()
- service_class = 'nova.image.local.LocalImageService'
- self.service = utils.import_object(service_class)
- self.context = context.RequestContext(None, None)
-
- def tearDown(self):
- shutil.rmtree(self.tempdir)
- self.stubs.UnsetAll()
- super(LocalImageServiceTest, self).tearDown()
-
- def test_get_all_ids_with_incorrect_directory_formats(self):
- # create some old-style image directories (starting with 'ami-')
- for x in [1, 2, 3]:
- tempfile.mkstemp(prefix='ami-', dir=self.tempdir)
- # create some valid image directories names
- for x in ["1485baed", "1a60f0ee", "3123a73d"]:
- os.makedirs(os.path.join(self.tempdir, x))
- found_image_ids = self.service._ids()
- self.assertEqual(True, isinstance(found_image_ids, list))
- self.assertEqual(3, len(found_image_ids), len(found_image_ids))
-
-
class GlanceImageServiceTest(_BaseImageServiceTests):
"""Tests the Glance image service, in particular that metadata translation
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 01613d1d8..38c959fae 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -672,8 +672,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
"""Only POSTs should work."""
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
- request = webob.Request.blank("/")
- request.body = self._request_data("GET", "/something")
+ request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 405)
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index b583d40fe..0431e68d2 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -21,6 +21,7 @@ import unittest
import webob
+from nova import exception
from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
@@ -67,6 +68,14 @@ def stub_max_server_metadata():
return metadata
+def return_server(context, server_id):
+ return {'id': server_id}
+
+
+def return_server_nonexistant(context, server_id):
+ raise exception.InstanceNotFound(instance_id=server_id)
+
+
class ServerMetaDataTest(unittest.TestCase):
def setUp(self):
@@ -76,6 +85,7 @@ class ServerMetaDataTest(unittest.TestCase):
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
def tearDown(self):
self.stubs.UnsetAll()
@@ -92,6 +102,13 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
+ def test_index_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
@@ -114,13 +131,19 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
+ def test_show_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
@@ -132,6 +155,14 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
+ def test_delete_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -141,8 +172,8 @@ class ServerMetaDataTest(unittest.TestCase):
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
@@ -156,6 +187,16 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
+ def test_create_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/100/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -170,6 +211,16 @@ class ServerMetaDataTest(unittest.TestCase):
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
+ def test_update_item_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/asdf/100/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_update_item_empty_body(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 8357df594..b53c6c9be 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -49,10 +49,22 @@ FLAGS = flags.FLAGS
FLAGS.verbose = True
-def return_server(context, id):
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_server_by_id(context, id):
return stub_instance(id)
+def return_server_by_uuid(context, uuid):
+ id = 1
+ return stub_instance(id, uuid=uuid)
+
+
def return_server_with_addresses(private, public):
def _return_server(context, id):
return stub_instance(id, private_address=private,
@@ -111,7 +123,8 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None, power_state=0, reservation_id=""):
+ host=None, power_state=0, reservation_id="",
+ uuid=FAKE_UUID):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -129,7 +142,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
server_name = "reservation_%s" % (reservation_id, )
instance = {
- "id": id,
+ "id": int(id),
"admin_pass": "",
"user_id": user_id,
"project_id": "",
@@ -157,7 +170,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"display_name": server_name,
"display_description": "",
"locked": False,
- "metadata": metadata}
+ "metadata": metadata,
+ "uuid": uuid}
instance["fixed_ip"] = {
"address": private_address,
@@ -196,8 +210,11 @@ class ServersTest(test.TestCase):
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
+ self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@@ -229,6 +246,36 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
+ def test_get_server_by_uuid(self):
+ """
+ The steps involved with resolving a UUID are pretty complicated;
+ here's what's happening in this scenario:
+
+ 1. Show is calling `routing_get`
+
+ 2. `routing_get` is wrapped by `reroute_compute` which does the work
+ of resolving requests to child zones.
+
+ 3. `reroute_compute` looks up the UUID by hitting the stub
+ (return_server_by_uuid)
+
+ 4. Since the stub reports that the record exists, `reroute_compute`
+ considers the request to be 'zone local', so it replaces the UUID
+ in the argument list with an integer ID and then calls the inner
+ function ('get').
+
+ 5. The call to `get` hits the other stub `return_server_by_id` which
+ has the UUID set to FAKE_UUID
+
+ So, counterintuitively, we call `get` twice on the `show` command.
+ """
+ req = webob.Request.blank('/v1.0/servers/%s' % FAKE_UUID)
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['uuid'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+
def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
@@ -540,7 +587,8 @@ class ServersTest(test.TestCase):
def _setup_for_create_instance(self):
"""Shared implementation for tests below that create instance"""
def instance_create(context, inst):
- return {'id': '1', 'display_name': 'server_test'}
+ return {'id': 1, 'display_name': 'server_test',
+ 'uuid': FAKE_UUID}
def server_update(context, id, params):
return instance_create(context, id)
@@ -594,11 +642,22 @@ class ServersTest(test.TestCase):
self.assertEqual(1, server['id'])
self.assertEqual(2, server['flavorId'])
self.assertEqual(3, server['imageId'])
+ self.assertEqual(FAKE_UUID, server['uuid'])
self.assertEqual(res.status_int, 200)
def test_create_instance(self):
self._test_create_instance_helper()
+ def test_create_instance_has_uuid(self):
+ """Tests at the DB layer instead of the API layer, since that's where
+ the UUID is generated.
+ """
+ ctxt = context.RequestContext(1, 1)
+ values = {}
+ instance = nova.db.api.instance_create(ctxt, values)
+ expected = FAKE_UUID
+ self.assertEqual(instance['uuid'], expected)
+
def test_create_instance_via_zones(self):
"""Server generated ReservationID"""
self._setup_for_create_instance()
@@ -1443,7 +1502,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+ req = self.webreq('/1', 'GET')
def fake_migration_get(*args):
return {}
@@ -1850,7 +1909,8 @@ class TestServerInstanceCreation(test.TestCase):
self.injected_files = kwargs['injected_files']
else:
self.injected_files = None
- return [{'id': '1234', 'display_name': 'fakeinstance'}]
+ return [{'id': '1234', 'display_name': 'fakeinstance',
+ 'uuid': FAKE_UUID}]
def set_admin_password(self, *args, **kwargs):
pass
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 2fa50ac9b..73a26a087 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -10,13 +10,13 @@ from nova.api.openstack import wsgi
class RequestTest(test.TestCase):
def test_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index ecefc464a..2297d2f0e 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
-FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
+FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index eb9a3056e..76c03c5fa 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -221,30 +221,30 @@ class TestOpenStackClient(object):
return self.api_delete('/flavors/%s' % flavor_id)
def get_volume(self, volume_id):
- return self.api_get('/volumes/%s' % volume_id)['volume']
+ return self.api_get('/os-volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
- rel_url = '/volumes/detail' if detail else '/volumes'
+ rel_url = '/os-volumes/detail' if detail else '/os-volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
- return self.api_post('/volumes', volume)['volume']
+ return self.api_post('/os-volumes', volume)['volume']
def delete_volume(self, volume_id):
- return self.api_delete('/volumes/%s' % volume_id)
+ return self.api_delete('/os-volumes/%s' % volume_id)
def get_server_volume(self, server_id, attachment_id):
- return self.api_get('/servers/%s/volume_attachments/%s' %
+ return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))['volumeAttachment']
def get_server_volumes(self, server_id):
- return self.api_get('/servers/%s/volume_attachments' %
+ return self.api_get('/servers/%s/os-volume_attachments' %
(server_id))['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
- return self.api_post('/servers/%s/volume_attachments' %
+ return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment)['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
- return self.api_delete('/servers/%s/volume_attachments/%s' %
+ return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 0d7929996..4be59d411 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('instances_path', 'nova.compute.manager')
+FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(context, topic, *args, **kwargs):
@@ -926,12 +930,23 @@ def zone_get_all(context):
]
+def fake_instance_get_by_uuid(context, uuid):
+ if uuid == FAKE_UUID_NOT_FOUND:
+ raise exception.InstanceNotFound(instance_id=uuid)
+ else:
+ return {'id': 1}
+
+
class FakeRerouteCompute(api.reroute_compute):
+ def __init__(self, method_name, id_to_return=1):
+ super(FakeRerouteCompute, self).__init__(method_name)
+ self.id_to_return = id_to_return
+
def _call_child_zones(self, zones, function):
return []
def get_collection_context_and_id(self, args, kwargs):
- return ("servers", None, 1)
+ return ("servers", None, self.id_to_return)
def unmarshall_result(self, zone_responses):
return dict(magic="found me")
@@ -960,6 +975,8 @@ class ZoneRedirectTest(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.enable_zone_routing = FLAGS.enable_zone_routing
FLAGS.enable_zone_routing = True
@@ -976,8 +993,19 @@ class ZoneRedirectTest(test.TestCase):
except api.RedirectResult, e:
self.fail(_("Successful database hit should succeed"))
- def test_trap_not_found_locally(self):
+ def test_trap_not_found_locally_id_passed(self):
+ """When an integer ID is not found locally, we cannot reroute to
+ another zone, so the InstanceNotFound exception is simply raised
+ """
decorator = FakeRerouteCompute("foo")
+ self.assertRaises(exception.InstanceNotFound,
+ decorator(go_boom), None, None, 1)
+
+ def test_trap_not_found_locally_uuid_passed(self):
+ """When a UUID is passed and the item isn't found locally, we should
+ try to reroute to a child zone to see if it has the item
+ """
+ decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
try:
result = decorator(go_boom)(None, None, 1)
self.assertFail(_("Should have rerouted."))
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 7c0331eff..20b20fcbf 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
- conv = apirequest._try_convert
+ conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('False'), False)
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index d2ff14f27..6327734f5 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -35,7 +35,7 @@ from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
-from nova.image import local
+from nova.image import fake
FLAGS = flags.FLAGS
@@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
+ self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
@@ -69,8 +70,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
@@ -303,7 +304,7 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
- self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
@@ -317,8 +318,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual(2, len(result3['imagesSet']))
# provide an non-existing image_id
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
@@ -329,8 +330,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
@@ -345,9 +346,9 @@ class CloudTestCase(test.TestCase):
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
- self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
- self.stubs.Set(local.LocalImageService, 'update', fake_update)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
@@ -359,7 +360,7 @@ class CloudTestCase(test.TestCase):
def fake_delete(self, context, id):
return None
- self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertEqual(result['imageId'], 'ami-00000001')
@@ -369,18 +370,25 @@ class CloudTestCase(test.TestCase):
def fake_detail_empty(self, context):
return []
- self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
- def test_console_output(self):
- instance_type = FLAGS.default_instance_type
- max_count = 1
- kwargs = {'image_id': 'ami-1',
- 'instance_type': instance_type,
- 'max_count': max_count}
+ def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def _run_instance_wait(self, **kwargs):
+ ec2_instance_id = self._run_instance(**kwargs)
+ self._wait_for_running(ec2_instance_id)
+ return ec2_instance_id
+
+ def test_console_output(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=FLAGS.default_instance_type,
+ max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
@@ -389,9 +397,7 @@ class CloudTestCase(test.TestCase):
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self):
- kwargs = {'image_id': 'ami-1'}
- rv = self.cloud.run_instances(self.context, **kwargs)
- instance_id = rv['instancesSet'][0]['instanceId']
+ instance_id = self._run_instance(image_id='ami-1')
output = self.cloud.get_ajax_console(context=self.context,
instance_id=[instance_id])
self.assertEquals(output['url'],
@@ -486,7 +492,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -501,7 +507,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
- self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -515,7 +521,7 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'}
- self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
@@ -544,7 +550,9 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
- self.cloud.update_instance(self.context, inst['id'],
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
+ self.cloud.update_instance(self.context, ec2_id,
+ display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF')
inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
@@ -567,3 +575,299 @@ class CloudTestCase(test.TestCase):
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
db.volume_destroy(self.context, vol['id'])
+
+ def _restart_compute_service(self, periodic_interval=None):
+ """Restart the compute service. NOTE: the fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval:
+ self.compute = self.start_service(
+ 'compute', periodic_interval=periodic_interval)
+ else:
+ self.compute = self.start_service('compute')
+
+ def _wait_for_state(self, ctxt, instance_id, predicate):
+ """Wait for a stopping instance to reach a given state."""
+ id = ec2utils.ec2_id_to_id(instance_id)
+ while True:
+ info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
+ LOG.debug(info)
+ if predicate(info):
+ break
+ greenthread.sleep(1)
+
+ def _wait_for_running(self, instance_id):
+ def is_running(info):
+ return info['state_description'] == 'running'
+ self._wait_for_state(self.context, instance_id, is_running)
+
+ def _wait_for_stopped(self, instance_id):
+ def is_stopped(info):
+ return info['state_description'] == 'stopped'
+ self._wait_for_state(self.context, instance_id, is_stopped)
+
+ def _wait_for_terminate(self, instance_id):
+ def is_deleted(info):
+ return info['deleted']
+ elevated = self.context.elevated(read_deleted=True)
+ self._wait_for_state(elevated, instance_id, is_deleted)
+
+ def test_stop_start_instance(self):
+ """Makes sure stop/start instance works"""
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance_wait(**kwargs)
+
+ # a running instance can't be started. It is just ignored.
+ result = self.cloud.start_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_stopped(instance_id)
+
+ result = self.cloud.start_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_running(instance_id)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+ self._wait_for_stopped(instance_id)
+
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
+ self.assertTrue(result)
+
+ self._restart_compute_service()
+
+ def _volume_create(self):
+ kwargs = {'status': 'available',
+ 'host': self.volume.host,
+ 'size': 1,
+ 'attach_status': 'detached', }
+ return db.volume_create(self.context, kwargs)
+
+ def _assert_volume_attached(self, vol, instance_id, mountpoint):
+ self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ def _assert_volume_detached(self, vol):
+ self.assertEqual(vol['instance_id'], None)
+ self.assertEqual(vol['mountpoint'], None)
+ self.assertEqual(vol['status'], "available")
+ self.assertEqual(vol['attach_status'], "detached")
+
+ def test_stop_start_with_volume(self):
+ """Make sure run instance with block device mapping works"""
+
+ # Run periodic tasks at a short interval to avoid waiting for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ vol1 = self._volume_create()
+ vol2 = self._volume_create()
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'volume_id': vol1['id'],
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'volume_id': vol2['id'],
+ 'delete_on_termination': True, },
+ ]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdb')
+
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+ self._wait_for_stopped(ec2_instance_id)
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ self._wait_for_running(ec2_instance_id)
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
+ self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
+ vol['mountpoint'] == '/dev/vdc')
+ self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+
+ admin_ctxt = context.get_admin_context(read_deleted=False)
+ vol = db.volume_get(admin_ctxt, vol1['id'])
+ self.assertFalse(vol['deleted'])
+ db.volume_destroy(self.context, vol1['id'])
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=True)
+ vol = db.volume_get(admin_ctxt, vol2['id'])
+ self.assertTrue(vol['deleted'])
+
+ self._restart_compute_service()
+
+ def test_stop_with_attached_volume(self):
+ """Make sure attach info is reflected to block device mapping"""
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ vol1 = self._volume_create()
+ vol2 = self._volume_create()
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'volume_id': vol1['id'],
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol1['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdb')
+
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.compute_api.attach_volume(self.context,
+ instance_id=instance_id,
+ volume_id=vol2['id'],
+ device='/dev/vdc')
+ greenthread.sleep(0.3)
+ vol = db.volume_get(self.context, vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ self.cloud.compute_api.detach_volume(self.context,
+ volume_id=vol1['id'])
+ greenthread.sleep(0.3)
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+ self._wait_for_stopped(ec2_instance_id)
+
+ for vol_id in (vol1['id'], vol2['id']):
+ vol = db.volume_get(self.context, vol_id)
+ self._assert_volume_detached(vol)
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ self._wait_for_running(ec2_instance_id)
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol2['id'])
+ self._assert_volume_attached(vol, instance_id, '/dev/vdc')
+
+ vol = db.volume_get(self.context, vol1['id'])
+ self._assert_volume_detached(vol)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+
+ for vol_id in (vol1['id'], vol2['id']):
+ vol = db.volume_get(self.context, vol_id)
+ self.assertEqual(vol['id'], vol_id)
+ self._assert_volume_detached(vol)
+ db.volume_destroy(self.context, vol_id)
+
+ self._restart_compute_service()
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ greenthread.sleep(0.3)
+ return result['snapshotId']
+
+ def test_run_with_snapshot(self):
+ """Makes sure run/stop/start instance with snapshot works."""
+ vol = self._volume_create()
+ ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+ ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
+ snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
+ ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
+ snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'snapshot_id': snapshot1_id,
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'snapshot_id': snapshot2_id,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance_wait(**kwargs)
+ instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
+
+ vols = db.volume_get_all_by_instance(self.context, instance_id)
+ self.assertEqual(len(vols), 2)
+ vol1_id = None
+ vol2_id = None
+ for vol in vols:
+ snapshot_id = vol['snapshot_id']
+ if snapshot_id == snapshot1_id:
+ vol1_id = vol['id']
+ mountpoint = '/dev/vdb'
+ elif snapshot_id == snapshot2_id:
+ vol2_id = vol['id']
+ mountpoint = '/dev/vdc'
+ else:
+ self.fail()
+
+ self._assert_volume_attached(vol, instance_id, mountpoint)
+
+ self.assertTrue(vol1_id)
+ self.assertTrue(vol2_id)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ greenthread.sleep(0.3)
+ self._wait_for_terminate(ec2_instance_id)
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=False)
+ vol = db.volume_get(admin_ctxt, vol1_id)
+ self._assert_volume_detached(vol)
+ self.assertFalse(vol['deleted'])
+ db.volume_destroy(self.context, vol1_id)
+
+ greenthread.sleep(0.3)
+ admin_ctxt = context.get_admin_context(read_deleted=True)
+ vol = db.volume_get(admin_ctxt, vol2_id)
+ self.assertTrue(vol['deleted'])
+
+ for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
+ self.cloud.delete_snapshot(self.context, snapshot_id)
+ greenthread.sleep(0.3)
+ db.volume_destroy(self.context, vol['id'])
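The tests above round-trip between internal integer ids and EC2-style ids via ec2utils. A minimal sketch of the conversion they rely on, assuming the template-based formatting implied by the 'vol-%08x' call above (not necessarily the exact ec2utils code):

def id_to_ec2_id(internal_id, template='i-%08x'):
    # e.g. 1 -> 'i-00000001'; with template='vol-%08x', 1 -> 'vol-00000001'
    return template % internal_id

def ec2_id_to_id(ec2_id):
    # e.g. 'i-00000001' -> 1 (parse the hex suffix after the last dash)
    return int(ec2_id.split('-')[-1], 16)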
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b4ac2dbc4..439508b27 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -22,21 +22,21 @@ Tests For Compute
import mox
import stubout
+from nova.auth import manager
from nova import compute
+from nova.compute import instance_types
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
from nova import context
from nova import db
+from nova.db.sqlalchemy import models
from nova import exception
from nova import flags
+import nova.image.fake
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
-from nova.auth import manager
-from nova.compute import instance_types
-from nova.compute import manager as compute_manager
-from nova.compute import power_state
-from nova.db.sqlalchemy import models
-from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
- self.stubs.Set(local.LocalImageService, 'show', fake_show)
+ self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
@@ -228,6 +228,21 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
+ def test_stop(self):
+ """Ensure instance can be stopped"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.stop_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_start(self):
+ """Ensure instance can be started"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.stop_instance(self.context, instance_id)
+ self.compute.start_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_pause(self):
"""Ensure instance can be paused"""
instance_id = self._create_instance()
@@ -266,6 +281,14 @@ class ComputeTestCase(test.TestCase):
"File Contents")
self.compute.terminate_instance(self.context, instance_id)
+ def test_agent_update(self):
+ """Ensure instance can have its agent updated"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.agent_update(self.context, instance_id,
+ 'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 8f7e83c3e..3a3f914e4 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -275,3 +275,21 @@ class GenericUtilsTestCase(test.TestCase):
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
+
+
+class IsUUIDLikeTestCase(test.TestCase):
+ def assertUUIDLike(self, val, expected):
+ result = utils.is_uuid_like(val)
+ self.assertEqual(result, expected)
+
+ def test_good_uuid(self):
+ val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ self.assertUUIDLike(val, True)
+
+ def test_integer_passed(self):
+ val = 1
+ self.assertUUIDLike(val, False)
+
+ def test_non_uuid_string_passed(self):
+ val = 'foo-fooo'
+ self.assertUUIDLike(val, False)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index d1c88287a..d9a514745 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -33,12 +33,12 @@ from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
+from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi.vmops import SimpleDH
-from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
@@ -84,7 +84,8 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -191,7 +192,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
- self.stubs.Set(VMOps, 'reset_network', reset_network)
+ self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
@@ -211,7 +212,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
@@ -228,6 +230,23 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
+ def test_instance_snapshot_fails_with_no_primary_vdi(self):
+ def create_bad_vbd(vm_ref, vdi_ref):
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'userdevice': 'fake',
+ 'currently_attached': False}
+ vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
+ xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+ self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
+ stubs.stubout_instance_snapshot(self.stubs)
+ instance = self._create_instance()
+
+ name = "MySnapshot"
+ self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
+
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
@@ -352,7 +371,8 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
- instance_id=1, check_injection=False):
+ architecture="x86-64", instance_id=1,
+ check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
@@ -362,11 +382,14 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'architecture': architecture}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
+ self.assertTrue(instance.os_type)
+ self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -391,7 +414,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="linux")
+ os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
@@ -420,7 +443,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="windows")
+ os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
@@ -571,7 +594,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
@@ -581,8 +605,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
- self.alice = SimpleDH()
- self.bob = SimpleDH()
+ self.alice = vmops.SimpleDH()
+ self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
@@ -646,7 +670,8 @@ class XenAPIMigrateInstance(test.TestCase):
'local_gb': 5,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
@@ -685,6 +710,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
+ self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -729,6 +755,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+class CompareVersionTestCase(test.TestCase):
+ def test_less_than(self):
+ """Test that cmp_version compares a as less than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
+
+ def test_greater_than(self):
+ """Test that cmp_version compares a as greater than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
+
+ def test_equal(self):
+ """Test that cmp_version compares a as equal to b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
+
+ def test_non_lexical(self):
+ """Test that cmp_version compares non-lexically"""
+ self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
+
+ def test_length(self):
+ """Test that cmp_version compares by length as last resort"""
+ self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
+
+
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""
diff --git a/nova/utils.py b/nova/utils.py
index 691134ada..e2ac16f31 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -35,6 +35,7 @@ import struct
import sys
import time
import types
+import uuid
from xml.sax import saxutils
from eventlet import event
@@ -726,3 +727,17 @@ def parse_server_string(server_str):
except:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')
+
+
+def gen_uuid():
+ return uuid.uuid4()
+
+
+def is_uuid_like(val):
+ """For our purposes, a UUID is a string in canoical form:
+
+ aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+ """
+ if not isinstance(val, basestring):
+ return False
+ return (len(val) == 36) and (val.count('-') == 4)
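For reference, the new helpers behave like this (a usage sketch; is_uuid_like is purely syntactic, so any 36-character string with four dashes passes):

from nova import utils

print(utils.is_uuid_like(str(utils.gen_uuid())))                   # True
print(utils.is_uuid_like('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'))  # True
print(utils.is_uuid_like('foo-fooo'))                              # False: wrong length
print(utils.is_uuid_like(1))                                       # False: not a string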
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index eb9626d08..8dd7057a3 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -61,7 +61,7 @@ class ComputeDriver(object):
"""Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError()
- def spawn(self, instance, network_info=None):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()
@@ -234,6 +234,10 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def agent_update(self, instance, url, md5hash):
+ """Update agent on the VM instance."""
+ raise NotImplementedError()
+
def inject_network_info(self, instance):
"""inject network info for specified instance"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 0225797d7..94aecec24 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -225,6 +225,21 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
+ def agent_update(self, instance, url, md5hash):
+ """
+ Update agent on the specified instance.
+
+ The first parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name. The second
+ parameter is the URL of the agent to be fetched and updated on the
+ instance; the third is the md5 hash of the file for verification
+ purposes.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+ """
+ pass
+
def rescue(self, instance):
"""
Rescue the specified instance.
@@ -237,6 +252,10 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ pass
+
def migrate_disk_and_power_off(self, instance, dest):
"""
Transfers the disk of a running instance in multiple phases, turning
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 05b4775c1..772e7eb59 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 20986d4d5..e1a683da8 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -67,11 +67,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
+ #if not ($getVar('ebs_root', False))
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
+ #end if
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
@@ -79,6 +81,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
+ #for $vol in $volumes
+ <disk type='block'>
+ <driver type='raw'/>
+ <source dev='${vol.device_path}'/>
+ <target dev='${vol.mount_device}' bus='${disk_bus}'/>
+ </disk>
+ #end for
#end if
#end if
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 98cdff311..96ef92825 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -40,6 +40,7 @@ import hashlib
import multiprocessing
import os
import random
+import re
import shutil
import subprocess
import sys
@@ -148,6 +149,10 @@ def _late_load_cheetah():
Template = t.Template
+def _strip_dev(mount_path):
+ return re.sub(r'^/dev/', '', mount_path)
+
+
class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
@@ -575,11 +580,14 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception
- def spawn(self, instance, network_info=None):
- xml = self.to_xml(instance, False, network_info)
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
+ xml = self.to_xml(instance, False, network_info=network_info,
+ block_device_mapping=block_device_mapping)
+ block_device_mapping = block_device_mapping or []
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info=network_info)
+ self._create_image(instance, xml, network_info=network_info,
+ block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -761,7 +769,8 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
- network_info=None):
+ network_info=None, block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
if not network_info:
network_info = netutils.get_network_info(inst)
@@ -824,16 +833,19 @@ class LibvirtConnection(driver.ComputeDriver):
size = None
root_fname += "_sm"
- self._cache_image(fn=self._fetch_image,
- target=basepath('disk'),
- fname=root_fname,
- cow=FLAGS.use_cow_images,
- image_id=disk_images['image_id'],
- user=user,
- project=project,
- size=size)
+ if not self._volume_in_mapping(self.root_mount_device,
+ block_device_mapping):
+ self._cache_image(fn=self._fetch_image,
+ target=basepath('disk'),
+ fname=root_fname,
+ cow=FLAGS.use_cow_images,
+ image_id=disk_images['image_id'],
+ user=user,
+ project=project,
+ size=size)
- if inst_type['local_gb']:
+ if inst_type['local_gb'] and not self._volume_in_mapping(
+ self.local_mount_device, block_device_mapping):
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
fname="local_%s" % inst_type['local_gb'],
@@ -948,7 +960,20 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def _prepare_xml_info(self, instance, rescue=False, network_info=None):
+ root_mount_device = 'vda' # FIXME for now. it's hard coded.
+ local_mount_device = 'vdb' # FIXME for now. it's hard coded.
+
+ def _volume_in_mapping(self, mount_device, block_device_mapping):
+ mount_device_ = _strip_dev(mount_device)
+ for vol in block_device_mapping:
+ vol_mount_device = _strip_dev(vol['mount_device'])
+ if vol_mount_device == mount_device_:
+ return True
+ return False
+
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+ block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -966,6 +991,16 @@ class LibvirtConnection(driver.ComputeDriver):
else:
driver_type = 'raw'
+ for vol in block_device_mapping:
+ vol['mount_device'] = _strip_dev(vol['mount_device'])
+ ebs_root = self._volume_in_mapping(self.root_mount_device,
+ block_device_mapping)
+ if self._volume_in_mapping(self.local_mount_device,
+ block_device_mapping):
+ local_gb = False
+ else:
+ local_gb = inst_type['local_gb']
+
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -973,9 +1008,11 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
- 'local': inst_type['local_gb'],
+ 'local': local_gb,
'driver_type': driver_type,
- 'nics': nics}
+ 'nics': nics,
+ 'ebs_root': ebs_root,
+ 'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
@@ -991,10 +1028,13 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info
- def to_xml(self, instance, rescue=False, network_info=None):
+ def to_xml(self, instance, rescue=False, network_info=None,
+ block_device_mapping=None):
+ block_device_mapping = block_device_mapping or []
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
+ xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ block_device_mapping)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
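The root/ephemeral skip logic added to _create_image reduces to a membership test on stripped device names. A standalone sketch (the 'mount_device' and 'device_path' keys are taken from the hunk above; the example path is hypothetical):

import re

def _strip_dev(mount_path):
    # '/dev/vda' -> 'vda'
    return re.sub(r'^/dev/', '', mount_path)

def volume_in_mapping(mount_device, block_device_mapping):
    stripped = _strip_dev(mount_device)
    return any(_strip_dev(vol['mount_device']) == stripped
               for vol in block_device_mapping)

mapping = [{'mount_device': '/dev/vda',
            'device_path': '/dev/disk/by-path/volume-id-1'}]
print(volume_in_mapping('vda', mapping))  # True: ebs_root, root image not fetched
print(volume_in_mapping('vdb', mapping))  # False: local disk still created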
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 48edc5384..70adba74f 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -90,8 +90,6 @@ def fetch_image(image, instance, **kwargs):
func = _get_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _get_s3_image
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- func = _get_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@@ -105,8 +103,6 @@ def upload_image(image, instance, **kwargs):
func = _put_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _put_s3_image
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- func = _put_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@@ -192,8 +188,6 @@ def get_vmdk_size_and_properties(image, instance):
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
- elif FLAGS.image_service == "nova.image.local.LocalImageService":
- raise NotImplementedError
LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
locals())
return size, properties
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 1c6d2572d..3c6345ec8 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(instance)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 113198689..d5ac39473 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -146,6 +146,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
def create_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
+ 'userdevice': '0',
'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 11da221f2..f91958c57 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -283,19 +283,16 @@ class VMHelper(HelperBase):
@classmethod
def get_vdi_for_vm_safely(cls, session, vm_ref):
- vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
- if vdi_refs is None:
- raise Exception(_("No VDIs found for VM %s") % vm_ref)
- else:
- num_vdis = len(vdi_refs)
- if num_vdis != 1:
- raise Exception(
- _("Unexpected number of VDIs (%(num_vdis)s) found"
- " for VM %(vm_ref)s") % locals())
-
- vdi_ref = vdi_refs[0]
- vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
- return vdi_ref, vdi_rec
+ """Retrieves the primary VDI for a VM"""
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ for vbd in vbd_refs:
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ # Convention dictates the primary VDI will be userdevice 0
+ if vbd_rec['userdevice'] == '0':
+ vdi_rec = session.get_xenapi().VDI.get_record(vbd_rec['VDI'])
+ return vbd_rec['VDI'], vdi_rec
+ raise exception.Error(_("No primary VDI found for "
+ "%(vm_ref)s") % locals())
@classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
@@ -329,12 +326,6 @@ class VMHelper(HelperBase):
return template_vm_ref, template_vdi_uuids
@classmethod
- def get_sr(cls, session, sr_label='slices'):
- """Finds the SR named by the given name label and returns
- the UUID"""
- return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
-
- @classmethod
def get_sr_path(cls, session):
"""Return the path to our storage repository
@@ -789,8 +780,7 @@ class VMHelper(HelperBase):
@classmethod
def scan_default_sr(cls, session):
"""Looks for the system default SR and triggers a re-scan"""
- #FIXME(sirp/mdietz): refactor scan_default_sr in there
- sr_ref = cls.get_sr(session)
+ sr_ref = find_sr(session)
session.call_xenapi('SR.scan', sr_ref)
@@ -882,7 +872,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ raise exception.Error(_("Unexpected number of VDIs"
+ " (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d105cf300..2f4286184 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -47,6 +47,21 @@ LOG = logging.getLogger("nova.virt.xenapi.vmops")
FLAGS = flags.FLAGS
+def cmp_version(a, b):
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ a = a.split('.')
+ b = b.split('.')
+
+ # Compare each individual portion of both version strings
+ for va, vb in zip(a, b):
+ ret = int(va) - int(vb)
+ if ret:
+ return ret
+
+ # Fallback to comparing length last
+ return len(a) - len(b)
+
+
class VMOps(object):
"""
Management class for VM-related tasks
@@ -218,6 +233,34 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
+ ctx = context.get_admin_context()
+ agent_build = db.agent_build_get_by_triple(ctx, 'xen',
+ instance.os_type, instance.architecture)
+ if agent_build:
+ LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s is %(version)s') % agent_build)
+ else:
+ LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s') % {
+ 'hypervisor': 'xen',
+ 'os': instance.os_type,
+ 'architecture': instance.architecture})
+
+ def _check_agent_version():
+ version = self.get_agent_version(instance)
+ if not version:
+ LOG.info(_('No agent version returned by instance'))
+ return
+
+ LOG.info(_('Instance agent version: %s') % version)
+ if not agent_build:
+ return
+
+ if cmp_version(version, agent_build['version']) < 0:
+ LOG.info(_('Updating Agent to %s') % agent_build['version'])
+ self.agent_update(instance, agent_build['url'],
+ agent_build['md5hash'])
+
def _inject_files():
injected_files = instance.injected_files
if injected_files:
@@ -252,6 +295,7 @@ class VMOps(object):
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
+ _check_agent_version()
_inject_files()
_set_admin_password()
return True
@@ -458,6 +502,34 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
+ def get_agent_version(self, instance):
+ """Get the version of the agent running on the VM instance."""
+
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id}
+ resp = self._make_agent_call('version', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ return resp_dict['message']
+
+ def agent_update(self, instance, url, md5sum):
+ """Update agent on the VM instance."""
+
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id, 'url': url, 'md5sum': md5sum}
+ resp = self._make_agent_call('agentupdate', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ if resp_dict['returncode'] != '0':
+ raise RuntimeError(resp_dict['message'])
+ return resp_dict['message']
+
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance.
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 6d828e109..5fcec1715 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -194,7 +194,7 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance):
+ def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance"""
self._vmops.spawn(instance)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index b07f2e94b..7d27abff9 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -21,6 +21,9 @@ Handles all requests relating to volumes.
"""
+from eventlet import greenthread
+
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -44,7 +47,8 @@ class API(base.Base):
if snapshot['status'] != "available":
raise exception.ApiError(
_("Snapshot status must be available"))
- size = snapshot['volume_size']
+ if not size:
+ size = snapshot['volume_size']
if quota.allowed_volumes(context, 1, size) < 1:
pid = context.project_id
@@ -73,6 +77,14 @@ class API(base.Base):
"snapshot_id": snapshot_id}})
return volume
+ # TODO(yamahata): eliminate dumb polling
+ def wait_creation(self, context, volume_id):
+ while True:
+ volume = self.get(context, volume_id)
+ if volume['status'] != 'creating':
+ return
+ greenthread.sleep(1)
+
def delete(self, context, volume_id):
volume = self.get(context, volume_id)
if volume['status'] != "available":
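wait_creation simply polls until the volume leaves the 'creating' state. A usage sketch (the volume id here is hypothetical):

from nova import context
from nova.volume import api as volume_api

ctxt = context.get_admin_context()
vapi = volume_api.API()
volume_id = 1                        # hypothetical: a volume created just before
vapi.wait_creation(ctxt, volume_id)  # returns once status != 'creating'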
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 87e13277f..23e845deb 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -582,6 +582,14 @@ class FakeISCSIDriver(ISCSIDriver):
"""No setup necessary in fake mode."""
pass
+ def discover_volume(self, context, volume):
+ """Discover volume on a remote host."""
+ return "/dev/disk/by-path/volume-id-%d" % volume['id']
+
+ def undiscover_volume(self, volume):
+ """Undiscover volume on a remote host."""
+ pass
+
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 9e761f264..b8a1b936a 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -53,6 +53,19 @@ class TimeoutError(StandardError):
pass
+def version(self, arg_dict):
+ """Get version of agent."""
+ arg_dict["value"] = json.dumps({"name": "version", "value": ""})
+ request_id = arg_dict["id"]
+ arg_dict["path"] = "data/host/%s" % request_id
+ xenstore.write_record(self, arg_dict)
+ try:
+ resp = _wait_for_agent(self, request_id, arg_dict)
+ except TimeoutError, e:
+ raise PluginError(e)
+ return resp
+
+
def key_init(self, arg_dict):
"""Handles the Diffie-Hellman key exchange with the agent to
establish the shared secret key used to encrypt/decrypt sensitive
@@ -144,6 +157,23 @@ def inject_file(self, arg_dict):
return resp
+def agent_update(self, arg_dict):
+ """Expects an URL and md5sum of the contents, then directs the agent to
+ update itself."""
+ request_id = arg_dict["id"]
+ url = arg_dict["url"]
+ md5sum = arg_dict["md5sum"]
+ arg_dict["value"] = json.dumps({"name": "agentupdate",
+ "value": {"url": url, "md5sum": md5sum}})
+ arg_dict["path"] = "data/host/%s" % request_id
+ xenstore.write_record(self, arg_dict)
+ try:
+ resp = _wait_for_agent(self, request_id, arg_dict)
+ except TimeoutError, e:
+ raise PluginError(e)
+ return resp
+
+
def _agent_has_method(self, method):
"""Check that the agent has a particular method by checking its
features. Cache the features so we don't have to query the agent
@@ -201,7 +231,9 @@ def _wait_for_agent(self, request_id, arg_dict):
if __name__ == "__main__":
XenAPIPlugin.dispatch(
- {"key_init": key_init,
+ {"version": version,
+ "key_init": key_init,
"password": password,
"resetnetwork": resetnetwork,
- "inject_file": inject_file})
+ "inject_file": inject_file,
+ "agentupdate": agent_update})
diff --git a/run_tests.py b/run_tests.py
index 0944bb585..bb33f9139 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -211,6 +211,12 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
+ # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
+ # error results in it failing to be initialized later. Otherwise,
+ # _handleElapsedTime will fail, causing the wrong error message to
+ # be output.
+ self.start_time = time.time()
+
def getDescription(self, test):
return str(test)
diff --git a/tools/pip-requires b/tools/pip-requires
index 7849dbea9..0da473109 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -15,7 +15,7 @@ python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
routes==1.12.3
-WebOb==0.9.8
+WebOb==1.0.8
wsgiref==0.1.2
mox==0.5.3
greenlet==0.3.1