-rw-r--r--  Authors | 3
-rwxr-xr-x  bin/nova-all | 2
-rw-r--r--  etc/nova/nova.conf.sample | 2
-rw-r--r--  etc/nova/rootwrap.d/compute.filters | 11
-rw-r--r--  etc/nova/rootwrap.d/network.filters | 11
-rw-r--r--  nova/api/ec2/cloud.py | 8
-rw-r--r--  nova/api/metadata/base.py | 24
-rw-r--r--  nova/api/metadata/handler.py | 4
-rw-r--r--  nova/api/openstack/compute/contrib/disk_config.py | 32
-rw-r--r--  nova/api/openstack/compute/contrib/extended_server_attributes.py | 37
-rw-r--r--  nova/api/openstack/compute/contrib/extended_status.py | 41
-rw-r--r--  nova/api/openstack/compute/image_metadata.py | 4
-rw-r--r--  nova/api/openstack/compute/images.py | 10
-rw-r--r--  nova/api/openstack/compute/servers.py | 40
-rw-r--r--  nova/api/openstack/compute/views/servers.py | 7
-rw-r--r--  nova/api/openstack/wsgi.py | 46
-rw-r--r--  nova/compute/api.py | 173
-rw-r--r--  nova/compute/manager.py | 7
-rw-r--r--  nova/consoleauth/manager.py | 2
-rw-r--r--  nova/db/api.py | 14
-rw-r--r--  nova/db/sqlalchemy/api.py | 150
-rw-r--r--  nova/flags.py | 3
-rw-r--r--  nova/image/__init__.py | 51
-rw-r--r--  nova/image/glance.py | 58
-rw-r--r--  nova/image/s3.py | 19
-rw-r--r--  nova/network/l3.py | 2
-rw-r--r--  nova/network/linux_net.py | 49
-rw-r--r--  nova/network/manager.py | 4
-rw-r--r--  nova/network/quantum/nova_ipam_lib.py | 19
-rw-r--r--  nova/notifications.py | 2
-rw-r--r--  nova/scheduler/driver.py | 2
-rw-r--r--  nova/scheduler/filters/affinity_filter.py | 5
-rw-r--r--  nova/scheduler/host_manager.py | 3
-rw-r--r--  nova/test.py | 4
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 65
-rw-r--r--  nova/tests/api/ec2/test_ec2_validate.py | 16
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_aggregates.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_disk_config.py | 7
-rw-r--r--  nova/tests/api/openstack/compute/test_images.py | 400
-rw-r--r--  nova/tests/api/openstack/compute/test_server_actions.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py | 19
-rw-r--r--  nova/tests/api/openstack/compute/test_urlmap.py | 6
-rw-r--r--  nova/tests/api/openstack/fakes.py | 20
-rw-r--r--  nova/tests/api/openstack/test_wsgi.py | 23
-rw-r--r--  nova/tests/api/test_validator.py | 2
-rw-r--r--  nova/tests/compute/test_compute.py | 19
-rw-r--r--  nova/tests/fake_flags.py | 1
-rw-r--r--  nova/tests/fakelibvirt.py | 6
-rw-r--r--  nova/tests/image/fake.py (renamed from nova/image/fake.py) | 40
-rw-r--r--  nova/tests/image/test_fake.py (renamed from nova/tests/test_image.py) | 41
-rw-r--r--  nova/tests/image/test_glance.py | 154
-rw-r--r--  nova/tests/image/test_s3.py | 14
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 9
-rw-r--r--  nova/tests/network/test_linux_net.py | 20
-rw-r--r--  nova/tests/network/test_quantum.py | 2
-rw-r--r--  nova/tests/scheduler/fakes.py | 3
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 26
-rw-r--r--  nova/tests/scheduler/test_host_manager.py | 4
-rw-r--r--  nova/tests/test_compute_utils.py | 5
-rw-r--r--  nova/tests/test_db_api.py | 10
-rw-r--r--  nova/tests/test_imagecache.py | 2
-rw-r--r--  nova/tests/test_libvirt.py | 244
-rw-r--r--  nova/tests/test_libvirt_config.py | 189
-rw-r--r--  nova/tests/test_metadata.py | 16
-rw-r--r--  nova/tests/test_notifications.py | 2
-rw-r--r--  nova/tests/test_nova_rootwrap.py | 2
-rw-r--r--  nova/tests/test_quota.py | 17
-rw-r--r--  nova/tests/test_virt_drivers.py | 26
-rw-r--r--  nova/tests/test_vmwareapi.py | 7
-rw-r--r--  nova/tests/test_volume_utils.py | 3
-rw-r--r--  nova/tests/test_xenapi.py | 19
-rw-r--r--  nova/tests/utils.py | 4
-rw-r--r--  nova/tests/xenapi/stubs.py | 9
-rw-r--r--  nova/utils.py | 18
-rw-r--r--  nova/virt/connection.py | 2
-rw-r--r--  nova/virt/disk/guestfs.py | 1
-rw-r--r--  nova/virt/images.py | 9
-rw-r--r--  nova/virt/libvirt/__init__.py | 17
-rw-r--r--  nova/virt/libvirt/config.py | 249
-rw-r--r--  nova/virt/libvirt/driver.py (renamed from nova/virt/libvirt/connection.py) | 65
-rw-r--r--  nova/virt/libvirt/vif.py | 2
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py | 5
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 9
-rw-r--r--  nova/virt/xenapi/vmops.py | 1
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 1
-rw-r--r--  nova/volume/iscsi.py | 2
-rw-r--r--  setup.py | 1
-rw-r--r--  tools/test-requires | 2
-rwxr-xr-x  tools/xenserver/vm_vdi_cleaner.py | 119
89 files changed, 1386 insertions, 1423 deletions
diff --git a/Authors b/Authors
index fb6f98e02..183289785 100644
--- a/Authors
+++ b/Authors
@@ -44,10 +44,12 @@ Cory Wright <corywright@gmail.com>
Craig Vyvial <cp16net@gmail.com>
Dan Prince <dprince@redhat.com>
Dan Wendlandt <dan@nicira.com>
+Dan Smith <danms@us.ibm.com>
Daniel P. Berrange <berrange@redhat.com>
Dave Lapsley <dlapsley@nicira.com>
Dave Walker <Dave.Walker@canonical.com>
David Pravec <David.Pravec@danix.org>
+David Shrewsbury <shrewsbury.dave@gmail.com>
David Subiros <david.perez5@hp.com>
Dean Troyer <dtroyer@gmail.com>
Deepak Garg <deepak.garg@citrix.com>
@@ -193,6 +195,7 @@ Sascha Peilicke <saschpe@suse.de>
Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
Scott Moser <smoser@ubuntu.com>
Sean Dague <sdague@linux.vnet.ibm.com>
+Sean M. Collins <sean@coreitpro.com>
Somik Behera <somikbehera@gmail.com>
Soren Hansen <soren.hansen@rackspace.com>
Stanislaw Pitucha <stanislaw.pitucha@hp.com>
diff --git a/bin/nova-all b/bin/nova-all
index 57e84b0dd..5c0644b8c 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -60,7 +60,7 @@ if __name__ == '__main__':
try:
servers.append(service.WSGIService(api))
except (Exception, SystemExit):
- logging.exception(_('Failed to load %s') % '%s-api' % api)
+ LOG.exception(_('Failed to load %s') % '%s-api' % api)
for mod in [s3server, xvp_proxy]:
try:
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 8726f5dcb..a84d212da 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1417,7 +1417,7 @@
#### soft reboot request is made. We fall back to hard reboot if
#### instance does not shutdown within this window.
-# libvirt_nonblocking=false
+# libvirt_nonblocking=true
#### (BoolOpt) Use a separated OS thread pool to realize non-blocking
#### libvirt calls
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index c2e760f0e..fffeff85a 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -78,6 +78,9 @@ cp: CommandFilter, /bin/cp, root
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
+# nova/network/linux_net.py: 'ip', 'route', 'add', ..
+# nova/network/linux_net.py: 'ip', 'route', 'del', ..
+# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
ip: CommandFilter, /sbin/ip, root
# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
@@ -130,14 +133,6 @@ ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
arping: CommandFilter, /usr/bin/arping, root
arping_sbin: CommandFilter, /sbin/arping, root
-# nova/network/linux_net.py: 'route', '-n'
-# nova/network/linux_net.py: 'route', 'del', 'default', 'gw'
-# nova/network/linux_net.py: 'route', 'add', 'default', 'gw'
-# nova/network/linux_net.py: 'route', '-n'
-# nova/network/linux_net.py: 'route', 'del', 'default', 'gw', old_gw, ..
-# nova/network/linux_net.py: 'route', 'add', 'default', 'gw', old_gateway
-route: CommandFilter, /sbin/route, root
-
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
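Dropping the dedicated route filter is safe because rootwrap authorizes a command by its executable name, so the 'ip route ...' calls introduced later in this diff are already covered by the existing ip CommandFilter. A simplified, standalone sketch of that matching (an illustrative model, not the real nova rootwrap classes):

ALLOWED = {'ip': '/sbin/ip', 'dhcp_release': '/usr/bin/dhcp_release'}

def rewrite_command(userargs):
    # CommandFilter-style check: match on the executable name, then run the
    # configured absolute path as root with the caller's arguments.
    executable = userargs[0]
    if executable not in ALLOWED:
        raise SystemExit('unauthorized command: %r' % userargs)
    return [ALLOWED[executable]] + list(userargs[1:])

print(rewrite_command(['ip', 'route', 'show', 'dev', 'br100']))
print(rewrite_command(['ip', 'route', 'del', 'default', 'dev', 'br100']))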
diff --git a/etc/nova/rootwrap.d/network.filters b/etc/nova/rootwrap.d/network.filters
index c85ab9a33..c635f12e4 100644
--- a/etc/nova/rootwrap.d/network.filters
+++ b/etc/nova/rootwrap.d/network.filters
@@ -21,6 +21,9 @@
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
+# nova/network/linux_net.py: 'ip', 'route', 'add', ..
+# nova/network/linux_net.py: 'ip', 'route', 'del', ..
+# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
ip: CommandFilter, /sbin/ip, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
@@ -48,14 +51,6 @@ ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
arping: CommandFilter, /usr/bin/arping, root
arping_sbin: CommandFilter, /sbin/arping, root
-# nova/network/linux_net.py: 'route', '-n'
-# nova/network/linux_net.py: 'route', 'del', 'default', 'gw'
-# nova/network/linux_net.py: 'route', 'add', 'default', 'gw'
-# nova/network/linux_net.py: 'route', '-n'
-# nova/network/linux_net.py: 'route', 'del', 'default', 'gw', old_gw, ..
-# nova/network/linux_net.py: 'route', 'add', 'default', 'gw', old_gateway
-route: CommandFilter, /sbin/route, root
-
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 1bb505659..75f28d510 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -334,7 +334,7 @@ class CloudController(object):
key_pairs = [x for x in key_pairs if x['name'] in key_name]
#If looking for non existent key pair
- if key_name != None and key_pairs == []:
+ if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.EC2APIError(msg)
@@ -1245,9 +1245,11 @@ class CloudController(object):
internal_id = ec2utils.ec2_id_to_id(ec2_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
+ filters = {'name': ec2_id}
+ images = self.image_service.detail(context, filters=filters)
try:
- return self.image_service.show_by_name(context, ec2_id)
- except exception.NotFound:
+ return images[0]
+ except IndexError:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if ec2utils.image_type(image.get('container_format')) != image_type:
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 379643954..1b0ac441b 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -81,7 +81,7 @@ class InstanceMetadata():
self.mappings = _format_instance_mapping(ctxt, instance)
- if instance.get('user_data', None) != None:
+ if instance.get('user_data', None) is not None:
self.userdata_b64 = base64.b64decode(instance['user_data'])
else:
self.userdata_b64 = None
@@ -133,13 +133,21 @@ class InstanceMetadata():
for key in self.ec2_ids:
data['meta-data'][key] = self.ec2_ids[key]
- if self.userdata_b64 != None:
+ if self.userdata_b64 is not None:
data['user-data'] = self.userdata_b64
- # public-keys should be in meta-data only if user specified one
+ # public keys are strangely rendered in ec2 metadata service
+ # meta-data/public-keys/ returns '0=keyname' (with no trailing /)
+ # and only if there is a public key given.
+ # '0=keyname' means there is a normally rendered dict at
+ # meta-data/public-keys/0
+ #
+ # meta-data/public-keys/ : '0=%s' % keyname
+ # meta-data/public-keys/0/ : 'openssh-key'
+ # meta-data/public-keys/0/openssh-key : '%s' % publickey
if self.instance['key_name']:
data['meta-data']['public-keys'] = {
- '0': {'_name': self.instance['key_name'],
+ '0': {'_name': "0=" + self.instance['key_name'],
'openssh-key': self.instance['key_data']}}
if False: # TODO(vish): store ancestor ids
@@ -241,12 +249,14 @@ def ec2_md_print(data):
for key in sorted(data.keys()):
if key == '_name':
continue
- output += key
if isinstance(data[key], dict):
if '_name' in data[key]:
- output += '=' + str(data[key]['_name'])
+ output += str(data[key]['_name'])
else:
- output += '/'
+ output += key + '/'
+ else:
+ output += key
+
output += '\n'
return output[:-1]
elif isinstance(data, list):
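The comment block above describes the slightly odd way the EC2 metadata service lists SSH keys, and the ec2_md_print() change implements it by letting a child dict's '_name' stand in for its key. A standalone sketch of that listing logic with illustrative data (not the Nova module itself):

def render_listing(data):
    # Render one directory level of the metadata tree, as the patched
    # ec2_md_print() does for dicts.
    lines = []
    for key in sorted(data):
        if key == '_name':
            continue
        child = data[key]
        if isinstance(child, dict):
            # '_name' overrides the key, which is how 'public-keys/'
            # lists as '0=keyname' rather than '0/'.
            lines.append(str(child['_name']) if '_name' in child else key + '/')
        else:
            lines.append(key)
    return '\n'.join(lines)

public_keys = {'0': {'_name': '0=mykey',
                     'openssh-key': 'ssh-rsa AAAA... user@host'}}
print(render_listing(public_keys))       # -> 0=mykey
print(render_listing(public_keys['0']))  # -> openssh-key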
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index f4f6b689c..59a7d030e 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -17,6 +17,7 @@
# under the License.
"""Metadata request handler."""
+import os
import webob.dec
import webob.exc
@@ -67,6 +68,9 @@ class MetadataRequestHandler(wsgi.Application):
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
+ if os.path.normpath("/" + req.path_info) == "/":
+ return(base.ec2_md_print(base.VERSIONS + ["latest"]))
+
try:
meta_data = self.get_metadata(remote_address)
except Exception:
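With this hunk, any request whose path normalizes to '/' is answered directly with the supported metadata versions plus 'latest', before the per-instance lookup. A standalone sketch of that dispatch, using an illustrative version list rather than the real nova.api.metadata.base.VERSIONS:

import os

VERSIONS = ['1.0', '2007-01-19', '2009-04-04']  # illustrative subset

def handle(path_info):
    # os.path.normpath collapses '', '/', '//' and './' forms to '/',
    # so every spelling of the root path gets the version listing.
    if os.path.normpath('/' + path_info) == '/':
        return '\n'.join(VERSIONS + ['latest'])
    return 'would look up metadata for %r' % path_info

print(handle(''))
print(handle('//'))
print(handle('2009-04-04/meta-data/'))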
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 041686a57..d803035af 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -21,7 +21,6 @@ from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import db
from nova import utils
ALIAS = 'OS-DCF'
@@ -102,30 +101,25 @@ class ServersDiskConfigTemplate(xmlutil.TemplateBuilder):
class ServerDiskConfigController(wsgi.Controller):
- def _add_disk_config(self, context, servers):
- # Get DB information for servers
- uuids = [server['id'] for server in servers]
- db_servers = db.instance_get_all_by_filters(context,
- {'uuid': uuids})
- db_servers_by_uuid = dict((s['uuid'], s) for s in db_servers)
-
+ def _add_disk_config(self, req, servers):
for server in servers:
- db_server = db_servers_by_uuid.get(server['id'])
- if db_server:
- value = db_server[INTERNAL_DISK_CONFIG]
- server[API_DISK_CONFIG] = disk_config_to_api(value)
+ db_server = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'show'/'detail' methods.
+ value = db_server[INTERNAL_DISK_CONFIG]
+ server[API_DISK_CONFIG] = disk_config_to_api(value)
- def _show(self, context, resp_obj):
+ def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
resp_obj.attach(xml=ServerDiskConfigTemplate())
server = resp_obj.obj['server']
- self._add_disk_config(context, [server])
+ self._add_disk_config(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- self._show(context, resp_obj)
+ self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
@@ -133,7 +127,7 @@ class ServerDiskConfigController(wsgi.Controller):
if 'servers' in resp_obj.obj and authorize(context):
resp_obj.attach(xml=ServersDiskConfigTemplate())
servers = resp_obj.obj['servers']
- self._add_disk_config(context, servers)
+ self._add_disk_config(req, servers)
def _set_disk_config(self, dict_):
if API_DISK_CONFIG in dict_:
@@ -147,7 +141,7 @@ class ServerDiskConfigController(wsgi.Controller):
if authorize(context):
self._set_disk_config(body['server'])
resp_obj = (yield)
- self._show(context, resp_obj)
+ self._show(req, resp_obj)
@wsgi.extends
def update(self, req, id, body):
@@ -155,7 +149,7 @@ class ServerDiskConfigController(wsgi.Controller):
if authorize(context):
self._set_disk_config(body['server'])
resp_obj = (yield)
- self._show(context, resp_obj)
+ self._show(req, resp_obj)
@wsgi.extends(action='rebuild')
def _action_rebuild(self, req, id, body):
@@ -163,7 +157,7 @@ class ServerDiskConfigController(wsgi.Controller):
if authorize(context):
self._set_disk_config(body['rebuild'])
resp_obj = (yield)
- self._show(context, resp_obj)
+ self._show(req, resp_obj)
@wsgi.extends(action='resize')
def _action_resize(self, req, id, body):
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index 4c584ec05..46d4df2d1 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -14,14 +14,11 @@
"""The Extended Server Attributes API extension."""
-from webob import exc
-
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
-from nova import exception
from nova import flags
from nova import log as logging
@@ -38,11 +35,6 @@ class ExtendedServerAttributesController(wsgi.Controller):
**kwargs)
self.compute_api = compute.API()
- def _get_instances(self, context, instance_uuids):
- filters = {'uuid': instance_uuids}
- instances = self.compute_api.get_all(context, filters)
- return dict((instance['uuid'], instance) for instance in instances)
-
def _get_hypervisor_hostname(self, context, instance):
compute_node = db.compute_node_get_by_host(context, instance["host"])
@@ -69,14 +61,11 @@ class ExtendedServerAttributesController(wsgi.Controller):
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedServerAttributeTemplate())
-
- try:
- instance = self.compute_api.get(context, id)
- except exception.NotFound:
- explanation = _("Server not found.")
- raise exc.HTTPNotFound(explanation=explanation)
-
- self._extend_server(context, resp_obj.obj['server'], instance)
+ server = resp_obj.obj['server']
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'show' method.
+ self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
@@ -86,17 +75,11 @@ class ExtendedServerAttributesController(wsgi.Controller):
resp_obj.attach(xml=ExtendedServerAttributesTemplate())
servers = list(resp_obj.obj['servers'])
- instance_uuids = [server['id'] for server in servers]
- instances = self._get_instances(context, instance_uuids)
-
- for server_object in servers:
- try:
- instance_data = instances[server_object['id']]
- except KeyError:
- # Ignore missing instance data
- continue
-
- self._extend_server(context, server_object, instance_data)
+ for server in servers:
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'detail' method.
+ self._extend_server(context, server, db_instance)
class Extended_server_attributes(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index 879d17b53..d17319c67 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -14,13 +14,10 @@
"""The Extended Status Admin API extension."""
-from webob import exc
-
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
-from nova import exception
from nova import flags
from nova import log as logging
@@ -35,14 +32,6 @@ class ExtendedStatusController(wsgi.Controller):
super(ExtendedStatusController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
- def _get_instances(self, context, instance_uuids):
- if not instance_uuids:
- return {}
-
- filters = {'uuid': instance_uuids}
- instances = self.compute_api.get_all(context, filters)
- return dict((instance['uuid'], instance) for instance in instances)
-
def _extend_server(self, server, instance):
for state in ['task_state', 'vm_state', 'power_state']:
key = "%s:%s" % (Extended_status.alias, state)
@@ -54,14 +43,11 @@ class ExtendedStatusController(wsgi.Controller):
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
-
- try:
- instance = self.compute_api.get(context, id)
- except exception.NotFound:
- explanation = _("Server not found.")
- raise exc.HTTPNotFound(explanation=explanation)
-
- self._extend_server(resp_obj.obj['server'], instance)
+ server = resp_obj.obj['server']
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'show' method.
+ self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
@@ -69,19 +55,12 @@ class ExtendedStatusController(wsgi.Controller):
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
-
servers = list(resp_obj.obj['servers'])
- instance_uuids = [server['id'] for server in servers]
- instances = self._get_instances(context, instance_uuids)
-
- for server_object in servers:
- try:
- instance_data = instances[server_object['id']]
- except KeyError:
- # Ignore missing instance data
- continue
-
- self._extend_server(server_object, instance_data)
+ for server in servers:
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'detail' method.
+ self._extend_server(server, db_instance)
class Extended_status(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 145e0395e..516b25d88 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -21,7 +21,7 @@ from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
from nova import flags
-from nova import image
+from nova.image import glance
FLAGS = flags.FLAGS
@@ -31,7 +31,7 @@ class Controller(object):
"""The image metadata API controller for the OpenStack API"""
def __init__(self):
- self.image_service = image.get_default_image_service()
+ self.image_service = glance.get_default_image_service()
def _get_image(self, context, image_id):
try:
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 629ba67d5..35d21da60 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -21,7 +21,7 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova import flags
-import nova.image
+import nova.image.glance
from nova import log as logging
import nova.utils
@@ -96,12 +96,12 @@ class Controller(wsgi.Controller):
def __init__(self, image_service=None, **kwargs):
"""Initialize new `ImageController`.
- :param image_service: `nova.image.glance:GlancemageService`
+ :param image_service: `nova.image.glance:GlanceImageService`
"""
super(Controller, self).__init__(**kwargs)
self._image_service = (image_service or
- nova.image.get_default_image_service())
+ nova.image.glance.get_default_image_service())
def _get_filters(self, req):
"""
@@ -177,8 +177,8 @@ class Controller(wsgi.Controller):
params[key] = val
try:
- images = self._image_service.index(context, filters=filters,
- **page_params)
+ images = self._image_service.detail(context, filters=filters,
+ **page_params)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=str(e))
return self._view_builder.index(req, images)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 69f1b2f3d..5a8e1f645 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -461,16 +461,20 @@ class Controller(wsgi.Controller):
limited_list = self._limit_items(instance_list, req)
if is_detail:
self._add_instance_faults(context, limited_list)
- return self._view_builder.detail(req, limited_list)
+ response = self._view_builder.detail(req, limited_list)
else:
- return self._view_builder.index(req, limited_list)
+ response = self._view_builder.index(req, limited_list)
+ req.cache_db_instances(limited_list)
+ return response
- def _get_server(self, context, instance_uuid):
+ def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
- return self.compute_api.get(context, instance_uuid)
+ instance = self.compute_api.get(context, instance_uuid)
except exception.NotFound:
raise exc.HTTPNotFound()
+ req.cache_db_instance(instance)
+ return instance
def _validate_server_name(self, value):
if not isinstance(value, basestring):
@@ -580,6 +584,7 @@ class Controller(wsgi.Controller):
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id)
+ req.cache_db_instance(instance)
self._add_instance_faults(context, [instance])
return self._view_builder.show(req, instance)
except exception.NotFound:
@@ -654,9 +659,6 @@ class Controller(wsgi.Controller):
self._validate_user_data(user_data)
availability_zone = server_dict.get('availability_zone')
- name = server_dict['name']
- self._validate_server_name(name)
- name = name.strip()
block_device_mapping = self._get_block_device_mapping(server_dict)
@@ -732,6 +734,7 @@ class Controller(wsgi.Controller):
if ret_resv_id:
return {'reservation_id': resv_id}
+ req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if '_is_precooked' in server['server'].keys():
@@ -744,8 +747,8 @@ class Controller(wsgi.Controller):
return self._add_location(robj)
- def _delete(self, context, id):
- instance = self._get_server(context, id)
+ def _delete(self, context, req, instance_uuid):
+ instance = self._get_server(context, req, instance_uuid)
if FLAGS.reclaim_instance_interval:
self.compute_api.soft_delete(context, instance)
else:
@@ -789,6 +792,7 @@ class Controller(wsgi.Controller):
try:
instance = self.compute_api.get(ctxt, id)
+ req.cache_db_instance(instance)
self.compute_api.update(ctxt, instance, **update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
@@ -804,7 +808,7 @@ class Controller(wsgi.Controller):
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
@@ -824,7 +828,7 @@ class Controller(wsgi.Controller):
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
@@ -856,7 +860,7 @@ class Controller(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
@@ -871,7 +875,7 @@ class Controller(wsgi.Controller):
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
- instance = self._get_server(context, instance_id)
+ instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
@@ -891,7 +895,7 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
"""Destroys a server."""
try:
- self._delete(req.environ['nova.context'], id)
+ self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
raise exc.HTTPNotFound()
except exception.InstanceInvalidState as state_error:
@@ -947,7 +951,7 @@ class Controller(wsgi.Controller):
if not isinstance(password, basestring):
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
- server = self._get_server(context, id)
+ server = self._get_server(context, req, id)
self.compute_api.set_admin_password(context, server, password)
return webob.Response(status_int=202)
@@ -1009,7 +1013,7 @@ class Controller(wsgi.Controller):
password = utils.generate_password(FLAGS.password_length)
context = req.environ['nova.context']
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
attr_map = {
'personality': 'files_to_inject',
@@ -1065,7 +1069,7 @@ class Controller(wsgi.Controller):
except exception.InstanceTypeDiskTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
self._add_instance_faults(context, [instance])
view = self._view_builder.show(req, instance)
@@ -1103,7 +1107,7 @@ class Controller(wsgi.Controller):
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
- instance = self._get_server(context, id)
+ instance = self._get_server(context, req, id)
try:
image = self.compute_api.snapshot(context,
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index f14773e98..b0c2fb2b2 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -179,7 +179,12 @@ class ViewBuilder(common.ViewBuilder):
}
def _get_flavor(self, request, instance):
- flavor_id = instance["instance_type"]["flavorid"]
+ instance_type = instance["instance_type"]
+ if not instance_type:
+ LOG.warn(_("Instance has had its instance_type removed "
+ "from the DB"), instance=instance)
+ return {}
+ flavor_id = instance_type["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
flavor_id,
"flavors")
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index f15891c8f..4f78a4697 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -64,6 +64,52 @@ _MEDIA_TYPE_MAP = {
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
+ def __init__(self, *args, **kwargs):
+ super(Request, self).__init__(*args, **kwargs)
+ self._extension_data = {'db_instances': {}}
+
+ def cache_db_instances(self, instances):
+ """
+ Allow API methods to store instances from a DB query to be
+ used by API extensions within the same API request.
+
+ An instance of this class only lives for the lifetime of a
+ single API request, so there's no need to implement full
+ cache management.
+ """
+ db_instances = self._extension_data['db_instances']
+ for instance in instances:
+ db_instances[instance['uuid']] = instance
+
+ def cache_db_instance(self, instance):
+ """
+ Allow API methods to store an instance from a DB query to be
+ used by API extensions within the same API request.
+
+ An instance of this class only lives for the lifetime of a
+ single API request, so there's no need to implement full
+ cache management.
+ """
+ self.cache_db_instances([instance])
+
+ def get_db_instances(self):
+ """
+ Allow an API extension to get previously stored instances within
+ the same API request.
+
+ Note that the instance data will be slightly stale.
+ """
+ return self._extension_data['db_instances']
+
+ def get_db_instance(self, instance_uuid):
+ """
+ Allow an API extension to get a previously stored instance
+ within the same API request.
+
+ Note that the instance data will be slightly stale.
+ """
+ return self._extension_data['db_instances'].get(instance_uuid)
+
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
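The new request-scoped cache ties the pieces of this change together: the servers controller calls cache_db_instance()/cache_db_instances() after its own DB lookups, and extensions such as disk_config, extended_status and extended_server_attributes read the rows back with get_db_instance() instead of issuing a second compute_api.get()/get_all() per request. A standalone sketch of the pattern with a stand-in request class (not the real webob subclass):

class FakeRequest(object):
    def __init__(self):
        self._extension_data = {'db_instances': {}}

    def cache_db_instances(self, instances):
        for instance in instances:
            self._extension_data['db_instances'][instance['uuid']] = instance

    def get_db_instance(self, instance_uuid):
        return self._extension_data['db_instances'].get(instance_uuid)

# Core API: one DB query per request, stashed on the request object.
req = FakeRequest()
req.cache_db_instances([{'uuid': 'abc', 'host': 'node1', 'task_state': None},
                        {'uuid': 'def', 'host': 'node2', 'task_state': 'spawning'}])

# Extension code later in the same request: no extra DB round trip.
print(req.get_db_instance('abc')['host'])        # -> node1
print(req.get_db_instance('def')['task_state'])  # -> spawning

Since a Request only lives for a single API call, the cache needs no invalidation; the data is simply as fresh as the controller's original query.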
diff --git a/nova/compute/api.py b/nova/compute/api.py
index caec8f4b6..557dbb930 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -39,7 +39,7 @@ from nova import crypto
from nova.db import base
from nova import exception
from nova import flags
-import nova.image
+from nova.image import glance
from nova import log as logging
from nova import network
from nova import notifications
@@ -121,7 +121,7 @@ class API(base.Base):
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
- nova.image.get_default_image_service())
+ glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
@@ -374,8 +374,8 @@ class API(base.Base):
self._check_injected_file_quota(context, injected_files)
self._check_requested_networks(context, requested_networks)
- (image_service, image_id) = nova.image.get_image_service(context,
- image_href)
+ (image_service, image_id) = glance.get_remote_image_service(context,
+ image_href)
image = image_service.show(context, image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
@@ -398,8 +398,6 @@ class API(base.Base):
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, image, image_service)
- self.security_group_api.ensure_default(context)
-
if key_data is None and key_name:
key_pair = self.db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
@@ -521,9 +519,6 @@ class API(base.Base):
"""tell vm driver to create ephemeral/swap device at boot time by
updating BlockDeviceMapping
"""
- instance_type = (instance_type or
- instance_types.get_default_instance_type())
-
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("bdm %s"), bdm)
@@ -583,6 +578,74 @@ class API(base.Base):
self.db.block_device_mapping_update_or_create(elevated_context,
values)
+ def _populate_instance_for_bdm(self, context, instance, instance_type,
+ image, block_device_mapping):
+ """Populate instance block device mapping information."""
+ # FIXME(comstud): Why do the block_device_mapping DB calls
+ # require elevated context?
+ elevated = context.elevated()
+ instance_uuid = instance['uuid']
+ mappings = image['properties'].get('mappings', [])
+ if mappings:
+ instance['shutdown_terminate'] = False
+ self._update_image_block_device_mapping(elevated,
+ instance_type, instance_uuid, mappings)
+
+ image_bdm = image['properties'].get('block_device_mapping', [])
+ for mapping in (image_bdm, block_device_mapping):
+ if not mapping:
+ continue
+ instance['shutdown_terminate'] = False
+ self._update_block_device_mapping(elevated,
+ instance_type, instance_uuid, mapping)
+
+ def _populate_instance_names(self, instance):
+ """Populate instance display_name and hostname."""
+ display_name = instance.get('display_name')
+ hostname = instance.get('hostname')
+
+ if display_name is None:
+ display_name = self._default_display_name(instance['uuid'])
+ instance['display_name'] = display_name
+ if hostname is None:
+ hostname = display_name
+ instance['hostname'] = utils.sanitize_hostname(hostname)
+
+ def _default_display_name(self, instance_uuid):
+ return "Server %s" % instance_uuid
+
+ def _populate_instance_for_create(self, base_options, image,
+ security_groups):
+ """Build the beginning of a new instance."""
+
+ instance = base_options
+ if not instance.get('uuid'):
+ # Generate the instance_uuid here so we can use it
+ # for additional setup before creating the DB entry.
+ instance['uuid'] = str(utils.gen_uuid())
+
+ instance['launch_index'] = 0
+ instance['vm_state'] = vm_states.BUILDING
+ instance['task_state'] = task_states.SCHEDULING
+ instance['architecture'] = image['properties'].get('architecture')
+ instance['info_cache'] = {'network_info': '[]'}
+
+ # Store image properties so we can use them later
+ # (for notifications, etc). Only store what we can.
+ instance.setdefault('system_metadata', {})
+ for key, value in image['properties'].iteritems():
+ new_value = str(value)[:255]
+ instance['system_metadata']['image_%s' % key] = new_value
+
+ # Use 'default' security_group if none specified.
+ if security_groups is None:
+ security_groups = ['default']
+ elif not isinstance(security_groups, list):
+ security_groups = [security_groups]
+ instance['security_groups'] = security_groups
+
+ return instance
+
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
@@ -594,83 +657,26 @@ class API(base.Base):
This is called by the scheduler after a location for the
instance has been determined.
"""
- elevated = context.elevated()
- if security_group is None:
- security_group = ['default']
- if not isinstance(security_group, list):
- security_group = [security_group]
-
- security_groups = []
- for security_group_name in security_group:
- group = self.db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
+ instance = self._populate_instance_for_create(base_options,
+ image, security_group)
- # Store image properties so we can use them later
- # (for notifications, etc). Only store what we can.
- base_options.setdefault('system_metadata', {})
- for key, value in image['properties'].iteritems():
- new_value = str(value)[:255]
- base_options['system_metadata']['image_%s' % key] = new_value
-
- base_options.setdefault('launch_index', 0)
- instance = self.db.instance_create(context, base_options)
+ self._populate_instance_names(instance)
- # Commit the reservations
- if reservations:
- QUOTAS.commit(context, reservations)
+ self._populate_instance_for_bdm(context, instance,
+ instance_type, image, block_device_mapping)
- instance_id = instance['id']
- instance_uuid = instance['uuid']
+ instance = self.db.instance_create(context, instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- instance_uuid,
- security_group_id)
-
- # BlockDeviceMapping table
- self._update_image_block_device_mapping(elevated, instance_type,
- instance_uuid, image['properties'].get('mappings', []))
- self._update_block_device_mapping(elevated, instance_type,
- instance_uuid,
- image['properties'].get('block_device_mapping', []))
- # override via command line option
- self._update_block_device_mapping(elevated, instance_type,
- instance_uuid, block_device_mapping)
-
- # Set sane defaults if not specified
- updates = {}
-
- display_name = instance.get('display_name')
- if display_name is None:
- display_name = self._default_display_name(instance_id)
-
- hostname = instance.get('hostname')
- if hostname is None:
- hostname = display_name
-
- updates['display_name'] = display_name
- updates['hostname'] = utils.sanitize_hostname(hostname)
- updates['vm_state'] = vm_states.BUILDING
- updates['task_state'] = task_states.SCHEDULING
-
- updates['architecture'] = image['properties'].get('architecture')
-
- if (image['properties'].get('mappings', []) or
- image['properties'].get('block_device_mapping', []) or
- block_device_mapping):
- updates['shutdown_terminate'] = False
-
- return self.update(context, instance, **updates)
+ # Commit the reservations
+ if reservations:
+ QUOTAS.commit(context, reservations)
- def _default_display_name(self, instance_id):
- return "Server %s" % instance_id
+ return instance
def _schedule_run_instance(self,
use_call,
@@ -749,7 +755,7 @@ class API(base.Base):
# only going to create 1 instance.
# This speeds up API responses for builds
# as we don't need to wait for the scheduler.
- create_instance_here = max_count == 1
+ create_instance_here = max_count == 1 or max_count == None
(instances, reservation_id) = self._create_instance(
context, instance_type,
@@ -1213,7 +1219,7 @@ class API(base.Base):
def _get_image(self, context, image_href):
"""Throws an ImageNotFound exception if image_href does not exist."""
- (image_service, image_id) = nova.image.get_image_service(context,
+ (image_service, image_id) = glance.get_remote_image_service(context,
image_href)
return image_service.show(context, image_id)
@@ -1926,16 +1932,7 @@ class SecurityGroupAPI(base.Base):
:param context: the security context
"""
- try:
- self.db.security_group_get_by_name(context,
- context.project_id,
- 'default')
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user_id,
- 'project_id': context.project_id}
- self.db.security_group_create(context, values)
+ self.db.security_group_ensure_default(context)
def create(self, context, name, description):
try:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index a9816817b..deca04177 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -57,7 +57,7 @@ from nova.compute import vm_states
import nova.context
from nova import exception
from nova import flags
-import nova.image
+from nova.image import glance
from nova import log as logging
from nova import manager
from nova import network
@@ -205,7 +205,8 @@ def wrap_instance_fault(function):
def _get_image_meta(context, image_ref):
- image_service, image_id = nova.image.get_image_service(context, image_ref)
+ image_service, image_id = glance.get_remote_image_service(context,
+ image_ref)
return image_service.show(context, image_id)
@@ -1056,7 +1057,7 @@ class ComputeManager(manager.SchedulerDependentManager):
marker = batch[-1]['id']
return images
- image_service = nova.image.get_default_image_service()
+ image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance_uuid}
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 09f9a4046..049ce1fce 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -71,7 +71,7 @@ class ConsoleAuthManager(manager.Manager):
def check_token(self, context, token):
token_str = self.mc.get(token)
- token_valid = (token_str != None)
+ token_valid = (token_str is not None)
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
if token_valid:
return jsonutils.loads(token_str)
diff --git a/nova/db/api.py b/nova/db/api.py
index e0d150506..dc859f748 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -563,9 +563,9 @@ def instance_get(context, instance_id):
return IMPL.instance_get(context, instance_id)
-def instance_get_all(context):
+def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
- return IMPL.instance_get_all(context)
+ return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
@@ -1154,6 +1154,11 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id):
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
+
+def ec2_snapshot_create(context, snapshot_id, forced_id=None):
+ return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
+
+
####################
@@ -1276,6 +1281,11 @@ def security_group_create(context, values):
return IMPL.security_group_create(context, values)
+def security_group_ensure_default(context):
+ """Ensure default security group exists for a project_id."""
+ return IMPL.security_group_ensure_default(context)
+
+
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index eabd03a22..55effd645 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -611,13 +611,13 @@ def compute_node_utilization_set(context, host, free_ram_mb=None,
raise exception.NotFound(_("No ComputeNode for %(host)s") %
locals())
- if free_ram_mb != None:
+ if free_ram_mb is not None:
compute_node.free_ram_mb = free_ram_mb
- if free_disk_gb != None:
+ if free_disk_gb is not None:
compute_node.free_disk_gb = free_disk_gb
- if work != None:
+ if work is not None:
compute_node.current_workload = work
- if vms != None:
+ if vms is not None:
compute_node.running_vms = vms
return compute_node
@@ -1329,14 +1329,34 @@ def instance_create(context, values):
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(utils.gen_uuid())
+ instance_ref['info_cache'] = models.InstanceInfoCache()
+ info_cache = values.pop('info_cache', None)
+ if info_cache is not None:
+ instance_ref['info_cache'].update(info_cache)
+ security_groups = values.pop('security_groups', [])
instance_ref.update(values)
+ def _get_sec_group_models(session, security_groups):
+ models = []
+ default_group = security_group_ensure_default(context,
+ session=session)
+ if 'default' in security_groups:
+ models.append(default_group)
+ # Generate a new list, so we don't modify the original
+ security_groups = [x for x in security_groups if x != 'default']
+ if security_groups:
+ models.extend(_security_group_get_by_names(context,
+ session, context.project_id, security_groups))
+ return models
+
session = get_session()
with session.begin():
+ instance_ref.security_groups = _get_sec_group_models(session,
+ security_groups)
instance_ref.save(session=session)
-
- # and creat the info_cache table entry for instance
- instance_info_cache_create(context, {'instance_id': instance_ref['uuid']})
+ # NOTE(comstud): This forces instance_type to be loaded so it
+ # exists in the ref when we return. Fixes lazy loading issues.
+ instance_ref.instance_type
return instance_ref
@@ -1435,13 +1455,14 @@ def _build_instance_get(context, session=None):
@require_admin_context
-def instance_get_all(context):
- return model_query(context, models.Instance).\
- options(joinedload('info_cache')).\
- options(joinedload('security_groups')).\
- options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
- all()
+def instance_get_all(context, columns_to_join=None):
+ if columns_to_join is None:
+ columns_to_join = ['info_cache', 'security_groups',
+ 'metadata', 'instance_type']
+ query = model_query(context, models.Instance)
+ for column in columns_to_join:
+ query = query.options(joinedload(column))
+ return query.all()
@require_context
@@ -2928,15 +2949,13 @@ def _volume_get_query(context, session=None, project_only=False):
@require_context
-def _ec2_volume_get_query(context, session=None, project_only=False):
- return model_query(context, models.VolumeIdMapping, session=session,
- project_only=project_only)
+def _ec2_volume_get_query(context, session=None):
+ return model_query(context, models.VolumeIdMapping, session=session)
@require_context
-def _ec2_snapshot_get_query(context, session=None, project_only=False):
- return model_query(context, models.SnapshotIdMapping, session=session,
- project_only=project_only)
+def _ec2_snapshot_get_query(context, session=None):
+ return model_query(context, models.SnapshotIdMapping, session=session)
@require_context
@@ -3025,9 +3044,7 @@ def ec2_volume_create(context, volume_uuid, id=None):
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
- result = _ec2_volume_get_query(context,
- session=session,
- project_only=True).\
+ result = _ec2_volume_get_query(context, session=session).\
filter_by(uuid=volume_id).\
first()
@@ -3039,9 +3056,7 @@ def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
- result = _ec2_volume_get_query(context,
- session=session,
- project_only=True).\
+ result = _ec2_volume_get_query(context, session=session).\
filter_by(id=ec2_id).\
first()
@@ -3066,9 +3081,7 @@ def ec2_snapshot_create(context, snapshot_uuid, id=None):
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
- result = _ec2_snapshot_get_query(context,
- session=session,
- project_only=True).\
+ result = _ec2_snapshot_get_query(context, session=session).\
filter_by(uuid=snapshot_id).\
first()
@@ -3080,9 +3093,7 @@ def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
- result = _ec2_snapshot_get_query(context,
- session=session,
- project_only=True).\
+ result = _ec2_snapshot_get_query(context, session=session).\
filter_by(id=ec2_id).\
first()
@@ -3331,10 +3342,33 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
###################
def _security_group_get_query(context, session=None, read_deleted=None,
- project_only=False):
- return model_query(context, models.SecurityGroup, session=session,
- read_deleted=read_deleted, project_only=project_only).\
- options(joinedload_all('rules'))
+ project_only=False, join_rules=True):
+ query = model_query(context, models.SecurityGroup, session=session,
+ read_deleted=read_deleted, project_only=project_only)
+ if join_rules:
+ query = query.options(joinedload_all('rules'))
+ return query
+
+
+def _security_group_get_by_names(context, session, project_id, group_names):
+ """
+ Get security group models for a project by a list of names.
+ Raise SecurityGroupNotFoundForProject for a name not found.
+ """
+ query = _security_group_get_query(context, session=session,
+ read_deleted="no", join_rules=False).\
+ filter_by(project_id=project_id).\
+ filter(models.SecurityGroup.name.in_(group_names))
+ sg_models = query.all()
+ if len(sg_models) == len(group_names):
+ return sg_models
+ # Find the first one missing and raise
+ group_names_from_models = [x.name for x in sg_models]
+ for group_name in group_names:
+ if group_name not in group_names_from_models:
+ raise exception.SecurityGroupNotFoundForProject(
+ project_id=project_id, security_group_id=group_name)
+ # Not Reached
@require_context
@@ -3358,13 +3392,23 @@ def security_group_get(context, security_group_id, session=None):
@require_context
-def security_group_get_by_name(context, project_id, group_name):
- result = _security_group_get_query(context, read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(name=group_name).\
- options(joinedload_all('instances')).\
- first()
+def security_group_get_by_name(context, project_id, group_name,
+ columns_to_join=None, session=None):
+ if session is None:
+ session = get_session()
+
+ query = _security_group_get_query(context, session=session,
+ read_deleted="no", join_rules=False).\
+ filter_by(project_id=project_id).\
+ filter_by(name=group_name)
+
+ if columns_to_join is None:
+ columns_to_join = ['instances', 'rules']
+
+ for column in columns_to_join:
+ query = query.options(joinedload_all(column))
+ result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
@@ -3418,16 +3462,34 @@ def security_group_in_use(context, group_id):
@require_context
-def security_group_create(context, values):
+def security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
- security_group_ref.save()
+ if session is None:
+ session = get_session()
+ security_group_ref.save(session=session)
return security_group_ref
+def security_group_ensure_default(context, session=None):
+ """Ensure default security group exists for a project_id."""
+ try:
+ default_group = security_group_get_by_name(context,
+ context.project_id, 'default',
+ columns_to_join=[], session=session)
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ default_group = security_group_create(context, values,
+ session=session)
+ return default_group
+
+
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
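security_group_ensure_default() moves the get-or-create logic that SecurityGroupAPI.ensure_default() used to open-code (see the compute/api.py hunk earlier in this diff) into the DB layer, so instance_create() can reuse it within the same session. A minimal in-memory sketch of the contract it provides (stand-in storage, not the real SQLAlchemy models or sessions):

class NotFound(Exception):
    pass

_groups = {}  # (project_id, name) -> group dict

def security_group_get_by_name(project_id, name):
    try:
        return _groups[(project_id, name)]
    except KeyError:
        raise NotFound(name)

def security_group_create(values):
    _groups[(values['project_id'], values['name'])] = values
    return values

def security_group_ensure_default(project_id, user_id):
    # Get-or-create: always returns the project's 'default' group.
    try:
        return security_group_get_by_name(project_id, 'default')
    except NotFound:
        return security_group_create({'name': 'default',
                                      'description': 'default',
                                      'user_id': user_id,
                                      'project_id': project_id})

first = security_group_ensure_default('proj-1', 'user-1')
second = security_group_ensure_default('proj-1', 'user-1')
print(first is second)  # True: idempotent, so callers can invoke it freely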
diff --git a/nova/flags.py b/nova/flags.py
index c427a7e94..bb769c4dd 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -322,9 +322,6 @@ global_opts = [
cfg.StrOpt('firewall_driver',
default='nova.virt.firewall.IptablesFirewallDriver',
help='Firewall driver (defaults to iptables)'),
- cfg.StrOpt('image_service',
- default='nova.image.glance.GlanceImageService',
- help='The service to use for retrieving and searching images.'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
diff --git a/nova/image/__init__.py b/nova/image/__init__.py
index 18dcc2fa2..e69de29bb 100644
--- a/nova/image/__init__.py
+++ b/nova/image/__init__.py
@@ -1,51 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import nova
-from nova import flags
-from nova.image import glance
-from nova.openstack.common import importutils
-
-FLAGS = flags.FLAGS
-
-
-def get_default_image_service():
- ImageService = importutils.import_class(FLAGS.image_service)
- return ImageService()
-
-
-def get_image_service(context, image_href):
- """Get the proper image_service and id for the given image_href.
-
- The image_href param can be an href of the form
- http://myglanceserver:9292/images/42, or just an int such as 42. If the
- image_href is an int, then the default image service is returned.
-
- :param image_href: image ref/id for an image
- :returns: a tuple of the form (image_service, image_id)
-
- """
- # check if this is not a uri
- if '/' not in str(image_href):
- return (get_default_image_service(), image_href)
-
- else:
- (glance_client, image_id) = glance._get_glance_client(context,
- image_href)
- image_service = nova.image.glance.GlanceImageService(glance_client)
- return (image_service, image_id)
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 0edc0d1a0..6691afeb0 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -156,21 +156,6 @@ class GlanceImageService(object):
raise exception.GlanceConnectionFailed(
reason=_('Maximum attempts reached'))
- def index(self, context, **kwargs):
- """Calls out to Glance for a list of images available."""
- params = self._extract_query_params(kwargs)
- image_metas = self._get_images(context, **params)
-
- images = []
- for image_meta in image_metas:
- # NOTE(sirp): We need to use `get_images_detailed` and not
- # `get_images` here because we need `is_public` and `properties`
- # included so we can filter by user
- if self._is_image_available(context, image_meta):
- meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
- images.append(meta_subset)
- return images
-
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
@@ -250,15 +235,7 @@ class GlanceImageService(object):
base_image_meta = self._translate_from_glance(image_meta)
return base_image_meta
- def show_by_name(self, context, name):
- """Returns a dict containing image data for the given name."""
- image_metas = self.detail(context, filters={'name': name})
- try:
- return image_metas[0]
- except (IndexError, TypeError):
- raise exception.ImageNotFound(image_id=name)
-
- def get(self, context, image_id, data):
+ def download(self, context, image_id, data):
"""Calls out to Glance for metadata and data and writes data."""
try:
image_meta, image_chunks = self._call_retry(context, 'get_image',
@@ -269,9 +246,6 @@ class GlanceImageService(object):
for chunk in image_chunks:
data.write(chunk)
- base_image_meta = self._translate_from_glance(image_meta)
- return base_image_meta
-
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image id.
@@ -452,7 +426,7 @@ def _convert_to_string(metadata):
def _limit_attributes(image_meta):
- IMAGE_ATTRIBUTES = ['size', 'disk_format',
+ IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'checksum', 'id',
'name', 'created_at', 'updated_at',
'deleted_at', 'deleted', 'status',
@@ -511,3 +485,31 @@ def _translate_plain_exception(exc_type, exc_value):
if exc_type is glance_exception.Invalid:
return exception.Invalid(exc_value)
return exc_value
+
+
+def get_remote_image_service(context, image_href):
+ """Create an image_service and parse the id from the given image_href.
+
+ The image_href param can be an href of the form
+ 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
+ or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
+ image_href is a standalone id, then the default image service is returned.
+
+ :param image_href: href that describes the location of an image
+ :returns: a tuple of the form (image_service, image_id)
+
+ """
+ #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
+ # standalone image ID
+ if '/' not in str(image_href):
+ image_service = get_default_image_service()
+ image_id = image_href
+ else:
+ (glance_client, image_id) = _get_glance_client(context, image_href)
+ image_service = GlanceImageService(glance_client)
+
+ return (image_service, image_id)
+
+
+def get_default_image_service():
+ return GlanceImageService()
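get_remote_image_service() takes over the dispatch that nova.image.get_image_service() used to perform: a bare image ID maps to the locally configured Glance service, while a full href yields a client pointed at the server named in the URL. A standalone sketch of that decision (placeholder service objects and simplified parsing; the real code builds a client via _get_glance_client()):

def pick_image_service(image_href):
    if '/' not in str(image_href):
        # Standalone ID: use the default, locally configured Glance service.
        return ('default glance service', image_href)
    # Full href: connect to the Glance server it names and use the
    # trailing component as the image id.
    image_id = str(image_href).rstrip('/').split('/')[-1]
    return ('glance service for %s' % image_href, image_id)

print(pick_image_service('b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'))
print(pick_image_service('http://example.com:9292/v1/images/'
                         'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'))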
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 9fcfd5c89..df2758b3a 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -33,7 +33,7 @@ from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
from nova import flags
-from nova import image
+from nova.image import glance
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
@@ -69,7 +69,7 @@ class S3ImageService(object):
def __init__(self, service=None, *args, **kwargs):
self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
- self.service = service or image.get_default_image_service()
+ self.service = service or glance.get_default_image_service()
self.service.__init__(*args, **kwargs)
def _translate_uuids_to_ids(self, context, images):
@@ -136,16 +136,11 @@ class S3ImageService(object):
image = self.service.update(context, image_uuid, metadata, data)
return self._translate_uuid_to_id(context, image)
- def index(self, context):
+ def detail(self, context, **kwargs):
#NOTE(bcwaldon): sort asc to make sure we assign lower ids
# to older images
- images = self.service.index(context, sort_dir='asc')
- return self._translate_uuids_to_ids(context, images)
-
- def detail(self, context):
- #NOTE(bcwaldon): sort asc to make sure we assign lower ids
- # to older images
- images = self.service.detail(context, sort_dir='asc')
+ kwargs.setdefault('sort_dir', 'asc')
+ images = self.service.detail(context, **kwargs)
return self._translate_uuids_to_ids(context, images)
def show(self, context, image_id):
@@ -153,10 +148,6 @@ class S3ImageService(object):
image = self.service.show(context, image_uuid)
return self._translate_uuid_to_id(context, image)
- def show_by_name(self, context, name):
- image = self.service.show_by_name(context, name)
- return self._translate_uuid_to_id(context, image)
-
@staticmethod
def _conn(context):
# NOTE(vish): access and secret keys for s3 server are not
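
For clarity, the kwargs pass-through above keeps the ascending sort as a default rather than a hard-coded value; a minimal sketch of dict.setdefault behaviour (illustrative values only):

    kwargs = {'limit': 5}
    kwargs.setdefault('sort_dir', 'asc')   # -> {'limit': 5, 'sort_dir': 'asc'}

    kwargs = {'sort_dir': 'desc'}
    kwargs.setdefault('sort_dir', 'asc')   # unchanged: the caller's 'desc' wins

So S3ImageService.detail() still assigns lower EC2 ids to older images by default, while callers may now override the sort order or pass any other Glance filter straight through.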
diff --git a/nova/network/l3.py b/nova/network/l3.py
index bcd5d87c0..034678aa5 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -85,7 +85,7 @@ class LinuxNetL3(L3Driver):
self.initialized = True
def is_initialized(self):
- return self.initialized == True
+ return self.initialized
def initialize_network(self, cidr):
linux_net.add_snat_rule(cidr)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 42fb9cc10..828174b3a 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -548,24 +548,27 @@ def initialize_gateway_device(dev, network_ref):
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
- gateway = None
- out, err = _execute('route', '-n', run_as_root=True)
- for line in out.split('\n'):
- fields = line.split()
- if fields and fields[0] == '0.0.0.0' and fields[-1] == dev:
- gateway = fields[1]
- _execute('route', 'del', 'default', 'gw', gateway,
- 'dev', dev, run_as_root=True,
- check_exit_code=[0, 7])
+ old_routes = []
+ result = _execute('ip', 'route', 'show', 'dev', dev,
+ run_as_root=True)
+ if result:
+ out, err = result
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and 'via' in fields:
+ old_routes.append(fields)
+ _execute('ip', 'route', 'del', fields[0],
+ 'dev', dev, run_as_root=True)
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
- if gateway:
- _execute('route', 'add', 'default', 'gw', gateway,
- run_as_root=True, check_exit_code=[0, 7])
+
+ for fields in old_routes:
+ _execute('ip', 'route', 'add', *fields,
+ run_as_root=True)
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', network_ref['dhcp_server'],
'-A', '-I', dev,
@@ -1030,16 +1033,16 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
- old_gateway = None
- out, err = _execute('route', '-n', run_as_root=True)
+ # NOTE(danms): We also need to copy routes to the bridge so as
+ # not to break existing connectivity on the interface
+ old_routes = []
+ out, err = _execute('ip', 'route', 'show', 'dev', interface)
for line in out.split('\n'):
fields = line.split()
- if (fields and fields[0] == '0.0.0.0' and
- fields[-1] == interface):
- old_gateway = fields[1]
- _execute('route', 'del', 'default', 'gw', old_gateway,
- 'dev', interface, run_as_root=True,
- check_exit_code=[0, 7])
+ if fields and 'via' in fields:
+ old_routes.append(fields)
+ _execute('ip', 'route', 'del', *fields,
+ run_as_root=True)
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
@@ -1050,9 +1053,9 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
- if old_gateway:
- _execute('route', 'add', 'default', 'gw', old_gateway,
- run_as_root=True, check_exit_code=[0, 7])
+ for fields in old_routes:
+ _execute('ip', 'route', 'add', *fields,
+ run_as_root=True)
if (err and err != "device %s is already a member of a bridge;"
"can't enslave it to bridge %s.\n" % (interface, bridge)):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index fb37499e4..2fc75eec1 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -230,7 +230,7 @@ class RPCAllocateFixedIP(object):
def deallocate_fixed_ip(self, context, address, host, **kwargs):
"""Call the superclass deallocate_fixed_ip if i'm the correct host
- otherwise cast to the correct host"""
+ otherwise call to the correct host"""
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
network = self._get_network_by_id(context, fixed_ip['network_id'])
@@ -243,7 +243,7 @@ class RPCAllocateFixedIP(object):
topic = rpc.queue_get_for(context, FLAGS.network_topic, host)
args = {'address': address,
'host': host}
- rpc.cast(context, topic,
+ rpc.call(context, topic,
{'method': 'deallocate_fixed_ip',
'args': args})
else:
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
index 55a5718ed..ceb589562 100644
--- a/nova/network/quantum/nova_ipam_lib.py
+++ b/nova/network/quantum/nova_ipam_lib.py
@@ -47,10 +47,6 @@ class QuantumNovaIPAMLib(object):
"""
self.net_manager = net_manager
- # NOTE(s0mik) : If DHCP is not in use, we need to timeout IPs
- # periodically. See comment in deallocate_ips_by_vif for more
- self.net_manager.timeout_fixed_ips = not self.net_manager.DHCP
-
def create_subnet(self, context, label, tenant_id,
quantum_net_id, priority, cidr=None,
gateway=None, gateway_v6=None, cidr_v6=None,
@@ -224,16 +220,12 @@ class QuantumNovaIPAMLib(object):
# be disassociated with the instance-id by a call to one of two
# methods inherited from FlatManager:
# - if DHCP is in use, a lease expiring in dnsmasq triggers
- # a call to release_fixed_ip in the network manager.
- # - otherwise, _disassociate_stale_fixed_ips is called periodically
- # to disassociate all fixed ips that are unallocated
- # but still associated with an instance-id.
+ # a call to release_fixed_ip in the network manager, or it will
+ # be timed out periodically if the lease fails.
+ # - otherwise, we release the ip immediately
read_deleted_context = admin_context.elevated(read_deleted='yes')
for fixed_ip in fixed_ips:
- db.fixed_ip_update(admin_context, fixed_ip['address'],
- {'allocated': False,
- 'virtual_interface_id': None})
fixed_id = fixed_ip['id']
floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
admin_context,
@@ -252,6 +244,11 @@ class QuantumNovaIPAMLib(object):
read_deleted_context,
address,
affect_auto_assigned=True)
+ db.fixed_ip_update(admin_context, fixed_ip['address'],
+ {'allocated': False,
+ 'virtual_interface_id': None})
+ if not self.net_manager.DHCP:
+ db.fixed_ip_disassociate(admin_context, fixed_ip['address'])
if len(fixed_ips) == 0:
LOG.error(_('No fixed IPs to deallocate for vif %s'),
diff --git a/nova/notifications.py b/nova/notifications.py
index 654ba5d25..29bca2c2f 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -158,7 +158,7 @@ def bandwidth_usage(instance_ref, audit_start,
admin_context = nova.context.get_admin_context(read_deleted='yes')
if (instance_ref.get('info_cache') and
- instance_ref['info_cache'].get('network_info')):
+ instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
nw_info = network_model.NetworkInfo.hydrate(cached_info)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 41a2f5c19..64f068ff2 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -50,7 +50,7 @@ FLAGS = flags.FLAGS
FLAGS.register_opts(scheduler_driver_opts)
flags.DECLARE('instances_path', 'nova.compute.manager')
-flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver')
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index b4e1a3034..e2a38b1e6 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -18,7 +18,6 @@
import netaddr
from nova.compute import api as compute
-from nova import flags
from nova.scheduler import filters
@@ -39,6 +38,8 @@ class DifferentHostFilter(AffinityFilter):
me = host_state.host
affinity_uuids = scheduler_hints.get('different_host', [])
+ if isinstance(affinity_uuids, basestring):
+ affinity_uuids = [affinity_uuids]
if affinity_uuids:
return not any([i for i in affinity_uuids
if self._affinity_host(context, i) == me])
@@ -57,6 +58,8 @@ class SameHostFilter(AffinityFilter):
me = host_state.host
affinity_uuids = scheduler_hints.get('same_host', [])
+ if isinstance(affinity_uuids, basestring):
+ affinity_uuids = [affinity_uuids]
if affinity_uuids:
return any([i for i
in affinity_uuids
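
The isinstance() guards above only normalize the scheduler hint, which may arrive as a single uuid string or as a list; a standalone sketch of the same idea (the function name is illustrative):

    def _as_uuid_list(affinity_uuids):
        # A bare string becomes a one-element list so the membership
        # checks in the filters treat both forms the same way.
        if isinstance(affinity_uuids, basestring):
            affinity_uuids = [affinity_uuids]
        return affinity_uuids

    _as_uuid_list('aaaa-bbbb')              # ['aaaa-bbbb']
    _as_uuid_list(['aaaa-bbbb', 'cccc'])    # returned unchanged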
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 2d5f41bae..6de5ebc8f 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -333,7 +333,8 @@ class HostManager(object):
host_state_map[host] = host_state
# "Consume" resources from the host the instance resides on.
- instances = db.instance_get_all(context)
+ instances = db.instance_get_all(context,
+ columns_to_join=['instance_type'])
for instance in instances:
host = instance['host']
if not host:
diff --git a/nova/test.py b/nova/test.py
index 1839fd1e4..da115300e 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -32,7 +32,6 @@ import nose.plugins.skip
import stubout
from nova import flags
-import nova.image.fake
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
@@ -150,9 +149,6 @@ class TestCase(unittest.TestCase):
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
- if FLAGS.image_service == 'nova.image.fake.FakeImageService':
- nova.image.fake.FakeImageService_reset()
-
# Reset any overridden flags
FLAGS.reset()
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 6dd6fe916..90724977a 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -36,12 +36,12 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova.image import fake
from nova.image import s3
from nova import log as logging
from nova.network import api as network_api
from nova.openstack.common import rpc
from nova import test
+from nova.tests.image import fake
from nova import utils
@@ -94,6 +94,24 @@ class CloudTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True)
+ def fake_show(meh, context, id):
+ return {'id': id,
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
def dumb(*args, **kwargs):
pass
@@ -115,18 +133,6 @@ class CloudTestCase(test.TestCase):
self.project_id,
is_admin=True)
- def fake_show(meh, context, id):
- return {'id': id,
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available'}}
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
-
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
@@ -137,6 +143,10 @@ class CloudTestCase(test.TestCase):
db.api.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+ def tearDown(self):
+ super(CloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
def _stub_instance_get_with_fixed_ips(self, func_name):
orig_func = getattr(self.cloud.compute_api, func_name)
@@ -1123,6 +1133,9 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
+ def fake_detail_none(self, context, **kwargs):
+ return []
+
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
@@ -1138,7 +1151,7 @@ class CloudTestCase(test.TestCase):
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
- self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
@@ -1206,7 +1219,7 @@ class CloudTestCase(test.TestCase):
return i
raise exception.ImageNotFound(image_id=image_id)
- def fake_detail(meh, context):
+ def fake_detail(meh, context, **kwargs):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
@@ -1308,8 +1321,13 @@ class CloudTestCase(test.TestCase):
'container_format': 'ami',
'is_public': True}
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
@@ -1355,6 +1373,11 @@ class CloudTestCase(test.TestCase):
def fake_show(meh, context, id):
return copy.deepcopy(fake_metadata)
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
def fake_update(meh, context, image_id, metadata, data=None):
self.assertEqual(metadata['properties']['kernel_id'],
fake_metadata['properties']['kernel_id'])
@@ -1366,7 +1389,7 @@ class CloudTestCase(test.TestCase):
return image
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
@@ -1468,7 +1491,7 @@ class CloudTestCase(test.TestCase):
# invalid image
self.stubs.UnsetAll()
- def fake_detail_empty(self, context):
+ def fake_detail_empty(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
@@ -1618,7 +1641,6 @@ class CloudTestCase(test.TestCase):
'container_format': 'ami',
'status': 'active'}
- self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
@@ -1723,7 +1745,11 @@ class CloudTestCase(test.TestCase):
'type': 'machine'},
'status': 'active'}
+ def fake_id_to_glance_id(context, id):
+ return 'cedef40a-ed67-4d10-800e-17455edce175'
+
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
@@ -2352,7 +2378,6 @@ class CloudTestCase(test.TestCase):
for i in range(3, 7):
db.api.s3_image_create(self.context, 'ami-%d' % i)
- self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('stop', image_id='ami-3')
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index d917c3352..cd213dd09 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -22,7 +22,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova.image import fake
+from nova.tests.image import fake
from nova import log as logging
from nova.openstack.common import importutils
from nova.openstack.common import rpc
@@ -50,7 +50,7 @@ class EC2ValidateTestCase(test.TestCase):
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
- self.image_service = importutils.import_object(FLAGS.image_service)
+ self.image_service = fake.FakeImageService()
self.user_id = 'fake'
self.project_id = 'fake'
@@ -80,8 +80,14 @@ class EC2ValidateTestCase(test.TestCase):
'type': 'machine',
'image_state': 'available'}}
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(self, context, None)
+ image['name'] = kwargs.get('name')
+ return [image]
+
+ fake.stub_out_image_service(self.stubs)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
@@ -93,6 +99,10 @@ class EC2ValidateTestCase(test.TestCase):
db.api.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+ def tearDown(self):
+ super(EC2ValidateTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
#EC2_API tests (InvalidInstanceID.Malformed)
def test_console_output(self):
for ec2_id, e in self.ec2_id_exception_map:
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index 080cf8428..fc19a9545 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -52,7 +52,7 @@ class AggregateTestCase(test.TestCase):
def test_index(self):
def stub_list_aggregates(context):
- if context == None:
+ if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index e63c28080..9999d94da 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -24,6 +24,7 @@ from nova.openstack.common import jsonutils
import nova.openstack.common.rpc
from nova import test
from nova.tests.api.openstack import fakes
+import nova.tests.image.fake
MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
@@ -45,6 +46,8 @@ class DiskConfigTestCase(test.TestCase):
def setUp(self):
super(DiskConfigTestCase, self).setUp()
self.flags(verbose=True)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
+
fakes.stub_out_nw_api(self.stubs)
FAKE_INSTANCES = [
@@ -120,6 +123,10 @@ class DiskConfigTestCase(test.TestCase):
self.app = compute.APIRouter()
+ def tearDown(self):
+ super(DiskConfigTestCase, self).tearDown()
+ nova.tests.image.fake.FakeImageService_reset()
+
def assertDiskConfig(self, dict_, value):
self.assert_(API_DISK_CONFIG in dict_)
self.assertEqual(dict_[API_DISK_CONFIG], value)
diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py
index 3b100f3f7..af1dee30b 100644
--- a/nova/tests/api/openstack/compute/test_images.py
+++ b/nova/tests/api/openstack/compute/test_images.py
@@ -173,276 +173,6 @@ class ImagesControllerTest(test.TestCase):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, fake_req, 'unknown')
- def test_get_image_index(self):
- fake_req = fakes.HTTPRequest.blank('/v2/fake/images')
- response_list = self.controller.index(fake_req)['images']
-
- expected_images = [
- {
- "id": "123",
- "name": "public image",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/123",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/123",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/123" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "124",
- "name": "queued snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/124",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/124",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/124" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "125",
- "name": "saving snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/125",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/125",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/125" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "126",
- "name": "active snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/126",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/126",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/126" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "127",
- "name": "killed snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/127",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/127",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/127" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "128",
- "name": "deleted snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/128",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/128",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/128" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "129",
- "name": "pending_delete snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/129",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/129",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/129" %
- utils.generate_glance_url()
- },
- ],
- },
- {
- "id": "130",
- "name": None,
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/130",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/130",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": "%s/fake/images/130" %
- utils.generate_glance_url()
- },
- ],
- },
- ]
-
- self.assertDictListMatch(response_list, expected_images)
-
- def test_get_image_index_with_limit(self):
- request = fakes.HTTPRequest.blank('/v2/fake/images?limit=3')
- response = self.controller.index(request)
- response_list = response["images"]
- response_links = response["images_links"]
-
- alternate = "%s/fake/images/%s"
-
- expected_images = [
- {
- "id": "123",
- "name": "public image",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/123",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/123",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 123),
- },
- ],
- },
- {
- "id": "124",
- "name": "queued snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/124",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/124",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 124),
- },
- ],
- },
- {
- "id": "125",
- "name": "saving snapshot",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/images/125",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/125",
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 125),
- },
- ],
- },
- ]
-
- self.assertDictListMatch(response_list, expected_images)
- self.assertEqual(response_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(response_links[0]['href'])
- self.assertEqual('/v2/fake/images', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['3'], 'marker': ['125']}, params)
-
- def test_get_image_index_with_limit_and_extra_params(self):
- request = fakes.HTTPRequest.blank('/v2/fake/images?limit=3&extra=bo')
- response = self.controller.index(request)
- response_links = response["images_links"]
-
- self.assertEqual(response_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(response_links[0]['href'])
- self.assertEqual('/v2/fake/images', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch(
- {'limit': ['3'], 'marker': ['125'], 'extra': ['bo']},
- params)
-
- def test_get_image_index_with_big_limit(self):
- """
- Make sure we don't get images_links if limit is set
- and the number of images returned is < limit
- """
- request = fakes.HTTPRequest.blank('/v2/fake/images?limit=30')
- response = self.controller.index(request)
-
- self.assertEqual(response.keys(), ['images'])
- self.assertEqual(len(response['images']), 8)
-
def test_get_image_details(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
response = self.controller.detail(request)
@@ -815,120 +545,6 @@ class ImagesControllerTest(test.TestCase):
self.assertDictMatch({'limit': ['2'], 'marker': ['124']}, params)
- def test_image_filter_with_name(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'name': 'testname'}
- request = fakes.HTTPRequest.blank('/v2/images?name=testname')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_with_min_ram(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'min_ram': '0'}
- request = fakes.HTTPRequest.blank('/v2/images?minRam=0')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_with_min_disk(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'min_disk': '7'}
- request = fakes.HTTPRequest.blank('/v2/images?minDisk=7')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_with_status(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'status': 'active'}
- request = fakes.HTTPRequest.blank('/v2/images?status=ACTIVE')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_with_property(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'property-test': '3'}
- request = fakes.HTTPRequest.blank('/v2/images?property-test=3')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_server(self):
- image_service = self.mox.CreateMockAnything()
- uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
- ref = 'http://localhost:8774/servers/' + uuid
- filters = {'property-instance_uuid': uuid}
- request = fakes.HTTPRequest.blank('/v2/images?server=' + ref)
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_changes_since(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'changes-since': '2011-01-24T17:08Z'}
- request = fakes.HTTPRequest.blank('/v2/images?changes-since='
- '2011-01-24T17:08Z')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_with_type(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'property-image_type': 'BASE'}
- request = fakes.HTTPRequest.blank('/v2/images?type=BASE')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_filter_not_supported(self):
- image_service = self.mox.CreateMockAnything()
- filters = {'status': 'active'}
- request = fakes.HTTPRequest.blank('/v2/images?status=ACTIVE&'
- 'UNSUPPORTEDFILTER=testname')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_no_filters(self):
- image_service = self.mox.CreateMockAnything()
- filters = {}
- request = fakes.HTTPRequest.blank('/v2/images')
- context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
- self.mox.ReplayAll()
- controller = images.Controller(image_service=image_service)
- controller.index(request)
-
- def test_image_invalid_marker(self):
- class InvalidImageService(object):
-
- def index(self, *args, **kwargs):
- raise exception.Invalid('meow')
-
- request = fakes.HTTPRequest.blank('/v2/images?marker=invalid')
- controller = images.Controller(image_service=InvalidImageService())
- self.assertRaises(webob.exc.HTTPBadRequest, controller.index, request)
-
def test_image_detail_filter_with_name(self):
image_service = self.mox.CreateMockAnything()
filters = {'name': 'testname'}
@@ -970,10 +586,10 @@ class ImagesControllerTest(test.TestCase):
filters = {'property-instance_uuid': uuid}
request = fakes.HTTPRequest.blank(url)
context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
+ image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
- controller.index(request)
+ controller.detail(request)
def test_image_detail_filter_server_uuid(self):
image_service = self.mox.CreateMockAnything()
@@ -982,10 +598,10 @@ class ImagesControllerTest(test.TestCase):
filters = {'property-instance_uuid': uuid}
request = fakes.HTTPRequest.blank(url)
context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
+ image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
- controller.index(request)
+ controller.detail(request)
def test_image_detail_filter_changes_since(self):
image_service = self.mox.CreateMockAnything()
@@ -993,20 +609,20 @@ class ImagesControllerTest(test.TestCase):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?changes-since=2011-01-24T17:08Z')
context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
+ image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
- controller.index(request)
+ controller.detail(request)
def test_image_detail_filter_with_type(self):
image_service = self.mox.CreateMockAnything()
filters = {'property-image_type': 'BASE'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?type=BASE')
context = request.environ['nova.context']
- image_service.index(context, filters=filters).AndReturn([])
+ image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
- controller.index(request)
+ controller.detail(request)
def test_image_detail_filter_not_supported(self):
image_service = self.mox.CreateMockAnything()
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 3e3b4eafc..19d752a9f 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -27,6 +27,7 @@ from nova import flags
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
+import nova.tests.image.fake
from nova import utils
@@ -69,7 +70,7 @@ class ServerActionsControllerTest(test.TestCase):
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
- fakes.stub_out_image_service(self.stubs)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = importutils.import_object(service_class)
self.service.delete_all()
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 613747ce5..91a3ef68d 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -35,12 +35,12 @@ from nova.compute import vm_states
import nova.db
from nova.db.sqlalchemy import models
from nova import flags
-import nova.image.fake
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
+import nova.tests.image.fake
from nova import utils
@@ -99,7 +99,7 @@ class ServersControllerTest(test.TestCase):
self.flags(verbose=True, use_ipv6=False)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- fakes.stub_out_image_service(self.stubs)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
return_server = fakes.fake_instance_get()
return_servers = fakes.fake_instance_get_all_by_filters()
self.stubs.Set(nova.db, 'instance_get_all_by_filters',
@@ -1484,7 +1484,7 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- fakes.stub_out_image_service(self.stubs)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
self.stubs.Set(nova.db, 'instance_add_security_group',
@@ -2849,6 +2849,19 @@ class ServersViewBuilderTest(test.TestCase):
self.view_builder = views.servers.ViewBuilder()
self.request = fakes.HTTPRequest.blank("/v2")
+ def test_get_flavor_valid_instance_type(self):
+ flavor_bookmark = "http://localhost/fake/flavors/1"
+ expected = {"id": "1",
+ "links": [{"rel": "bookmark",
+ "href": flavor_bookmark}]}
+ result = self.view_builder._get_flavor(self.request, self.instance)
+ self.assertEqual(result, expected)
+
+ def test_get_flavor_deleted_instance_type(self):
+ self.instance['instance_type'] = {}
+ result = self.view_builder._get_flavor(self.request, self.instance)
+ self.assertEqual(result, {})
+
def test_build_server(self):
self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
diff --git a/nova/tests/api/openstack/compute/test_urlmap.py b/nova/tests/api/openstack/compute/test_urlmap.py
index 67524bcf5..780879e3c 100644
--- a/nova/tests/api/openstack/compute/test_urlmap.py
+++ b/nova/tests/api/openstack/compute/test_urlmap.py
@@ -19,6 +19,7 @@ from nova import log as logging
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
+import nova.tests.image.fake
LOG = logging.getLogger(__name__)
@@ -27,6 +28,11 @@ class UrlmapTest(test.TestCase):
def setUp(self):
super(UrlmapTest, self).setUp()
fakes.stub_out_rate_limiting(self.stubs)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
+
+ def tearDown(self):
+ super(UrlmapTest, self).tearDown()
+ nova.tests.image.fake.FakeImageService_reset()
def test_path_version_v1_1(self):
"""Test URL path specifying v1.1 returns v2 content."""
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 1479a8f6e..a6dcd4d2f 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -37,7 +37,7 @@ from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
-import nova.image.fake
+import nova.image.glance
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
@@ -125,14 +125,6 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True):
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
-def stub_out_image_service(stubs):
- def fake_get_image_service(context, image_href):
- return (nova.image.fake.FakeImageService(), image_href)
- stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
- stubs.Set(nova.image, 'get_default_image_service',
- lambda: nova.image.fake.FakeImageService())
-
-
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
@@ -276,10 +268,12 @@ def stub_out_glance_add_image(stubs, sent_to_glance):
def stub_out_glance(stubs):
- def fake_get_image_service():
+ def fake_get_remote_image_service():
client = glance_stubs.StubGlanceClient(_make_image_fixtures())
return nova.image.glance.GlanceImageService(client)
- stubs.Set(nova.image, 'get_default_image_service', fake_get_image_service)
+ stubs.Set(nova.image.glance,
+ 'get_default_image_service',
+ fake_get_remote_image_service)
class FakeToken(object):
@@ -301,13 +295,13 @@ class FakeRequestContext(context.RequestContext):
return super(FakeRequestContext, self).__init__(*args, **kwargs)
-class HTTPRequest(webob.Request):
+class HTTPRequest(os_wsgi.Request):
@classmethod
def blank(cls, *args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
- out = webob.Request.blank(*args, **kwargs)
+ out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index f32704775..f45450495 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -75,6 +75,27 @@ class RequestTest(test.TestCase):
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
+ def test_cache_and_retrieve_instances(self):
+ request = wsgi.Request.blank('/foo')
+ instances = []
+ for x in xrange(3):
+ instances.append({'uuid': 'uuid%s' % x})
+ # Store 2
+ request.cache_db_instances(instances[:2])
+ # Store 1
+ request.cache_db_instance(instances[2])
+ self.assertEqual(request.get_db_instance('uuid0'),
+ instances[0])
+ self.assertEqual(request.get_db_instance('uuid1'),
+ instances[1])
+ self.assertEqual(request.get_db_instance('uuid2'),
+ instances[2])
+ self.assertEqual(request.get_db_instance('uuid3'), None)
+ self.assertEqual(request.get_db_instances(),
+ {'uuid0': instances[0],
+ 'uuid1': instances[1],
+ 'uuid2': instances[2]})
+
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
@@ -215,7 +236,7 @@ class ResourceTest(test.TestCase):
expected = 'off'
self.assertEqual(actual, expected)
- def test_get_method_unknown_controller_action(self):
+ def test_get_method_unknown_controller_method(self):
class Controller(object):
def index(self, req, pants=None):
return pants
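
The cache test added above exercises the new request-level instance cache; the point is to let API extensions reuse instances the core controller already fetched instead of issuing their own DB lookups. A hedged sketch of an extension-side consumer (handler name and attribute key are illustrative, not taken from this patch):

    # Hypothetical extension callback relying on the request cache.
    def show(self, req, resp_obj, id):
        instance = req.get_db_instance(id)  # filled via cache_db_instance(s)
        if instance is not None:
            resp_obj.obj['server']['EXT:host'] = instance.get('host')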
diff --git a/nova/tests/api/test_validator.py b/nova/tests/api/test_validator.py
index 875cb6de4..132e67e95 100644
--- a/nova/tests/api/test_validator.py
+++ b/nova/tests/api/test_validator.py
@@ -26,7 +26,7 @@ class ValidatorTestCase(test.TestCase):
def test_validate(self):
fixture = {
- 'foo': lambda val: val == True
+ 'foo': lambda val: val is True
}
self.assertTrue(
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index c816acc95..9ae099b60 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -39,7 +39,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova.image import fake as fake_image
+from nova.tests.image import fake as fake_image
from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
@@ -125,11 +125,13 @@ class BaseTestCase(test.TestCase):
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
+ fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
def tearDown(self):
+ fake_image.FakeImageService_reset()
instances = db.instance_get_all(self.context.elevated())
for instance in instances:
db.instance_destroy(self.context.elevated(), instance['uuid'])
@@ -2159,7 +2161,20 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(context.get_admin_context())))
def test_default_hostname_generator(self):
- cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'),
+ fake_uuids = [str(utils.gen_uuid()) for x in xrange(4)]
+
+ orig_populate = self.compute_api._populate_instance_for_create
+
+ def _fake_populate(base_options, *args, **kwargs):
+ base_options['uuid'] = fake_uuids.pop(0)
+ return orig_populate(base_options, *args, **kwargs)
+
+ self.stubs.Set(self.compute_api,
+ '_populate_instance_for_create',
+ _fake_populate)
+
+ cases = [(None, 'server-%s' % fake_uuids[0]),
+ ('Hello, Server!', 'hello-server'),
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 3d05d7605..5bcfb4c71 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -38,7 +38,6 @@ def set_defaults(conf):
conf.set_default('fake_network', True)
conf.set_default('fake_rabbit', True)
conf.set_default('flat_network_bridge', 'br100')
- conf.set_default('image_service', 'nova.image.fake.FakeImageService')
conf.set_default('iscsi_num_targets', 8)
conf.set_default('network_size', 8)
conf.set_default('num_networks', 2)
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 3e4c63719..4350a8878 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -797,3 +797,9 @@ def openAuth(uri, auth, flags):
"this auth method"))
return Connection(uri, readonly=False)
+
+
+virDomain = Domain
+
+
+virConnect = Connection
diff --git a/nova/image/fake.py b/nova/tests/image/fake.py
index 996459081..99b1a6175 100644
--- a/nova/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
+# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -22,6 +23,7 @@ import datetime
from nova import exception
from nova import flags
+import nova.image.glance
from nova import log as logging
from nova import utils
@@ -41,7 +43,6 @@ class _FakeImageService(object):
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
- # NOTE(bcwaldon): was image '123456'
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
@@ -56,7 +57,6 @@ class _FakeImageService(object):
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64'}}
- # NOTE(bcwaldon): was image 'fake'
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
@@ -70,7 +70,6 @@ class _FakeImageService(object):
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
- # NOTE(bcwaldon): was image '2'
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
@@ -84,7 +83,6 @@ class _FakeImageService(object):
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
- # NOTE(bcwaldon): was image '1'
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
@@ -98,7 +96,6 @@ class _FakeImageService(object):
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
- # NOTE(bcwaldon): was image '3'
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
@@ -113,7 +110,6 @@ class _FakeImageService(object):
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
- # NOTE(sirp): was image '6'
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
@@ -129,7 +125,6 @@ class _FakeImageService(object):
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
- # NOTE(sirp): was image '7'
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
@@ -156,23 +151,13 @@ class _FakeImageService(object):
super(_FakeImageService, self).__init__()
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
- def index(self, context, **kwargs):
- """Returns list of images."""
- retval = []
- for img in self.images.values():
- retval += [dict([(k, v) for k, v in img.iteritems()
- if k in ['id', 'name']])]
- return retval
-
- #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
- def get(self, context, image_id, data):
- metadata = self.show(context, image_id)
+ def download(self, context, image_id, data):
+ self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
- return metadata
def show(self, context, image_id):
"""Get data about specified image.
@@ -187,14 +172,6 @@ class _FakeImageService(object):
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
- def show_by_name(self, context, name):
- """Returns a dict containing image data for the given name."""
- images = copy.deepcopy(self.images.values())
- for image in images:
- if name == image.get('name'):
- return image
- raise exception.ImageNotFound(image_id=name)
-
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
@@ -258,3 +235,12 @@ def FakeImageService():
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
+
+
+def stub_out_image_service(stubs):
+ def fake_get_remote_image_service(context, image_href):
+ return (FakeImageService(), image_href)
+ stubs.Set(nova.image.glance, 'get_remote_image_service',
+              fake_get_remote_image_service)
+ stubs.Set(nova.image.glance, 'get_default_image_service',
+ lambda: FakeImageService())
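
Test modules adopt the relocated fake image service through the new helper roughly as follows (a sketch mirroring the setUp/tearDown changes made throughout this patch; the TestCase name is a placeholder):

    from nova import test
    import nova.tests.image.fake as fake_image

    class ExampleTestCase(test.TestCase):
        def setUp(self):
            super(ExampleTestCase, self).setUp()
            fake_image.stub_out_image_service(self.stubs)

        def tearDown(self):
            super(ExampleTestCase, self).tearDown()
            fake_image.FakeImageService_reset()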
diff --git a/nova/tests/test_image.py b/nova/tests/image/test_fake.py
index 6482d779b..bd7ae7d5b 100644
--- a/nova/tests/test_image.py
+++ b/nova/tests/image/test_fake.py
@@ -20,19 +20,19 @@ import StringIO
from nova import context
from nova import exception
-import nova.image
+import nova.tests.image.fake
from nova import test
-class _ImageTestCase(test.TestCase):
+class FakeImageServiceTestCase(test.TestCase):
def setUp(self):
- super(_ImageTestCase, self).setUp()
+ super(FakeImageServiceTestCase, self).setUp()
+ self.image_service = nova.tests.image.fake.FakeImageService()
self.context = context.get_admin_context()
- def test_index(self):
- res = self.image_service.index(self.context)
- for image in res:
- self.assertEquals(set(image.keys()), set(['id', 'name']))
+ def tearDown(self):
+        super(FakeImageServiceTestCase, self).tearDown()
+ nova.tests.image.fake.FakeImageService_reset()
def test_detail(self):
res = self.image_service.detail(self.context)
@@ -59,32 +59,19 @@ class _ImageTestCase(test.TestCase):
check_is_bool(image, 'deleted')
check_is_bool(image, 'is_public')
- def test_index_and_detail_have_same_results(self):
- index = self.image_service.index(self.context)
- detail = self.image_service.detail(self.context)
- index_set = set([(i['id'], i['name']) for i in index])
- detail_set = set([(i['id'], i['name']) for i in detail])
- self.assertEqual(index_set, detail_set)
-
def test_show_raises_imagenotfound_for_invalid_id(self):
self.assertRaises(exception.ImageNotFound,
self.image_service.show,
self.context,
'this image does not exist')
- def test_show_by_name(self):
- self.assertRaises(exception.ImageNotFound,
- self.image_service.show_by_name,
- self.context,
- 'this image does not exist')
-
def test_create_adds_id(self):
- index = self.image_service.index(self.context)
+ index = self.image_service.detail(self.context)
image_count = len(index)
self.image_service.create(self.context, {})
- index = self.image_service.index(self.context)
+ index = self.image_service.detail(self.context)
self.assertEquals(len(index), image_count + 1)
self.assertTrue(index[0]['id'])
@@ -126,7 +113,7 @@ class _ImageTestCase(test.TestCase):
self.image_service.create(self.context, {'id': '33', 'foo': 'bar'})
self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
self.image_service.delete_all()
- index = self.image_service.index(self.context)
+ index = self.image_service.detail(self.context)
self.assertEquals(len(index), 0)
def test_create_then_get(self):
@@ -136,11 +123,5 @@ class _ImageTestCase(test.TestCase):
{'id': '32', 'foo': 'bar'},
data=s1)
s2 = StringIO.StringIO()
- self.image_service.get(self.context, '32', data=s2)
+ self.image_service.download(self.context, '32', data=s2)
self.assertEquals(s2.getvalue(), blob, 'Did not get blob back intact')
-
-
-class FakeImageTestCase(_ImageTestCase):
- def setUp(self):
- super(FakeImageTestCase, self).setUp()
- self.image_service = nova.image.fake.FakeImageService()
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 1ed36b2bf..0518007c9 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -134,6 +134,7 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'status': None,
'properties': {'instance_id': '42', 'user_id': 'fake'},
+ 'owner': None,
}
self.assertDictMatch(image_meta, expected)
@@ -165,18 +166,19 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'status': None,
'properties': {},
+ 'owner': None,
}
actual = self.service.show(self.context, image_id)
self.assertDictMatch(actual, expected)
def test_create(self):
fixture = self._make_fixture(name='test image')
- num_images = len(self.service.index(self.context))
+ num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, image_id)
self.assertEquals(num_images + 1,
- len(self.service.index(self.context)))
+ len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
@@ -188,108 +190,24 @@ class TestGlanceImageService(test.TestCase):
self.context,
'bad image id')
- def test_create_and_show_non_existing_image_by_name(self):
- self.assertRaises(exception.ImageNotFound,
- self.service.show_by_name,
- self.context,
- 'bad image id')
-
- def test_index(self):
- fixture = self._make_fixture(name='test image')
- image_id = self.service.create(self.context, fixture)['id']
- image_metas = self.service.index(self.context)
- expected = [{'id': image_id, 'name': 'test image'}]
- self.assertDictListMatch(image_metas, expected)
-
- def test_index_default_limit(self):
- fixtures = []
- ids = []
- for i in range(10):
- fixture = self._make_fixture(name='TestImage %d' % (i))
- fixtures.append(fixture)
- ids.append(self.service.create(self.context, fixture)['id'])
-
- image_metas = self.service.index(self.context)
- i = 0
- for meta in image_metas:
- expected = {'id': 'DONTCARE',
- 'name': 'TestImage %d' % (i)}
- self.assertDictMatch(meta, expected)
- i = i + 1
-
- def test_index_marker(self):
- fixtures = []
- ids = []
- for i in range(10):
- fixture = self._make_fixture(name='TestImage %d' % (i))
- fixtures.append(fixture)
- ids.append(self.service.create(self.context, fixture)['id'])
-
- image_metas = self.service.index(self.context, marker=ids[1])
- self.assertEquals(len(image_metas), 8)
- i = 2
- for meta in image_metas:
- expected = {'id': 'DONTCARE',
- 'name': 'TestImage %d' % (i)}
- self.assertDictMatch(meta, expected)
- i = i + 1
-
- def test_index_limit(self):
- fixtures = []
- ids = []
- for i in range(10):
- fixture = self._make_fixture(name='TestImage %d' % (i))
- fixtures.append(fixture)
- ids.append(self.service.create(self.context, fixture)['id'])
-
- image_metas = self.service.index(self.context, limit=5)
- self.assertEquals(len(image_metas), 5)
-
- def test_index_marker_and_limit(self):
- fixtures = []
- ids = []
- for i in range(10):
- fixture = self._make_fixture(name='TestImage %d' % (i))
- fixtures.append(fixture)
- ids.append(self.service.create(self.context, fixture)['id'])
-
- image_metas = self.service.index(self.context, marker=ids[3], limit=1)
- self.assertEquals(len(image_metas), 1)
- i = 4
- for meta in image_metas:
- expected = {'id': ids[i],
- 'name': 'TestImage %d' % (i)}
- self.assertDictMatch(meta, expected)
- i = i + 1
-
- def test_index_invalid_marker(self):
- fixtures = []
- ids = []
- for i in range(10):
- fixture = self._make_fixture(name='TestImage %d' % (i))
- fixtures.append(fixture)
- ids.append(self.service.create(self.context, fixture)['id'])
-
- self.assertRaises(exception.Invalid, self.service.index,
- self.context, marker='invalidmarker')
-
- def test_index_private_image(self):
+ def test_detail_private_image(self):
fixture = self._make_fixture(name='test image')
fixture['is_public'] = False
properties = {'owner_id': 'proj1'}
fixture['properties'] = properties
- image_id = self.service.create(self.context, fixture)['id']
+        self.service.create(self.context, fixture)
proj = self.context.project_id
self.context.project_id = 'proj1'
- image_metas = self.service.index(self.context)
+ image_metas = self.service.detail(self.context)
self.context.project_id = proj
- expected = [{'id': 'DONTCARE', 'name': 'test image'}]
- self.assertDictListMatch(image_metas, expected)
+ self.assertEqual(1, len(image_metas))
+ self.assertEqual(image_metas[0]['name'], 'test image')
+ self.assertEqual(image_metas[0]['is_public'], False)
def test_detail_marker(self):
fixtures = []
@@ -318,7 +236,8 @@ class TestGlanceImageService(test.TestCase):
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
- 'deleted': None
+ 'deleted': None,
+ 'owner': None,
}
self.assertDictMatch(meta, expected)
@@ -335,6 +254,18 @@ class TestGlanceImageService(test.TestCase):
image_metas = self.service.detail(self.context, limit=5)
self.assertEquals(len(image_metas), 5)
+ def test_detail_default_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture(name='TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context)
+ for i, meta in enumerate(image_metas):
+ self.assertEqual(meta['name'], 'TestImage %d' % (i))
+
def test_detail_marker_and_limit(self):
fixtures = []
ids = []
@@ -362,7 +293,8 @@ class TestGlanceImageService(test.TestCase):
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
- 'deleted': None
+ 'deleted': None,
+ 'owner': None,
}
self.assertDictMatch(meta, expected)
i = i + 1
@@ -392,20 +324,20 @@ class TestGlanceImageService(test.TestCase):
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
- num_images = len(self.service.index(self.context))
- self.assertEquals(0, num_images, str(self.service.index(self.context)))
+ num_images = len(self.service.detail(self.context))
+ self.assertEquals(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
- num_images = len(self.service.index(self.context))
- self.assertEquals(2, num_images, str(self.service.index(self.context)))
+ num_images = len(self.service.detail(self.context))
+ self.assertEquals(2, num_images)
self.service.delete(self.context, ids[0])
- num_images = len(self.service.index(self.context))
+ num_images = len(self.service.detail(self.context))
self.assertEquals(1, num_images)
def test_delete_not_by_owner(self):
@@ -416,11 +348,11 @@ class TestGlanceImageService(test.TestCase):
properties = {'project_id': 'proj1'}
fixture['properties'] = properties
- num_images = len(self.service.index(self.context))
+ num_images = len(self.service.detail(self.context))
self.assertEquals(0, num_images)
image_id = self.service.create(self.context, fixture)['id']
- num_images = len(self.service.index(self.context))
+ num_images = len(self.service.detail(self.context))
self.assertEquals(1, num_images)
proj_id = self.context.project_id
@@ -431,7 +363,7 @@ class TestGlanceImageService(test.TestCase):
self.context.project_id = proj_id
- num_images = len(self.service.index(self.context))
+ num_images = len(self.service.detail(self.context))
self.assertEquals(1, num_images)
def test_show_passes_through_to_client(self):
@@ -455,6 +387,7 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'status': None,
'properties': {},
+ 'owner': None,
}
self.assertEqual(image_meta, expected)
@@ -501,6 +434,7 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'status': None,
'properties': {},
+ 'owner': None,
},
]
self.assertEqual(image_metas, expected)
@@ -519,15 +453,7 @@ class TestGlanceImageService(test.TestCase):
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
- def test_get_makes_datetimes(self):
- fixture = self._make_datetime_fixture()
- image_id = self.service.create(self.context, fixture)['id']
- writer = NullWriter()
- image_meta = self.service.get(self.context, image_id, writer)
- self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
- self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
-
- def test_get_with_retries(self):
+ def test_download_with_retries(self):
tries = [0]
class GlanceBusyException(Exception):
@@ -549,12 +475,12 @@ class TestGlanceImageService(test.TestCase):
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
- self.assertRaises(GlanceBusyException, service.get, self.context,
+ self.assertRaises(GlanceBusyException, service.download, self.context,
image_id, writer)
        # Now let's enable retries. No exception should happen now.
self.flags(glance_num_retries=1)
- service.get(self.context, image_id, writer)
+ service.download(self.context, image_id, writer)
def test_client_raises_forbidden(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
@@ -566,7 +492,7 @@ class TestGlanceImageService(test.TestCase):
service = glance.GlanceImageService(client=client)
image_id = 1 # doesn't matter
writer = NullWriter()
- self.assertRaises(exception.ImageNotAuthorized, service.get,
+ self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, writer)
def test_glance_client_image_id(self):
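Note on the hunks above: the image-fetch path is renamed from get() to download() while keeping the glance_num_retries behaviour. The following is a rough, self-contained sketch of the retry pattern that test_download_with_retries pins down (plain Python for illustration only, not the Nova client code; all names here are made up):

    class GlanceBusyError(Exception):
        pass

    def download_with_retries(fetch, image_id, writer, num_retries=0):
        # Call fetch(image_id, writer), retrying when the backend reports
        # itself busy; after num_retries extra attempts the error is raised.
        attempt = 0
        while True:
            try:
                return fetch(image_id, writer)
            except GlanceBusyError:
                attempt += 1
                if attempt > num_retries:
                    raise

With num_retries=0 the first failure propagates, matching the assertRaises branch; with num_retries=1 a single transient failure is absorbed and the download succeeds.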
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
index 9f078fc49..9b4b5757f 100644
--- a/nova/tests/image/test_s3.py
+++ b/nova/tests/image/test_s3.py
@@ -24,6 +24,7 @@ import tempfile
from nova import context
import nova.db.api
from nova import exception
+from nova.tests.image import fake
from nova.image import s3
from nova import test
@@ -81,14 +82,19 @@ file_manifest_xml = """<?xml version="1.0" ?>
class TestS3ImageService(test.TestCase):
def setUp(self):
super(TestS3ImageService, self).setUp()
- self.flags(image_service='nova.image.fake.FakeImageService')
- self.image_service = s3.S3ImageService()
self.context = context.RequestContext(None, None)
         # set up one fixture for testing show(); it should have id '1'
nova.db.api.s3_image_create(self.context,
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
+ fake.stub_out_image_service(self.stubs)
+ self.image_service = s3.S3ImageService()
+
+ def tearDown(self):
+ super(TestS3ImageService, self).tearDown()
+ fake.FakeImageService_reset()
+
def _assertEqualList(self, list0, list1, keys):
self.assertEqual(len(list0), len(list1))
key = keys[0]
@@ -183,8 +189,8 @@ class TestS3ImageService(test.TestCase):
eventlet.sleep()
translated = self.image_service._translate_id_to_uuid(context, img)
uuid = translated['id']
- self.glance_service = nova.image.get_default_image_service()
- updated_image = self.glance_service.update(self.context, uuid,
+ image_service = fake.FakeImageService()
+ updated_image = image_service.update(self.context, uuid,
{'is_public': True}, None,
{'x-glance-registry-purge-props': False})
self.assertTrue(updated_image['is_public'])
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index e205dfb18..a514de677 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -22,10 +22,10 @@ Provides common functionality for integrated unit tests
import random
import string
-import nova.image.glance
from nova.log import logging
from nova import service
from nova import test # For the flags
+import nova.tests.image.fake
from nova.tests.integrated.api import client
from nova import utils
@@ -65,10 +65,7 @@ class _IntegratedTestBase(test.TestCase):
self.flags(**f)
self.flags(verbose=True)
- def fake_get_image_service(context, image_href):
- image_id = str(image_href).split('/')[-1]
- return (nova.image.fake.FakeImageService(), image_id)
- self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
self.flags(compute_scheduler_driver='nova.scheduler.'
'chance.ChanceScheduler')
@@ -84,6 +81,7 @@ class _IntegratedTestBase(test.TestCase):
def tearDown(self):
self.osapi.stop()
+ nova.tests.image.fake.FakeImageService_reset()
super(_IntegratedTestBase, self).tearDown()
def _start_api_service(self):
@@ -108,7 +106,6 @@ class _IntegratedTestBase(test.TestCase):
f['osapi_volume_listen_port'] = 0
f['metadata_listen_port'] = 0
- f['image_service'] = 'nova.image.fake.FakeImageService'
f['fake_network'] = True
return f
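Both test_s3.py and the integrated helpers now install the fake image service via nova.tests.image.fake.stub_out_image_service() and call FakeImageService_reset() in tearDown. The sketch below shows the shape such a module plausibly has (the two function names come from this diff; the internals are assumed purely for illustration), which is what makes the reset necessary:

    class _FakeImageService(object):
        def __init__(self):
            self.images = {}

        def create(self, context, metadata):
            image_id = str(len(self.images) + 1)
            self.images[image_id] = dict(metadata, id=image_id)
            return self.images[image_id]

        def detail(self, context, **kwargs):
            return list(self.images.values())

    _service = _FakeImageService()

    def FakeImageService():
        # every caller gets the same shared instance
        return _service

    def FakeImageService_reset():
        # start each test from an empty image store
        global _service
        _service = _FakeImageService()

Because every FakeImageService() call hands back the same shared store, tearDown has to reset it or images created in one test would leak into the next.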
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 4b60b624a..0660bb0f3 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -409,7 +409,7 @@ class LinuxNetworkTestCase(test.TestCase):
executes.append(args)
if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
return existing, ""
- if args[0] == 'route' and args[1] == '-n':
+ if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
return routes, ""
self.stubs.Set(utils, 'execute', fake_execute)
network = {'dhcp_server': '192.168.1.1',
@@ -429,7 +429,7 @@ class LinuxNetworkTestCase(test.TestCase):
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
- ('route', '-n'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
@@ -442,8 +442,8 @@ class LinuxNetworkTestCase(test.TestCase):
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_resets_route(self):
- routes = ("0.0.0.0 192.68.0.1 0.0.0.0 "
- "UG 100 0 0 eth0")
+ routes = ("default via 192.168.0.1 dev eth0\n"
+ "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
@@ -453,15 +453,19 @@ class LinuxNetworkTestCase(test.TestCase):
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
- ('route', '-n'),
- ('route', 'del', 'default', 'gw', '192.68.0.1', 'dev', 'eth0'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
+ ('ip', 'route', 'del', 'default', 'dev', 'eth0'),
+ ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
- ('route', 'add', 'default', 'gw', '192.68.0.1'),
+ ('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
+ 'dev', 'eth0'),
+ ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
+ 'dev', 'eth0', 'proto', 'static'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
@@ -492,7 +496,7 @@ class LinuxNetworkTestCase(test.TestCase):
expected = [
('sysctl', '-w', 'net.ipv4.ip_forward=1'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
- ('route', '-n'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
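The expected command lists above replace route -n parsing with "ip route show dev <dev>": each reported route is deleted before the address change and re-added afterwards. The helper below is a simplified illustration that reproduces exactly those tuples from the test fixture; it is not the linux_net implementation:

    def rebuild_route_commands(routes_output, dev):
        # routes_output is the text of "ip route show dev <dev>", e.g.
        #   default via 192.168.0.1 dev eth0
        #   192.168.100.0/24 via 192.168.0.254 dev eth0 proto static
        delete_cmds, add_cmds = [], []
        for line in routes_output.splitlines():
            fields = line.split()
            if not fields:
                continue
            delete_cmds.append(('ip', 'route', 'del', fields[0], 'dev', dev))
            add_cmds.append(('ip', 'route', 'add') + tuple(fields))
        return delete_cmds, add_cmds

Feeding it the routes string from test_initialize_gateway_resets_route yields the same del/add tuples listed in the expected output.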
diff --git a/nova/tests/network/test_quantum.py b/nova/tests/network/test_quantum.py
index 255444c7b..7f93c9c2b 100644
--- a/nova/tests/network/test_quantum.py
+++ b/nova/tests/network/test_quantum.py
@@ -492,7 +492,7 @@ class QuantumManagerTestCase(QuantumNovaTestCase):
net = db.network_get_by_uuid(ctx.elevated(), net_id)
self.assertTrue(net is not None)
self.assertEquals(net['uuid'], net_id)
- self.assertTrue(net['host'] != None)
+ self.assertTrue(net['host'] is not None)
class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 83fbfe8b7..29d4807ac 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -139,4 +139,5 @@ def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'instance_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
- db.instance_get_all(mox.IgnoreArg()).AndReturn(INSTANCES)
+ db.instance_get_all(mox.IgnoreArg(),
+ columns_to_join=['instance_type']).AndReturn(INSTANCES)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index cb619dbac..c6fabc1b5 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -124,6 +124,19 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_different_filter_no_list_passes(self):
+ filt_cls = self.class_map['DifferentHostFilter']()
+ host = fakes.FakeHostState('host1', 'compute', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host2'})
+ instance_uuid = instance.uuid
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'different_host': instance_uuid}}
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
@@ -149,6 +162,19 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_same_filter_no_list_passes(self):
+ filt_cls = self.class_map['SameHostFilter']()
+ host = fakes.FakeHostState('host1', 'compute', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host1'})
+ instance_uuid = instance.uuid
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'same_host': instance_uuid}}
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
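The two new *_no_list_passes cases assert that the affinity filters accept a bare instance uuid in scheduler_hints as well as a list of uuids. A minimal sketch of the normalisation such a filter needs (assumed shape for illustration, not the affinity_filter code itself):

    def normalize_hint(hint):
        # scheduler_hints may carry either 'uuid-1' or ['uuid-1', 'uuid-2'];
        # the filter should treat both forms the same way.
        if isinstance(hint, (list, tuple)):
            return list(hint)
        return [hint]

For example, normalize_hint('uuid-1') == ['uuid-1'], so a single-uuid hint goes through the same host-comparison loop as a list of uuids.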
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 987bffea1..d28e51c00 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -250,7 +250,9 @@ class HostManagerTestCase(test.TestCase):
db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
# Invalid service
host_manager.LOG.warn("No service for compute ID 5")
- db.instance_get_all(context).AndReturn(fakes.INSTANCES)
+ db.instance_get_all(context,
+ columns_to_join=['instance_type']).AndReturn(
+ fakes.INSTANCES)
self.mox.ReplayAll()
host_states = self.host_manager.get_all_host_states(context, topic)
diff --git a/nova/tests/test_compute_utils.py b/nova/tests/test_compute_utils.py
index 50c85073f..0955134ba 100644
--- a/nova/tests/test_compute_utils.py
+++ b/nova/tests/test_compute_utils.py
@@ -22,12 +22,12 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import flags
-import nova.image.fake
from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
from nova import test
from nova.tests import fake_network
+import nova.tests.image.fake
from nova import utils
@@ -61,7 +61,8 @@ class UsageInfoTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
- self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(nova.tests.image.fake._FakeImageService,
+ 'show', fake_show)
def _create_instance(self, params={}):
"""Create a test instance"""
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index aaddb08a6..8eeaf2e40 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -401,6 +401,16 @@ class DbApiTestCase(test.TestCase):
result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'bar', now)
self.assertEqual(result, 0)
+ def test_get_vol_mapping_non_admin(self):
+ ref = db.ec2_volume_create(self.context, 'fake-uuid')
+ ec2_id = db.get_ec2_volume_id_by_uuid(self.context, 'fake-uuid')
+ self.assertEqual(ref['id'], ec2_id)
+
+ def test_get_snap_mapping_non_admin(self):
+ ref = db.ec2_snapshot_create(self.context, 'fake-uuid')
+ ec2_id = db.get_ec2_snapshot_id_by_uuid(self.context, 'fake-uuid')
+ self.assertEqual(ref['id'], ec2_id)
+
def _get_fake_aggr_values():
return {'name': 'fake_aggregate',
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 43ee7ed93..9ab10cc51 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -429,7 +429,7 @@ class ImageCacheManagerTestCase(test.TestCase):
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(
img, fname, create_if_missing=True)
- self.assertTrue(res == None)
+ self.assertTrue(res is None)
def test_verify_checksum_invalid(self):
img = {'container_format': 'ami', 'id': '42'}
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index ccf271bed..2bc20d2b0 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -40,12 +40,13 @@ from nova.openstack.common import jsonutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
+import nova.tests.image.fake
from nova import utils
from nova.virt import driver
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import config
-from nova.virt.libvirt import connection
+from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
@@ -55,11 +56,11 @@ from nova.volume import driver as volume_driver
try:
import libvirt
- connection.libvirt = libvirt
+ libvirt_driver.libvirt = libvirt
except ImportError:
# TODO(sdague): there should be a cleaner way to handle this
# in the case where libvirt python isn't installed
- connection.libvirt = ""
+ libvirt_driver.libvirt = ""
libvirt = None
@@ -165,7 +166,7 @@ class LibvirtVolumeTestCase(test.TestCase):
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location, iqn)
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
@@ -204,7 +205,7 @@ class LibvirtVolumeTestCase(test.TestCase):
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location, iqn)
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
@@ -317,7 +318,7 @@ class LibvirtVolumeTestCase(test.TestCase):
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location, iqn)
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
@@ -413,22 +414,25 @@ class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
- connection._late_load_cheetah()
+ libvirt_driver._late_load_cheetah()
self.flags(fake_call=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
self.flags(instances_path='')
self.call_libvirt_dependant_setup = False
- connection.libvirt_utils = fake_libvirt_utils
+ libvirt_driver.libvirt_utils = fake_libvirt_utils
def fake_extend(image, size):
pass
- self.stubs.Set(connection.disk, 'extend', fake_extend)
+ self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
+
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
- connection.libvirt_utils = libvirt_utils
+ libvirt_driver.libvirt_utils = libvirt_utils
+ nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
test_instance = {'memory_kb': '1024000',
@@ -458,11 +462,10 @@ class LibvirtConnTestCase(test.TestCase):
for key, val in kwargs.items():
fake.__setattr__(key, val)
- self.flags(image_service='nova.image.fake.FakeImageService')
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn = fake
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
@@ -486,7 +489,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(my_ip=ip)
self.flags(host=host)
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
expected = {
'ip': ip,
'initiator': initiator,
@@ -499,7 +502,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertDictMatch(expected, result)
def test_get_guest_config(self):
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -543,7 +546,7 @@ class LibvirtConnTestCase(test.TestCase):
"catchup")
def test_get_guest_config_with_two_nics(self):
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref,
@@ -575,7 +578,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None,
@@ -594,6 +597,23 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(type(cfg.devices[2]),
config.LibvirtConfigGuestConsole)
+ def test_get_guest_config_with_block_device(self):
+ conn = libvirt_driver.LibvirtDriver(True)
+
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': [
+ {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
+ {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+
+ cfg = conn.get_guest_config(instance_ref, [], None, None, info)
+ self.assertEquals(type(cfg.devices[2]),
+ config.LibvirtConfigGuestDisk)
+ self.assertEquals(cfg.devices[2].target_dev, 'vdc')
+ self.assertEquals(type(cfg.devices[3]),
+ config.LibvirtConfigGuestDisk)
+ self.assertEquals(cfg.devices[3].target_dev, 'vdd')
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -667,12 +687,12 @@ class LibvirtConnTestCase(test.TestCase):
"cdrom", "ide")
def test_list_instances(self):
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByID = self.fake_lookup
- connection.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
instances = conn.list_instances()
         # Only one should be listed, since the domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
@@ -719,12 +739,12 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup(id):
return FakeVirtDomain(xml[id])
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.listDomainsID = lambda: range(4)
- connection.LibvirtDriver._conn.lookupByID = fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
@@ -778,22 +798,20 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_name(name):
return FakeVirtDomain(xml[1])
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.listDomainsID = lambda: range(4)
- connection.LibvirtDriver._conn.lookupByID = fake_lookup
- connection.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_ami_format(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
-
# Start test
- image_service = importutils.import_object(FLAGS.image_service)
+ image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for testing ami
test_instance = copy.deepcopy(self.test_instance)
@@ -810,14 +828,14 @@ class LibvirtConnTestCase(test.TestCase):
         # Snapshot needs to find this image in the single shared image_service
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = self.fake_execute
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -828,10 +846,8 @@ class LibvirtConnTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_raw_format(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
-
# Start test
- image_service = importutils.import_object(FLAGS.image_service)
+ image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -844,14 +860,14 @@ class LibvirtConnTestCase(test.TestCase):
         # Snapshot needs to find this image in the single shared image_service
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = self.fake_execute
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -862,11 +878,10 @@ class LibvirtConnTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_qcow2_format(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
self.flags(snapshot_image_format='qcow2')
# Start test
- image_service = importutils.import_object(FLAGS.image_service)
+ image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -879,14 +894,14 @@ class LibvirtConnTestCase(test.TestCase):
         # Snapshot needs to find this image in the single shared image_service
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = self.fake_execute
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -897,10 +912,8 @@ class LibvirtConnTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_no_image_architecture(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
-
# Start test
- image_service = importutils.import_object(FLAGS.image_service)
+ image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for
# testing different base image
@@ -918,14 +931,14 @@ class LibvirtConnTestCase(test.TestCase):
         # Snapshot needs to find this image in the single shared image_service
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = self.fake_execute
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -935,10 +948,8 @@ class LibvirtConnTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_no_original_image(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
-
# Start test
- image_service = importutils.import_object(FLAGS.image_service)
+ image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
@@ -952,14 +963,14 @@ class LibvirtConnTestCase(test.TestCase):
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
- self.mox.StubOutWithMock(connection.LibvirtDriver, '_conn')
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = self.fake_execute
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
@@ -969,9 +980,9 @@ class LibvirtConnTestCase(test.TestCase):
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
- connection.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
@@ -981,7 +992,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, network_info, None, False)
tree = etree.fromstring(xml)
@@ -998,7 +1009,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
self.assertEquals(conn.uri, 'lxc:///')
@@ -1041,7 +1052,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
@@ -1070,14 +1081,14 @@ class LibvirtConnTestCase(test.TestCase):
def connection_supports_direct_io_stub(*args, **kwargs):
return directio_supported
- self.stubs.Set(connection.LibvirtDriver,
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
'_supports_direct_io', connection_supports_direct_io_stub)
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtDriver(True).to_xml(instance_ref,
+ xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1089,7 +1100,7 @@ class LibvirtConnTestCase(test.TestCase):
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
- xml = connection.LibvirtDriver(True).to_xml(instance_ref,
+ xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1102,7 +1113,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtDriver(True).to_xml(instance_ref,
+ xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1115,7 +1126,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = connection.LibvirtDriver(True).to_xml(instance_ref,
+ xml = libvirt_driver.LibvirtDriver(True).to_xml(instance_ref,
network_info,
image_meta)
tree = etree.fromstring(xml)
@@ -1204,7 +1215,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
self.assertEquals(conn.uri, expected_uri)
@@ -1233,7 +1244,7 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
- conn = connection.LibvirtDriver(True)
+ conn = libvirt_driver.LibvirtDriver(True)
self.assertEquals(conn.uri, testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
@@ -1266,7 +1277,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1326,7 +1337,7 @@ class LibvirtConnTestCase(test.TestCase):
#start test
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
@@ -1347,7 +1358,7 @@ class LibvirtConnTestCase(test.TestCase):
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
@@ -1356,7 +1367,8 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
conn.volume_driver_method('connect_volume',
- v['connection_info'], v['mount_device'])
+ v['connection_info'],
+ v['mount_device'].rpartition("/")[2])
# Starting test
self.mox.ReplayAll()
@@ -1378,7 +1390,7 @@ class LibvirtConnTestCase(test.TestCase):
         # qemu-img should be mocked since the test environment might not have
# large disk space.
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson % tmpdir)
@@ -1434,7 +1446,7 @@ class LibvirtConnTestCase(test.TestCase):
os.path.getsize('/test/disk.local').AndReturn((21474836480))
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
info = conn.get_instance_disk_info(instance_ref.name)
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
@@ -1466,7 +1478,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
@@ -1524,10 +1536,10 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
- connection.LibvirtDriver._conn.lookupByName = fake_lookup
- connection.libvirt_utils = fake_libvirt_utils
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+ libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
@@ -1568,16 +1580,16 @@ class LibvirtConnTestCase(test.TestCase):
return fp.read()
self.create_fake_libvirt_mock()
- connection.LibvirtDriver._conn.lookupByName = fake_lookup
- connection.LibvirtDriver._flush_libvirt_console = _fake_flush
- connection.libvirt_utils = fake_libvirt_utils
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+ libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
+ libvirt_driver.libvirt_utils = fake_libvirt_utils
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
output = conn.get_console_output(instance)
self.assertEquals("foo", output)
def test_get_host_ip_addr(self):
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
@@ -1587,7 +1599,7 @@ class LibvirtConnTestCase(test.TestCase):
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)):
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities")
@@ -1607,7 +1619,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.UnsetStubs()
def test_volume_in_mapping(self):
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
@@ -1647,7 +1659,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
@@ -1670,7 +1682,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -1690,7 +1702,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN}
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -1711,7 +1723,7 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
raise exception.InstanceNotFound()
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
@@ -1721,7 +1733,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_available_least_handles_missing(self):
"""Ensure destroy calls managedSaveRemove for saved instance"""
- conn = connection.LibvirtDriver(False)
+ conn = libvirt_driver.LibvirtDriver(False)
def list_instances():
return ['fake']
@@ -1781,11 +1793,11 @@ class HostStateTestCase(test.TestCase):
return 13091
def test_update_status(self):
- self.mox.StubOutWithMock(connection, 'LibvirtDriver')
- connection.LibvirtDriver(True).AndReturn(self.FakeConnection())
+ self.mox.StubOutWithMock(libvirt_driver, 'LibvirtDriver')
+ libvirt_driver.LibvirtDriver(True).AndReturn(self.FakeConnection())
self.mox.ReplayAll()
- hs = connection.HostState(True)
+ hs = libvirt_driver.HostState(True)
stats = hs._stats
self.assertEquals(stats["vcpus"], 1)
self.assertEquals(stats["vcpus_used"], 0)
@@ -2540,10 +2552,10 @@ disk size: 4.4M''', ''))
class LibvirtDriverTestCase(test.TestCase):
- """Test for nova.virt.libvirt.connection.LibvirtDriver."""
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
- self.libvirtconnection = connection.LibvirtDriver(read_only=True)
+ self.libvirtconnection = libvirt_driver.LibvirtDriver(read_only=True)
def _create_instance(self, params=None):
"""Create a test instance"""
@@ -2572,7 +2584,7 @@ class LibvirtDriverTestCase(test.TestCase):
return db.instance_create(context.get_admin_context(), inst)
def test_migrate_disk_and_power_off_exception(self):
- """Test for nova.virt.libvirt.connection.LivirtConnection
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
self.counter = 0
@@ -2609,7 +2621,7 @@ class LibvirtDriverTestCase(test.TestCase):
None, ins_ref, '10.0.0.2', None, None)
def test_migrate_disk_and_power_off(self):
- """Test for nova.virt.libvirt.connection.LivirtConnection
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
@@ -2653,7 +2665,7 @@ class LibvirtDriverTestCase(test.TestCase):
self.assertEquals(out, disk_info_text)
def test_wait_for_running(self):
- """Test for nova.virt.libvirt.connection.LivirtConnection
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
._wait_for_running. """
def fake_get_info(instance):
@@ -2684,7 +2696,7 @@ class LibvirtDriverTestCase(test.TestCase):
'uuid': 'other_uuid'})
def test_finish_migration(self):
- """Test for nova.virt.libvirt.connection.LivirtConnection
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
@@ -2720,7 +2732,7 @@ class LibvirtDriverTestCase(test.TestCase):
return {'state': power_state.RUNNING}
self.flags(use_cow_images=True)
- self.stubs.Set(connection.disk, 'extend', fake_extend)
+ self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
@@ -2742,7 +2754,7 @@ class LibvirtDriverTestCase(test.TestCase):
disk_info_text, None, None, None)
def test_finish_revert_migration(self):
- """Test for nova.virt.libvirt.connection.LivirtConnection
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration. """
def fake_execute(*args, **kwargs):
@@ -2797,6 +2809,6 @@ class LibvirtNonblockingTestCase(test.TestCase):
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_connection_to_primitive(self):
"""Test bug 962840"""
- import nova.virt.libvirt.connection
- connection = nova.virt.libvirt.connection.LibvirtDriver('')
+ import nova.virt.libvirt.driver as libvirt_driver
+ connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
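Two of the libvirt changes above rely on the same convention: block devices passed to the guest config and to volume_driver_method are addressed by the short device name derived from the mount device path with rpartition. A tiny sketch of that mapping, matching the '/dev/vdc' -> 'vdc' expectations in the tests:

    def block_device_name(mount_device):
        # "/dev/vdc" -> "vdc"; a bare "vdd" is returned unchanged.
        return mount_device.rpartition("/")[2]

    assert block_device_name('/dev/vdc') == 'vdc'
    assert block_device_name('vdd') == 'vdd'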
diff --git a/nova/tests/test_libvirt_config.py b/nova/tests/test_libvirt_config.py
index 4f9970e79..55f5528c7 100644
--- a/nova/tests/test_libvirt_config.py
+++ b/nova/tests/test_libvirt_config.py
@@ -59,6 +59,41 @@ class LibvirtConfigTest(LibvirtConfigBaseTest):
obj.parse_str(inxml)
+class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
+
+ def test_config_host(self):
+ xmlin = """
+ <capabilities>
+ <host>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Opteron_G3</model>
+ <vendor>AMD</vendor>
+ <topology sockets='1' cores='4' threads='1'/>
+ <feature name='ibs'/>
+ <feature name='osvw'/>
+ </cpu>
+ </host>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='x86_64'/>
+ </guest>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='i686'/>
+ </guest>
+ </capabilities>"""
+
+ obj = config.LibvirtConfigCaps()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(type(obj.host), config.LibvirtConfigCapsHost)
+
+ xmlout = obj.to_xml()
+
+ self.assertXmlEqual(xmlin, xmlout)
+
+
class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
def test_config_platform(self):
obj = config.LibvirtConfigGuestTimer()
@@ -151,6 +186,123 @@ class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
""")
+class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPUFeature("mtrr")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr"/>
+ """)
+
+
+class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUFeature("mtrr")
+ obj.policy = "force"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr" policy="force"/>
+ """)
+
+
+class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = "x86_64"
+
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="mtrr"/>
+ <feature name="apic"/>
+ </cpu>
+ """)
+
+ def test_config_topology(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.sockets = 4
+ obj.cores = 4
+ obj.threads = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ <topology sockets="4" cores="4" threads="2"/>
+ </cpu>
+ """)
+
+
+class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu match="exact">
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = "x86_64"
+ obj.mode = "custom"
+
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="custom" match="exact">
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="mtrr" policy="require"/>
+ <feature name="apic" policy="require"/>
+ </cpu>
+ """)
+
+ def test_config_host(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="host-model" match="exact"/>
+ """)
+
+
class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
def test_config_file(self):
@@ -485,43 +637,6 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
</domain>""")
-class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
-
- def test_config_cpu(self):
- obj = config.LibvirtConfigCPU()
- obj.vendor = "AMD"
- obj.model = "Quad-Core AMD Opteron(tm) Processor 2350"
- obj.arch = "x86_64"
- obj.add_feature("svm")
- obj.add_feature("extapic")
- obj.add_feature("constant_tsc")
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <arch>x86_64</arch>
- <model>Quad-Core AMD Opteron(tm) Processor 2350</model>
- <vendor>AMD</vendor>
- <feature name="svm"/>
- <feature name="extapic"/>
- <feature name="constant_tsc"/>
- </cpu>""")
-
- def test_config_topology(self):
- obj = config.LibvirtConfigCPU()
- obj.vendor = "AMD"
- obj.sockets = 2
- obj.cores = 4
- obj.threads = 2
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <vendor>AMD</vendor>
- <topology cores="4" threads="2" sockets="2"/>
- </cpu>""")
-
-
class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
def test_config_snapshot(self):
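For reference, the guest CPU XML the new LibvirtConfigGuestCPU tests expect can be reproduced with the standard library alone. This is only an illustration of the expected output (the policy="require" default and match="exact" attribute are taken from the assertions above), not the config module itself:

    from xml.etree import ElementTree

    cpu = ElementTree.Element('cpu', mode='custom', match='exact')
    ElementTree.SubElement(cpu, 'arch').text = 'x86_64'
    ElementTree.SubElement(cpu, 'model').text = 'Penryn'
    ElementTree.SubElement(cpu, 'vendor').text = 'Intel'
    for name in ('mtrr', 'apic'):
        # guest CPU features default to policy="require" per the test
        ElementTree.SubElement(cpu, 'feature', name=name, policy='require')

    print(ElementTree.tostring(cpu))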
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index c90e9c6c4..0c3c5b574 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -66,7 +66,7 @@ def return_non_existing_address(*args, **kwarg):
def fake_InstanceMetadata(stubs, inst_data, address=None, sgroups=None):
- if sgroups == None:
+ if sgroups is None:
sgroups = [{'name': 'default'}]
def sg_get(*args, **kwargs):
@@ -84,7 +84,7 @@ def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
app = handler.MetadataRequestHandler()
- if fake_get_metadata == None:
+ if fake_get_metadata is None:
fake_get_metadata = get_metadata
if stubs:
@@ -93,7 +93,7 @@ def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
request = webob.Request.blank(relpath)
request.remote_addr = address
- if headers != None:
+ if headers is not None:
request.headers.update(headers)
response = request.get_response(app)
@@ -180,6 +180,16 @@ class MetadataTestCase(test.TestCase):
self.assertEqual(base._format_instance_mapping(ctxt, instance_ref1),
expected)
+ def test_pubkey(self):
+ md = fake_InstanceMetadata(self.stubs, copy(self.instance))
+ data = md.get_ec2_metadata(version='2009-04-04')
+ pubkey_ent = data['meta-data']['public-keys']
+
+ self.assertEqual(base.ec2_md_print(pubkey_ent),
+ "0=%s" % self.instance['key_name'])
+ self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
+ self.instance['key_data'])
+
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index 5ce45d777..ff6acb155 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -25,9 +25,9 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import flags
-import nova.image.fake
from nova import log as logging
from nova import notifications
+import nova.network
from nova.notifier import test_notifier
from nova import test
from nova.tests import fake_network
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index 2b6fba975..f816cbdb9 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -89,7 +89,7 @@ class RootwrapTestCase(test.TestCase):
self.assertFalse(f.match(usercmd) or f2.match(usercmd))
usercmd = ['kill', p.pid]
# Providing no signal should work
- self.assertTrue(f.match(usercmd))
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
def test_KillFilter_no_raise(self):
"""Makes sure ValueError from bug 926412 is gone"""
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 54489918b..eefde358e 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -31,6 +31,7 @@ from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
+import nova.tests.image.fake
from nova import volume
@@ -39,11 +40,6 @@ FLAGS = flags.FLAGS
class QuotaIntegrationTestCase(test.TestCase):
- class StubImageService(object):
-
- def show(self, *args, **kwargs):
- return {"properties": {}}
-
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
@@ -78,6 +74,11 @@ class QuotaIntegrationTestCase(test.TestCase):
return orig_rpc_call(context, topic, msg)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
+
+ def tearDown(self):
+ super(QuotaIntegrationTestCase, self).tearDown()
+ nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance"""
@@ -173,8 +174,7 @@ class QuotaIntegrationTestCase(test.TestCase):
metadata=metadata)
def _create_with_injected_files(self, files):
- self.flags(image_service='nova.image.fake.FakeImageService')
- api = compute.API(image_service=self.StubImageService())
+ api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
@@ -182,8 +182,7 @@ class QuotaIntegrationTestCase(test.TestCase):
injected_files=files)
def test_no_injected_files(self):
- self.flags(image_service='nova.image.fake.FakeImageService')
- api = compute.API(image_service=self.StubImageService())
+ api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 3457a77e4..6443c6721 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -21,10 +21,10 @@ import traceback
from nova.compute.manager import ComputeManager
from nova import exception
-from nova import image
from nova import log as logging
from nova.openstack.common import importutils
from nova import test
+from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
LOG = logging.getLogger(__name__)
@@ -68,12 +68,12 @@ class _FakeDriverBackendTestCase(test.TestCase):
import fakelibvirt
sys.modules['libvirt'] = fakelibvirt
- import nova.virt.libvirt.connection
+ import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
- nova.virt.libvirt.connection.imagebackend = fake_imagebackend
- nova.virt.libvirt.connection.libvirt = fakelibvirt
- nova.virt.libvirt.connection.libvirt_utils = fake_libvirt_utils
+ nova.virt.libvirt.driver.imagebackend = fake_imagebackend
+ nova.virt.libvirt.driver.libvirt = fakelibvirt
+ nova.virt.libvirt.driver.libvirt_utils = fake_libvirt_utils
nova.virt.libvirt.firewall.libvirt = fakelibvirt
self.flags(firewall_driver=nova.virt.libvirt.firewall.drivers[0],
@@ -84,17 +84,17 @@ class _FakeDriverBackendTestCase(test.TestCase):
def fake_extend(image, size):
pass
- self.stubs.Set(nova.virt.libvirt.connection.disk,
+ self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
def _teardown_fakelibvirt(self):
# Restore libvirt
- import nova.virt.libvirt.connection
+ import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
- nova.virt.libvirt.connection.libvirt = self.saved_libvirt
- nova.virt.libvirt.connection.libvirt_utils = self.saved_libvirt
+ nova.virt.libvirt.driver.libvirt = self.saved_libvirt
+ nova.virt.libvirt.driver.libvirt_utils = self.saved_libvirt
nova.virt.libvirt.firewall.libvirt = self.saved_libvirt
def setUp(self):
@@ -102,9 +102,11 @@ class _FakeDriverBackendTestCase(test.TestCase):
# TODO(sdague): it would be nice to do this in a way that only
         # the relevant backends were replaced for tests, though this
# should not harm anything by doing it for all backends
+ fake_image.stub_out_image_service(self.stubs)
self._setup_fakelibvirt()
def tearDown(self):
+ fake_image.FakeImageService_reset()
self._teardown_fakelibvirt()
super(_FakeDriverBackendTestCase, self).tearDown()
@@ -117,7 +119,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
# if your driver supports being tested in a fake way, it can go here
new_drivers = {
'nova.virt.fake.FakeDriver': 'FakeDriver',
- 'nova.virt.libvirt.connection.LibvirtDriver': 'LibvirtDriver'
+ 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver'
}
# NOTE(sdague): remove after Folsom release when connection_type
@@ -177,7 +179,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
super(_VirtDriverTestCase, self).setUp()
self.connection = importutils.import_object(self.driver_module, '')
self.ctxt = test_utils.get_test_admin_context()
- self.image_service = image.get_default_image_service()
+ self.image_service = fake_image.FakeImageService()
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
@@ -560,7 +562,7 @@ class FakeConnectionTestCase(_VirtDriverTestCase):
class LibvirtConnTestCase(_VirtDriverTestCase):
def setUp(self):
# Point _VirtDriverTestCase at the right module
- self.driver_module = 'nova.virt.libvirt.connection.LibvirtDriver'
+ self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
super(LibvirtConnTestCase, self).setUp()
def tearDown(self):
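
For orientation (illustrative sketch, not part of the patch): with the fake image service relocated to nova/tests/image/fake.py, test cases wire it up through the helpers used above, roughly as follows; MyDriverTestCase is a hypothetical name.

    from nova import test
    from nova.tests.image import fake as fake_image

    class MyDriverTestCase(test.TestCase):
        def setUp(self):
            super(MyDriverTestCase, self).setUp()
            # Replace the real image service with the in-memory fake.
            fake_image.stub_out_image_service(self.stubs)

        def tearDown(self):
            # Drop any images the test registered in the shared fake.
            fake_image.FakeImageService_reset()
            super(MyDriverTestCase, self).tearDown()
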
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 7c2653f84..647a9d0a0 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -24,9 +24,8 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-import nova.image.fake
from nova import test
-import nova.tests.api.openstack.fakes as api_fakes
+import nova.tests.image.fake
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import fake as vmwareapi_fake
@@ -78,12 +77,12 @@ class VMWareAPIVMTestCase(test.TestCase):
'disk_format': 'vhd',
'size': 512,
}
- api_fakes.stub_out_image_service(self.stubs)
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(VMWareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
- nova.image.fake.FakeImageService_reset()
+ nova.tests.image.fake.FakeImageService_reset()
def _create_instance_in_the_db(self):
values = {'name': 1,
diff --git a/nova/tests/test_volume_utils.py b/nova/tests/test_volume_utils.py
index 222f12266..b2ccb41b8 100644
--- a/nova/tests/test_volume_utils.py
+++ b/nova/tests/test_volume_utils.py
@@ -20,7 +20,6 @@
from nova import context
from nova import db
from nova import flags
-import nova.image.fake
from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
@@ -39,7 +38,7 @@ class UsageInfoTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
stub_network=True,
host='fake')
- self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ self.stubs.Set(flags.FLAGS, 'notification_driver',
'nova.notifier.test_notifier')
self.volume = importutils.import_object(FLAGS.volume_manager)
self.user_id = 'fake'
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b9f73fcb4..668172a92 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -29,7 +29,6 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-import nova.image.fake
from nova import log as logging
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
@@ -37,13 +36,13 @@ from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
+import nova.tests.image.fake as fake_image
from nova.tests.xenapi import stubs
from nova.virt.xenapi import connection as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
-import nova.tests.api.openstack.fakes as api_fakes
LOG = logging.getLogger(__name__)
@@ -92,7 +91,7 @@ IMAGE_FIXTURES = {
def set_image_fixtures():
- image_service = nova.image.fake.FakeImageService()
+ image_service = fake_image.FakeImageService()
image_service.delete_all()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
@@ -113,23 +112,23 @@ def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
fake_dev = 'fakedev'
yield fake_dev
- def fake_image_service_get(*args, **kwargs):
+ def fake_image_download(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
- orig_image_service_get = nova.image.fake._FakeImageService.get
+ orig_image_download = fake_image._FakeImageService.download
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
- nova.image.fake._FakeImageService.get = fake_image_service_get
+ fake_image._FakeImageService.download = fake_image_download
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
- nova.image.fake._FakeImageService.get = orig_image_service_get
+ fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
@@ -278,14 +277,14 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(False)
- api_fakes.stub_out_image_service(self.stubs)
+ fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
- stubs.stubout_image_service_get(self.stubs)
+ stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
- nova.image.fake.FakeImageService_reset()
+ fake_image.FakeImageService_reset()
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 17fa2a049..46feca99e 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -17,6 +17,7 @@
import nova.context
import nova.db
import nova.flags
+from nova.image import glance
FLAGS = nova.flags.FLAGS
@@ -30,7 +31,8 @@ def get_test_image_info(context, instance_ref):
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
- image_service, image_id = nova.image.get_image_service(context, image_ref)
+ image_service, image_id = glance.get_remote_image_service(context,
+ image_ref)
return image_service.show(context, image_id)
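
Illustrative sketch only: with nova.image.get_image_service() gone, callers resolve the service through glance.get_remote_image_service(), which returns a (service, image_id) pair; context and image_ref below come from the caller.

    from nova.image import glance

    image_service, image_id = glance.get_remote_image_service(context, image_ref)
    meta = image_service.show(context, image_id)
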
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 7f32e96fb..47b950e01 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -19,8 +19,8 @@ import random
from eventlet import tpool
-import nova.image.fake
from nova.openstack.common import jsonutils
+import nova.tests.image.fake
from nova.virt.xenapi import connection as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
@@ -82,10 +82,11 @@ def stubout_get_this_vm_uuid(stubs):
stubs.Set(vm_utils, 'get_this_vm_uuid', f)
-def stubout_image_service_get(stubs):
- def fake_get(*args, **kwargs):
+def stubout_image_service_download(stubs):
+ def fake_download(*args, **kwargs):
pass
- stubs.Set(nova.image.fake._FakeImageService, 'get', fake_get)
+ stubs.Set(nova.tests.image.fake._FakeImageService,
+ 'download', fake_download)
def stubout_stream_disk(stubs):
diff --git a/nova/utils.py b/nova/utils.py
index 3a74e1b98..9560c1d18 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -114,11 +114,6 @@ def vpn_ping(address, port, timeout=0.05, session_id=None):
return server_sess
-def fetchfile(url, target):
- LOG.debug(_('Fetching %s') % url)
- execute('curl', '--fail', url, '-o', target)
-
-
def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
@@ -1041,19 +1036,6 @@ def generate_image_url(image_ref):
@contextlib.contextmanager
-def logging_error(message):
- """Catches exception, write message to the log, re-raise.
- This is a common refinement of save_and_reraise that writes a specific
- message to the log.
- """
- try:
- yield
- except Exception as error:
- with excutils.save_and_reraise_exception():
- LOG.exception(message)
-
-
-@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 0c7c3bd0a..079675da3 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -35,7 +35,7 @@ FLAGS = flags.FLAGS
known_drivers = {
'baremetal': 'nova.virt.baremetal.proxy.ProxyConnection',
'fake': 'nova.virt.fake.FakeDriver',
- 'libvirt': 'nova.virt.libvirt.connection.LibvirtDriver',
+ 'libvirt': 'nova.virt.libvirt.LibvirtDriver',
'vmwareapi': 'nova.virt.vmwareapi_conn.VMWareESXDriver',
'xenapi': 'nova.virt.xenapi.connection.XenAPIDriver'
}
diff --git a/nova/virt/disk/guestfs.py b/nova/virt/disk/guestfs.py
index 20e3eda39..bf7a286e0 100644
--- a/nova/virt/disk/guestfs.py
+++ b/nova/virt/disk/guestfs.py
@@ -88,6 +88,7 @@ class Mount(mount.Mount):
def unmnt_dev(self):
if not self.mounted:
return
+ utils.execute('sync')
# root users don't need a specific unmnt_dev()
# but ordinary users do
utils.execute('fusermount', '-u', self.mount_dir, run_as_root=True)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index e89aed627..78bd8aebc 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -25,7 +25,7 @@ import os
from nova import exception
from nova import flags
-import nova.image
+from nova.image import glance
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
@@ -48,12 +48,11 @@ def fetch(context, image_href, path, _user_id, _project_id):
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
- (image_service, image_id) = nova.image.get_image_service(context,
- image_href)
+ (image_service, image_id) = glance.get_remote_image_service(context,
+ image_href)
with utils.remove_path_on_error(path):
with open(path, "wb") as image_file:
- metadata = image_service.get(context, image_id, image_file)
- return metadata
+ image_service.download(context, image_id, image_file)
def fetch_to_raw(context, image_href, path, user_id, project_id):
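
The hunk above also reflects the image API rename from get() to download(): download() only streams the bits into the supplied file object and returns nothing, so callers that still need metadata fetch it separately with show(). A minimal sketch, assuming image_service and image_id were resolved as in the previous hunk:

    with utils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            # Stream the image data; no metadata is returned here.
            image_service.download(context, image_id, image_file)
    metadata = image_service.show(context, image_id)  # only if metadata is needed
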
diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py
index e69de29bb..00f4fd6b0 100644
--- a/nova/virt/libvirt/__init__.py
+++ b/nova/virt/libvirt/__init__.py
@@ -0,0 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.libvirt.driver import LibvirtDriver
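
This re-export is what the shortened 'nova.virt.libvirt.LibvirtDriver' paths elsewhere in the patch rely on; for example:

    from nova.virt.libvirt import LibvirtDriver  # same class as nova.virt.libvirt.driver.LibvirtDriver
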
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index c67f11550..edde66d7c 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -67,6 +67,94 @@ class LibvirtConfigObject(object):
return xml_str
+class LibvirtConfigCaps(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
+ **kwargs)
+ self.host = None
+ self.guests = []
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCaps, self).parse_dom(xmldoc)
+
+ for c in xmldoc.getchildren():
+ if c.tag == "host":
+ host = LibvirtConfigCapsHost()
+ host.parse_dom(c)
+ self.host = host
+ elif c.tag == "guest":
+ guest = LibvirtConfigCapsGuest()
+ guest.parse_dom(c)
+ self.guests.append(guest)
+
+ def format_dom(self):
+ caps = super(LibvirtConfigCaps, self).format_dom()
+
+ if self.host:
+ caps.append(self.host.format_dom())
+ for g in self.guests:
+ caps.append(g.format_dom())
+
+ return caps
+
+
+class LibvirtConfigCapsHost(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCapsHost, self).__init__(root_name="host",
+ **kwargs)
+
+ self.cpu = None
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
+
+ for c in xmldoc.getchildren():
+ if c.tag == "cpu":
+ cpu = LibvirtConfigCPU()
+ cpu.parse_dom(c)
+ self.cpu = cpu
+
+ def format_dom(self):
+ caps = super(LibvirtConfigCapsHost, self).format_dom()
+
+ if self.cpu:
+ caps.append(self.cpu.format_dom())
+
+ return caps
+
+
+class LibvirtConfigCapsGuest(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
+ **kwargs)
+
+ self.arch = None
+ self.ostype = None
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
+
+ for c in xmldoc.getchildren():
+ if c.tag == "os_type":
+ self.ostype = c.text
+ elif c.tag == "arch":
+ self.arch = c.get("name")
+
+ def format_dom(self):
+ caps = super(LibvirtConfigCapsGuest, self).format_dom()
+
+ if self.ostype is not None:
+ caps.append(self._text_node("os_type", self.ostype))
+ if self.arch:
+ arch = etree.Element("arch", name=self.arch)
+ caps.append(arch)
+
+ return caps
+
+
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
@@ -124,6 +212,123 @@ class LibvirtConfigGuestClock(LibvirtConfigObject):
self.timers.append(tm)
+class LibvirtConfigCPUFeature(LibvirtConfigObject):
+
+ def __init__(self, name=None, **kwargs):
+ super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
+ **kwargs)
+
+ self.name = name
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
+
+ self.name = xmldoc.get("name")
+
+ def format_dom(self):
+ ft = super(LibvirtConfigCPUFeature, self).format_dom()
+
+ ft.set("name", self.name)
+
+ return ft
+
+
+class LibvirtConfigCPU(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCPU, self).__init__(root_name='cpu',
+ **kwargs)
+
+ self.arch = None
+ self.vendor = None
+ self.model = None
+
+ self.sockets = None
+ self.cores = None
+ self.threads = None
+
+ self.features = []
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCPU, self).parse_dom(xmldoc)
+
+ for c in xmldoc.getchildren():
+ if c.tag == "arch":
+ self.arch = c.text
+ elif c.tag == "model":
+ self.model = c.text
+ elif c.tag == "vendor":
+ self.vendor = c.text
+ elif c.tag == "topology":
+ self.sockets = int(c.get("sockets"))
+ self.cores = int(c.get("cores"))
+ self.threads = int(c.get("threads"))
+ elif c.tag == "feature":
+ f = LibvirtConfigCPUFeature()
+ f.parse_dom(c)
+ self.add_feature(f)
+
+ def format_dom(self):
+ cpu = super(LibvirtConfigCPU, self).format_dom()
+
+ if self.arch is not None:
+ cpu.append(self._text_node("arch", self.arch))
+ if self.model is not None:
+ cpu.append(self._text_node("model", self.model))
+ if self.vendor is not None:
+ cpu.append(self._text_node("vendor", self.vendor))
+
+ if (self.sockets is not None and
+ self.cores is not None and
+ self.threads is not None):
+ top = etree.Element("topology")
+ top.set("sockets", str(self.sockets))
+ top.set("cores", str(self.cores))
+ top.set("threads", str(self.threads))
+ cpu.append(top)
+
+ for f in self.features:
+ cpu.append(f.format_dom())
+
+ return cpu
+
+ def add_feature(self, feat):
+ self.features.append(feat)
+
+
+class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
+
+ def __init__(self, name=None, **kwargs):
+ super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
+
+ self.policy = "require"
+
+ def format_dom(self):
+ ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
+
+ ft.set("policy", self.policy)
+
+ return ft
+
+
+class LibvirtConfigGuestCPU(LibvirtConfigCPU):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
+
+ self.mode = None
+ self.match = "exact"
+
+ def format_dom(self):
+ cpu = super(LibvirtConfigGuestCPU, self).format_dom()
+
+ if self.mode:
+ cpu.set("mode", self.mode)
+ cpu.set("match", self.match)
+
+ return cpu
+
+
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
@@ -366,6 +571,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.name = None
self.memory = 1024 * 1024 * 500
self.vcpus = 1
+ self.cpu = None
self.acpi = False
self.clock = None
self.os_type = None
@@ -426,6 +632,9 @@ class LibvirtConfigGuest(LibvirtConfigObject):
if self.clock is not None:
root.append(self.clock.format_dom())
+ if self.cpu is not None:
+ root.append(self.cpu.format_dom())
+
self._format_devices(root)
return root
@@ -437,46 +646,6 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.clock = clk
-class LibvirtConfigCPU(LibvirtConfigObject):
-
- def __init__(self, **kwargs):
- super(LibvirtConfigCPU, self).__init__(root_name="cpu",
- **kwargs)
-
- self.arch = None
- self.model = None
- self.vendor = None
- self.sockets = None
- self.cores = None
- self.threads = None
- self.features = []
-
- def add_feature(self, name):
- self.features.append(name)
-
- def format_dom(self):
- cpu = super(LibvirtConfigCPU, self).format_dom()
- if self.arch:
- cpu.append(self._text_node("arch", self.arch))
- if self.model:
- cpu.append(self._text_node("model", self.model))
- if self.vendor:
- cpu.append(self._text_node("vendor", self.vendor))
- if (self.sockets is not None and
- self.cores is not None and
- self.threads is not None):
- cpu.append(etree.Element("topology",
- sockets=str(self.sockets),
- cores=str(self.cores),
- threads=str(self.threads)))
-
- for f in self.features:
- cpu.append(etree.Element("feature",
- name=f))
-
- return cpu
-
-
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
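
Illustrative sketch only (not part of the patch) of how the new capability and guest-CPU config classes fit together, assuming lxml's etree as used throughout config.py; caps_xml stands in for a libvirt getCapabilities() document.

    from lxml import etree

    from nova.virt.libvirt import config

    # Build a guest <cpu> element with one required feature.
    cpu = config.LibvirtConfigGuestCPU()
    cpu.model = "Westmere"
    cpu.sockets, cpu.cores, cpu.threads = 1, 2, 2
    cpu.add_feature(config.LibvirtConfigGuestCPUFeature("aes"))
    print etree.tostring(cpu.format_dom())
    # -> <cpu match="exact"><model>Westmere</model><topology .../><feature .../></cpu>

    # Parse host capabilities back into objects.
    caps = config.LibvirtConfigCaps()
    caps.parse_dom(etree.fromstring(caps_xml))
    print caps.host.cpu.arch, [g.ostype for g in caps.guests]
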
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/driver.py
index 59a32ee5b..08de82d62 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/driver.py
@@ -60,7 +60,7 @@ from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
-import nova.image
+from nova.image import glance
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import excutils
@@ -156,7 +156,7 @@ libvirt_opts = [
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
- default=False,
+ default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
# force_config_drive is a string option, to allow for future behaviors
@@ -219,7 +219,7 @@ LIBVIRT_POWER_STATE = {
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
-MIN_LIBVIRT_VERSION = (0, 9, 7)
+MIN_LIBVIRT_VERSION = (0, 9, 6)
def _late_load_cheetah():
@@ -513,10 +513,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mountpoint = vol['mount_device']
+ mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
- mountpoint)
+ mount_device)
target = os.path.join(FLAGS.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
@@ -698,14 +698,14 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.InstanceNotFound:
raise exception.InstanceNotRunning()
- (image_service, image_id) = nova.image.get_image_service(
+ (image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
- _image_service = nova.image.get_image_service(context, image_href)
+ _image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
@@ -790,7 +790,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
- return self._hard_reboot(instance, network_info)
+ return self._hard_reboot(instance)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
@@ -827,17 +827,30 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
return False
- def _hard_reboot(self, instance, network_info, xml=None):
+ def _hard_reboot(self, instance, xml=None):
"""Reboot a virtual machine, given an instance reference.
- This method actually destroys and re-creates the domain to ensure the
- reboot happens, as the guest OS cannot ignore this action.
+ Performs a Libvirt reset (if supported) on the domain.
+
+ If Libvirt reset is unavailable this method actually destroys and
+ re-creates the domain to ensure the reboot happens, as the guest
+ OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
+
virt_dom = self._conn.lookupByName(instance['name'])
- virt_dom.reset(0)
+ # NOTE(itoumsn): Use XML derived from the running instance.
+ if not xml:
+ xml = virt_dom.XMLDesc(0)
+
+ # NOTE(dprince): reset was added in Libvirt 0.9.7
+ if hasattr(virt_dom, 'reset'):
+ virt_dom.reset(0)
+ else:
+ self._destroy(instance)
+ self._create_domain(xml, virt_dom)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
@@ -1514,7 +1527,7 @@ class LibvirtDriver(driver.ComputeDriver):
if FLAGS.libvirt_type == "lxc":
fs = config.LibvirtConfigGuestFilesys()
- fs.type = "mount"
+ fs.source_type = "mount"
fs.source_dir = os.path.join(FLAGS.instances_path,
instance['name'],
"rootfs")
@@ -1619,10 +1632,10 @@ class LibvirtDriver(driver.ComputeDriver):
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mountpoint = vol['mount_device']
+ mount_device = vol['mount_device'].rpartition("/")[2]
cfg = self.volume_driver_method('connect_volume',
connection_info,
- mountpoint)
+ mount_device)
guest.add_device(cfg)
if self._has_config_drive(instance):
@@ -1828,26 +1841,14 @@ class LibvirtDriver(driver.ComputeDriver):
"This error can be safely ignored for now."))
return 0
- @staticmethod
- def get_memory_mb_total():
+ def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
- if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
- return 0
-
- if FLAGS.libvirt_type == 'xen':
- meminfo = self._conn.getInfo()[1]
- # this is in MB
- return meminfo
- else:
- meminfo = open('/proc/meminfo').read().split()
- idx = meminfo.index('MemTotal:')
- # transforming KB to MB
- return int(meminfo[idx + 1]) / 1024
+ return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_total():
@@ -2116,7 +2117,7 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
- cpu.add_feature(f)
+ cpu.add_feature(config.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
@@ -2265,10 +2266,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mountpoint = vol['mount_device']
+ mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('connect_volume',
connection_info,
- mountpoint)
+ mount_device)
def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
"""Preparation block migration.
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 5333e0992..3a93cf3c8 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -43,7 +43,7 @@ libvirt_vif_opts = [
FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_vif_opts)
-flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver')
class LibvirtBridgeDriver(vif.VIFDriver):
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 56088acb7..c1ab77fa5 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -91,10 +91,11 @@ def fetch_image(context, image, instance, **kwargs):
LOG.debug(_("Downloading image %s from glance image server") % image,
instance=instance)
(image_service, image_id) = glance.get_remote_image_service(context, image)
+ metadata = image_service.show(context, image_id)
+ file_size = int(metadata['size'])
f = StringIO.StringIO()
- metadata = image_service.get(context, image_id, f)
+ image_service.download(context, image_id, f)
read_file_handle = read_write_util.GlanceFileRead(f)
- file_size = int(metadata['size'])
write_file_handle = read_write_util.VMWareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 64a479fe1..b0f617e48 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -39,7 +39,6 @@ from nova.compute import power_state
from nova import db
from nova import exception
from nova import flags
-import nova.image
from nova.image import glance
from nova import log as logging
from nova.openstack.common import cfg
@@ -322,7 +321,7 @@ def create_vdi(session, sr_ref, info, disk_type, virtual_size,
# hence information about instance may or may not be present
otherconf = {}
if not isinstance(info, basestring):
- name_label = info['display_name']
+ name_label = info['name']
otherconf = {'nova_instance_uuid': info['uuid'],
'nova_disk_type': disk_type}
else:
@@ -869,7 +868,8 @@ def _fetch_image_glance_disk(context, session, instance, image_id, image_type):
else:
sr_ref = safe_find_sr(session)
- image_service, image_id = nova.image.get_image_service(context, image_id)
+ image_service, image_id = glance.get_remote_image_service(
+ context, image_id)
meta = image_service.show(context, image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
@@ -893,7 +893,8 @@ def _fetch_image_glance_disk(context, session, instance, image_id, image_type):
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
- stream_func = lambda f: image_service.get(context, image_id, f)
+ stream_func = lambda f: image_service.download(
+ context, image_id, f)
_stream_disk(stream_func, image_type, virtual_size, dev)
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 9520ba920..475e8264f 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -28,6 +28,7 @@ import time
import uuid
from eventlet import greenthread
+import netaddr
from nova.compute import api as compute
from nova.compute import power_state
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 531033a52..143970cfb 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -24,7 +24,6 @@ import string
from nova import flags
from nova import log as logging
-from nova.virt import xenapi
FLAGS = flags.FLAGS
diff --git a/nova/volume/iscsi.py b/nova/volume/iscsi.py
index 115d51c80..33ee7dabc 100644
--- a/nova/volume/iscsi.py
+++ b/nova/volume/iscsi.py
@@ -26,7 +26,7 @@ from nova import utils
iscsi_helper_opt = cfg.StrOpt('iscsi_helper',
- default='ietadm',
+ default='tgtadm',
help='iscsi target user-land tool to use')
FLAGS = flags.FLAGS
diff --git a/setup.py b/setup.py
index 4ff26d568..37988acf7 100644
--- a/setup.py
+++ b/setup.py
@@ -88,6 +88,7 @@ setuptools.setup(name='nova',
'bin/nova-instance-usage-audit',
'bin/nova-manage',
'bin/nova-network',
+ 'bin/nova-novncproxy',
'bin/nova-objectstore',
'bin/nova-rootwrap',
'bin/nova-scheduler',
diff --git a/tools/test-requires b/tools/test-requires
index 0e230fcd5..01cbbe122 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -5,5 +5,5 @@ coverage
mox==0.5.3
nose
openstack.nose_plugin>=0.7
-pep8==1.1
+pep8==1.2
sphinx>=1.1.2
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index 43f2ad81e..b0e9d981c 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -32,40 +32,17 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova.virt.xenapi import connection as xenapi_conn
+CONF = cfg.CONF
flags.DECLARE("resize_confirm_window", "nova.compute.manager")
-FLAGS = flags.FLAGS
-# NOTE(sirp): Nova futzs with the sys.argv in order to provide default
-# flagfile. To isolate this awful practice, we're supplying a dummy
-# argument list.
-dummy = ["fakearg"]
-flags.parse_args(dummy)
-
-class UnrecognizedNameLabel(Exception):
- pass
-
-
-def parse_options():
- """Generate command line options."""
-
- ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
- "clean-instances", "test"]
- arg_str = "|".join(ALLOWED_COMMANDS)
- parser = optparse.OptionParser("%prog [options] [" + arg_str + "]")
- parser.add_option("--verbose", action="store_true")
-
- options, args = parser.parse_args()
-
- if not args:
- parser.print_usage()
- sys.exit(1)
-
- return options, args
+ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
+ "clean-instances", "test"]
def call_xenapi(xenapi, method, *args):
@@ -73,7 +50,7 @@ def call_xenapi(xenapi, method, *args):
return xenapi._session.call_xenapi(method, *args)
-def find_orphaned_instances(xenapi, verbose=False):
+def find_orphaned_instances(xenapi):
"""Find and return a list of orphaned instances."""
ctxt = context.get_admin_context(read_deleted="only")
@@ -86,8 +63,7 @@ def find_orphaned_instances(xenapi, verbose=False):
except (KeyError, exception.InstanceNotFound):
# NOTE(jk0): Err on the side of caution here. If we don't know
# anything about the particular instance, ignore it.
- print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0,
- verbose=verbose)
+ print_xen_object("INFO: Ignoring VM", vm_rec, indent_level=0)
continue
# NOTE(jk0): This would be triggered if a VM was deleted but the
@@ -99,7 +75,7 @@ def find_orphaned_instances(xenapi, verbose=False):
# been updated in over the specified period.
is_zombie_vm = (instance.vm_state != "active"
and timeutils.is_older_than(instance.updated_at,
- FLAGS.zombie_instance_updated_at_window))
+ CONF.zombie_instance_updated_at_window))
if is_active_and_deleting or is_zombie_vm:
orphaned_instances.append((vm_ref, vm_rec, instance))
@@ -129,15 +105,14 @@ def _get_applicable_vm_recs(xenapi):
yield vm_ref, vm_rec
-def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4,
- verbose=False):
+def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4):
"""Pretty-print a Xen object.
Looks like:
VM (abcd-abcd-abcd): 'name label here'
"""
- if not verbose:
+ if not CONF.verbose:
return
uuid = obj["uuid"]
try:
@@ -149,7 +124,7 @@ def print_xen_object(obj_type, obj, indent_level=0, spaces_per_indent=4,
print "".join([indent, msg])
-def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=False):
+def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids):
"""Find VDIs which are connected to VBDs which are connected to VMs."""
def _is_null_ref(ref):
return ref == "OpaqueRef:NULL"
@@ -161,8 +136,7 @@ def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=False):
cur_vdi_rec = vdi_rec
while True:
cur_vdi_uuid = cur_vdi_rec["uuid"]
- print_xen_object("VDI", vdi_rec, indent_level=indent_level,
- verbose=verbose)
+ print_xen_object("VDI", vdi_rec, indent_level=indent_level)
connected_vdi_uuids.add(cur_vdi_uuid)
vdi_and_parent_uuids.append(cur_vdi_uuid)
@@ -188,8 +162,7 @@ def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=False):
for vm_ref, vm_rec in _get_applicable_vm_recs(xenapi):
indent_level = 0
- print_xen_object("VM", vm_rec, indent_level=indent_level,
- verbose=verbose)
+ print_xen_object("VM", vm_rec, indent_level=indent_level)
vbd_refs = vm_rec["VBDs"]
for vbd_ref in vbd_refs:
@@ -201,8 +174,7 @@ def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=False):
continue
indent_level = 1
- print_xen_object("VBD", vbd_rec, indent_level=indent_level,
- verbose=verbose)
+ print_xen_object("VBD", vbd_rec, indent_level=indent_level)
vbd_vdi_ref = vbd_rec["VDI"]
@@ -219,8 +191,7 @@ def _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=False):
_add_vdi_and_parents_to_connected(vdi_rec, indent_level)
-def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids,
- verbose=False):
+def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids):
"""Collects all VDIs and adds system VDIs to the connected set."""
def _system_owned(vdi_rec):
vdi_name = vdi_rec["name_label"]
@@ -241,42 +212,39 @@ def _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids,
# System owned and non-managed VDIs should be considered 'connected'
# for our purposes.
if _system_owned(vdi_rec):
- print_xen_object("SYSTEM VDI", vdi_rec, indent_level=0,
- verbose=verbose)
+ print_xen_object("SYSTEM VDI", vdi_rec, indent_level=0)
connected_vdi_uuids.add(vdi_uuid)
elif not vdi_rec["managed"]:
- print_xen_object("UNMANAGED VDI", vdi_rec, indent_level=0,
- verbose=verbose)
+ print_xen_object("UNMANAGED VDI", vdi_rec, indent_level=0)
connected_vdi_uuids.add(vdi_uuid)
-def find_orphaned_vdi_uuids(xenapi, verbose=False):
+def find_orphaned_vdi_uuids(xenapi):
"""Walk VM -> VBD -> VDI change and accumulate connected VDIs."""
connected_vdi_uuids = set()
- _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids, verbose=verbose)
+ _find_vdis_connected_to_vm(xenapi, connected_vdi_uuids)
all_vdi_uuids = set()
- _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids,
- verbose=verbose)
+ _find_all_vdis_and_system_vdis(xenapi, all_vdi_uuids, connected_vdi_uuids)
orphaned_vdi_uuids = all_vdi_uuids - connected_vdi_uuids
return orphaned_vdi_uuids
-def list_orphaned_vdis(vdi_uuids, verbose=False):
+def list_orphaned_vdis(vdi_uuids):
"""List orphaned VDIs."""
for vdi_uuid in vdi_uuids:
- if verbose:
+ if CONF.verbose:
print "ORPHANED VDI (%s)" % vdi_uuid
else:
print vdi_uuid
-def clean_orphaned_vdis(xenapi, vdi_uuids, verbose=False):
+def clean_orphaned_vdis(xenapi, vdi_uuids):
"""Clean orphaned VDIs."""
for vdi_uuid in vdi_uuids:
- if verbose:
+ if CONF.verbose:
print "CLEANING VDI (%s)" % vdi_uuid
vdi_ref = call_xenapi(xenapi, 'VDI.get_by_uuid', vdi_uuid)
@@ -286,19 +254,19 @@ def clean_orphaned_vdis(xenapi, vdi_uuids, verbose=False):
print >> sys.stderr, "Skipping %s: %s" % (vdi_uuid, exc)
-def list_orphaned_instances(orphaned_instances, verbose=False):
+def list_orphaned_instances(orphaned_instances):
"""List orphaned instances."""
for vm_ref, vm_rec, orphaned_instance in orphaned_instances:
- if verbose:
+ if CONF.verbose:
print "ORPHANED INSTANCE (%s)" % orphaned_instance.name
else:
print orphaned_instance.name
-def clean_orphaned_instances(xenapi, orphaned_instances, verbose=False):
+def clean_orphaned_instances(xenapi, orphaned_instances):
"""Clean orphaned instances."""
for vm_ref, vm_rec, instance in orphaned_instances:
- if verbose:
+ if CONF.verbose:
print "CLEANING INSTANCE (%s)" % instance.name
cleanup_instance(xenapi, instance, vm_ref, vm_rec)
@@ -306,33 +274,36 @@ def clean_orphaned_instances(xenapi, orphaned_instances, verbose=False):
def main():
"""Main loop."""
- options, args = parse_options()
- verbose = options.verbose
- command = args[0]
+ args = CONF(args=sys.argv,
+ usage='%prog [options] [' + '|'.join(ALLOWED_COMMANDS) + ']')
+ if len(args) < 2:
+ CONF.print_usage()
+ sys.exit(1)
+
+ command = args[1]
- if FLAGS.zombie_instance_updated_at_window < FLAGS.resize_confirm_window:
+ if CONF.zombie_instance_updated_at_window < CONF.resize_confirm_window:
raise Exception("`zombie_instance_updated_at_window` has to be longer"
" than `resize_confirm_window`.")
xenapi = xenapi_conn.XenAPIDriver()
if command == "list-vdis":
- if verbose:
+ if CONF.verbose:
print "Connected VDIs:\n"
- orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi, verbose=verbose)
- if verbose:
+ orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
+ if CONF.verbose:
print "\nOrphaned VDIs:\n"
- list_orphaned_vdis(orphaned_vdi_uuids, verbose=verbose)
+ list_orphaned_vdis(orphaned_vdi_uuids)
elif command == "clean-vdis":
- orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi, verbose=verbose)
- clean_orphaned_vdis(xenapi, orphaned_vdi_uuids, verbose=verbose)
+ orphaned_vdi_uuids = find_orphaned_vdi_uuids(xenapi)
+ clean_orphaned_vdis(xenapi, orphaned_vdi_uuids)
elif command == "list-instances":
- orphaned_instances = find_orphaned_instances(xenapi, verbose=verbose)
- list_orphaned_instances(orphaned_instances, verbose=verbose)
+ orphaned_instances = find_orphaned_instances(xenapi)
+ list_orphaned_instances(orphaned_instances)
elif command == "clean-instances":
- orphaned_instances = find_orphaned_instances(xenapi, verbose=verbose)
- clean_orphaned_instances(xenapi, orphaned_instances,
- verbose=verbose)
+ orphaned_instances = find_orphaned_instances(xenapi)
+ clean_orphaned_instances(xenapi, orphaned_instances)
elif command == "test":
doctest.testmod()
else: