Diffstat (limited to 'nova/tests')
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_availability_zone.py | 1
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py | 51
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_cells.py | 474
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py | 308
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_flavor_disabled.py | 101
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_images.py | 1336
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py | 84
-rw-r--r--  nova/tests/api/openstack/compute/test_limits.py | 24
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py | 31
-rw-r--r--  nova/tests/api/openstack/fakes.py | 5
-rw-r--r--  nova/tests/compute/test_compute.py | 191
-rw-r--r--  nova/tests/conductor/test_conductor.py | 5
-rw-r--r--  nova/tests/db/test_db_api.py | 151
-rw-r--r--  nova/tests/db/test_migration_utils.py | 27
-rw-r--r--  nova/tests/db/test_migrations.py | 3
-rw-r--r--  nova/tests/fake_policy.py | 2
-rw-r--r--  nova/tests/integrated/test_api_samples.py | 14
-rw-r--r--  nova/tests/network/test_linux_net.py | 1
-rw-r--r--  nova/tests/network/test_network_info.py | 2
-rw-r--r--  nova/tests/network/test_quantumv2.py | 5
-rw-r--r--  nova/tests/objects/test_objects.py | 25
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 104
-rw-r--r--  nova/tests/servicegroup/test_mc_servicegroup.py | 16
-rw-r--r--  nova/tests/servicegroup/test_zk_driver.py | 2
-rw-r--r--  nova/tests/test_availability_zones.py | 4
-rw-r--r--  nova/tests/test_cinder.py | 10
-rw-r--r--  nova/tests/test_flavors.py | 16
-rw-r--r--  nova/tests/test_metadata.py | 1
-rw-r--r--  nova/tests/test_quota.py | 2
-rw-r--r--  nova/tests/test_utils.py | 86
-rw-r--r--  nova/tests/virt/libvirt/test_libvirt.py | 293
-rw-r--r--  nova/tests/virt/test_virt_drivers.py | 5
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmwareapi.py | 2
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py | 96
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py | 4
35 files changed, 3254 insertions(+), 228 deletions(-)
diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
index 2ccb9fa31..0b63960ce 100644
--- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
@@ -76,6 +76,7 @@ def fake_set_availability_zones(context, services):
class AvailabilityZoneApiTest(test.TestCase):
def setUp(self):
super(AvailabilityZoneApiTest, self).setUp()
+ availability_zones._reset_cache()
self.stubs.Set(db, 'service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
index 814c0fff4..59d60acf2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -19,6 +19,7 @@ import webob
from nova.api.openstack.compute.contrib import extended_availability_zone
from nova import availability_zones
from nova import compute
+from nova.compute import vm_states
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
@@ -29,14 +30,31 @@ UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
+def fake_compute_get_az(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+ vm_state=vm_states.ACTIVE,
+ availability_zone='fakeaz')
+ return inst
+
+
+def fake_compute_get_empty(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="",
+ vm_state=vm_states.ACTIVE,
+ availability_zone='fakeaz')
+ return inst
+
+
def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="get-host")
+ inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+ vm_state=vm_states.ACTIVE)
return inst
def fake_compute_get_all(*args, **kwargs):
- inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host")
- inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host")
+ inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host",
+ vm_state=vm_states.ACTIVE)
+ inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host",
+ vm_state=vm_states.ACTIVE)
return [inst1, inst2]
@@ -44,12 +62,17 @@ def fake_get_host_availability_zone(context, host):
return host
+def fake_get_no_host_availability_zone(context, host):
+ return None
+
+
class ExtendedServerAttributesTest(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-AZ:'
def setUp(self):
super(ExtendedServerAttributesTest, self).setUp()
+ availability_zones._reset_cache()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
@@ -77,6 +100,28 @@ class ExtendedServerAttributesTest(test.TestCase):
self.assertEqual(server.get('%savailability_zone' % self.prefix),
az)
+ def test_show_no_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_az)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerAttributes(self._get_server(res.body), 'fakeaz')
+
+ def test_show_empty_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_empty)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerAttributes(self._get_server(res.body), 'fakeaz')
+
def test_show(self):
url = '/v2/fake/servers/%s' % UUID3
res = self._make_request(url)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py
new file mode 100644
index 000000000..a9e77693e
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py
@@ -0,0 +1,474 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cells as cells_ext
+from nova.api.openstack import extensions
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.tests import utils
+
+
+FAKE_CELLS = [
+ dict(id=1, name='cell1', username='bob', is_parent=True,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r1.example.org', password='xxxx'),
+ dict(id=2, name='cell2', username='alice', is_parent=False,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r2.example.org', password='qwerty')]
+
+
+FAKE_CAPABILITIES = [
+ {'cap1': '0,1', 'cap2': '2,3'},
+ {'cap3': '4,5', 'cap4': '5,6'}]
+
+
+def fake_db_cell_get(context, cell_name):
+ for cell in FAKE_CELLS:
+ if cell_name == cell['name']:
+ return cell
+ else:
+ raise exception.CellNotFound(cell_name=cell_name)
+
+
+def fake_db_cell_create(context, values):
+ cell = dict(id=1)
+ cell.update(values)
+ return cell
+
+
+def fake_db_cell_update(context, cell_id, values):
+ cell = fake_db_cell_get(context, cell_id)
+ cell.update(values)
+ return cell
+
+
+def fake_cells_api_get_all_cell_info(*args):
+ cells = copy.deepcopy(FAKE_CELLS)
+ del cells[0]['password']
+ del cells[1]['password']
+ for i, cell in enumerate(cells):
+ cell['capabilities'] = FAKE_CAPABILITIES[i]
+ return cells
+
+
+def fake_db_cell_get_all(context):
+ return FAKE_CELLS
+
+
+class CellsTest(test.TestCase):
+ def setUp(self):
+ super(CellsTest, self).setUp()
+ self.stubs.Set(db, 'cell_get', fake_db_cell_get)
+ self.stubs.Set(db, 'cell_get_all', fake_db_cell_get_all)
+ self.stubs.Set(db, 'cell_update', fake_db_cell_update)
+ self.stubs.Set(db, 'cell_create', fake_db_cell_create)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
+ fake_cells_api_get_all_cell_info)
+
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = cells_ext.Controller(self.ext_mgr)
+ self.context = context.get_admin_context()
+
+ def _get_request(self, resource):
+ return fakes.HTTPRequest.blank('/v2/fake/' + resource)
+
+ def test_index(self):
+ req = self._get_request("cells")
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+            self.assertNotIn('capabilities', cell)
+ self.assertNotIn('password', cell)
+
+ def test_detail(self):
+ req = self._get_request("cells/detail")
+ res_dict = self.controller.detail(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+ self.assertEqual(cell['capabilities'], FAKE_CAPABILITIES[i])
+ self.assertNotIn('password', cell)
+
+ def test_show_bogus_cell_raises(self):
+ req = self._get_request("cells/bogus")
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
+
+ def test_get_cell_by_name(self):
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.show(req, 'cell1')
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertNotIn('password', cell)
+
+ def test_cell_delete(self):
+ call_info = {'delete_called': 0}
+
+ def fake_db_cell_delete(context, cell_name):
+ self.assertEqual(cell_name, 'cell999')
+ call_info['delete_called'] += 1
+
+ self.stubs.Set(db, 'cell_delete', fake_db_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ self.controller.delete(req, 'cell999')
+ self.assertEqual(call_info['delete_called'], 1)
+
+ def test_delete_bogus_cell_raises(self):
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
+ 'cell999')
+
+ def test_cell_create_parent(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent',
+ # Also test this is ignored/stripped
+ 'is_parent': False}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'parent')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_child(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'child'}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'child')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_no_name_raises(self):
+ body = {'cell': {'username': 'moocow',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_empty_string_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_bang_raises(self):
+ body = {'cell': {'name': 'moo!cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_invalid_type_raises(self):
+ body = {'cell': {'name': 'moocow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'invalid'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_update(self):
+ body = {'cell': {'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.update(req, 'cell1', body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], FAKE_CELLS[0]['rpc_host'])
+ self.assertEqual(cell['username'], 'zeb')
+ self.assertNotIn('password', cell)
+
+ def test_cell_update_empty_name_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_update_invalid_type_raises(self):
+ body = {'cell': {'username': 'zeb',
+ 'type': 'invalid',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_info(self):
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(name='darksecret', capabilities=caps, group='cells')
+
+ req = self._get_request("cells/info")
+ res_dict = self.controller.info(req)
+ cell = res_dict['cell']
+ cell_caps = cell['capabilities']
+
+ self.assertEqual(cell['name'], 'darksecret')
+ self.assertEqual(cell_caps['cap1'], 'a;b')
+ self.assertEqual(cell_caps['cap2'], 'c;d')
+
+ def test_show_capacities(self):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name=None).AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req)
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_show_capacity_fails_with_non_admin_context(self):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ rules = {"compute_extension:cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.capacities, req)
+
+ def test_show_capacities_for_invalid_cell(self):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ self.controller.cells_rpcapi. \
+ get_capacities(self.context, cell_name="invalid_cell").AndRaise(
+ exception.CellNotFound(cell_name="invalid_cell"))
+ self.mox.ReplayAll()
+ req = self._get_request("cells/invalid_cell/capacities")
+ req.environ["nova.context"] = self.context
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.capacities, req, "invalid_cell")
+
+ def test_show_capacities_for_cell(self):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name='cell_name').\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req, 'cell_name')
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_sync_instances(self):
+ call_info = {}
+
+ def sync_instances(self, context, **kwargs):
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ body = {}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], None)
+
+ body = {'project_id': 'test-project'}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], 'test-project')
+ self.assertEqual(call_info['updated_since'], None)
+
+ expected = timeutils.utcnow().isoformat()
+ if not expected.endswith("+00:00"):
+ expected += "+00:00"
+
+ body = {'updated_since': expected}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], expected)
+
+ body = {'updated_since': 'skjdfkjsdkf'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'foo': 'meow'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+
+class TestCellsXMLSerializer(test.TestCase):
+ def test_multiple_cells(self):
+ fixture = {'cells': fake_cells_api_get_all_cell_info()}
+
+ serializer = cells_ext.CellsTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+
+ def test_single_cell_with_caps(self):
+ cell = {'id': 1,
+ 'name': 'darksecret',
+ 'username': 'meow',
+ 'capabilities': {'cap1': 'a;b',
+ 'cap2': 'c;d'}}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'meow')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 1)
+
+ child = res_tree[0]
+ self.assertEqual(child.tag,
+ '{%s}capabilities' % xmlutil.XMLNS_V10)
+ for elem in child:
+ self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10))
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
+
+ def test_single_cell_without_caps(self):
+ cell = {'id': 1,
+ 'username': 'woof',
+ 'name': 'darksecret'}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'woof')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 0)
+
+
+class TestCellsXMLDeserializer(test.TestCase):
+ def test_cell_deserializer(self):
+ caps_dict = {'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ caps_xml = ("<capabilities><cap1>a;b</cap1>"
+ "<cap2>c;d</cap2></capabilities>")
+ expected = {'cell': {'name': 'testcell1',
+ 'type': 'child',
+ 'rpc_host': 'localhost',
+ 'capabilities': caps_dict}}
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ "<cell><name>testcell1</name><type>child</type>"
+ "<rpc_host>localhost</rpc_host>"
+ "%s</cell>") % caps_xml
+ deserializer = cells_ext.CellDeserializer()
+ result = deserializer.deserialize(intext)
+ self.assertEqual(dict(body=expected), result)
+
+ def test_with_corrupt_xml(self):
+ deserializer = cells_ext.CellDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py
new file mode 100644
index 000000000..7d9ec93df
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py
@@ -0,0 +1,308 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.plugins.v3 import flavor_access
+from nova.api.openstack.compute import flavors as flavors_api
+from nova.compute import flavors
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+def generate_flavor(flavorid, ispublic):
+ return {
+ 'id': flavorid,
+ 'flavorid': str(flavorid),
+ 'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'name': u'test',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
+ 'updated_at': None,
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'swap': 512,
+ 'rxtx_factor': 1.0,
+ 'extra_specs': {},
+ 'deleted_at': None,
+ 'vcpu_weight': None,
+ 'is_public': bool(ispublic)
+ }
+
+
+INSTANCE_TYPES = {
+ '0': generate_flavor(0, True),
+ '1': generate_flavor(1, True),
+ '2': generate_flavor(2, False),
+ '3': generate_flavor(3, False)}
+
+
+ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
+ {'flavor_id': '2', 'project_id': 'proj3'},
+ {'flavor_id': '3', 'project_id': 'proj3'}]
+
+
+def fake_get_flavor_access_by_flavor_id(flavorid):
+ res = []
+ for access in ACCESS_LIST:
+ if access['flavor_id'] == flavorid:
+ res.append(access)
+ return res
+
+
+def fake_get_flavor_by_flavor_id(flavorid):
+ return INSTANCE_TYPES[flavorid]
+
+
+def _has_flavor_access(flavorid, projectid):
+ for access in ACCESS_LIST:
+ if access['flavor_id'] == flavorid and \
+ access['project_id'] == projectid:
+ return True
+ return False
+
+
+def fake_get_all_flavors(context, inactive=0, filters=None):
+    if filters is None or filters['is_public'] is None:
+ return INSTANCE_TYPES
+
+ res = {}
+ for k, v in INSTANCE_TYPES.iteritems():
+ if filters['is_public'] and _has_flavor_access(k, context.project_id):
+ res.update({k: v})
+ continue
+ if v['is_public'] == filters['is_public']:
+ res.update({k: v})
+
+ return res
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+ def get_db_flavor(self, flavor_id):
+ return INSTANCE_TYPES[flavor_id]
+
+
+class FakeResponse(object):
+ obj = {'flavor': {'id': '0'},
+ 'flavors': [
+ {'id': '0'},
+ {'id': '2'}]
+ }
+
+ def attach(self, **kwargs):
+ pass
+
+
+class FlavorAccessTest(test.TestCase):
+ def setUp(self):
+ super(FlavorAccessTest, self).setUp()
+ self.flavor_controller = flavors_api.Controller()
+ self.flavor_access_controller = flavor_access.FlavorAccessController()
+ self.flavor_action_controller = flavor_access.FlavorActionController()
+ self.req = FakeRequest()
+ self.context = self.req.environ['nova.context']
+ self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+ self.stubs.Set(flavors, 'get_all_flavors', fake_get_all_flavors)
+ self.stubs.Set(flavors, 'get_flavor_access_by_flavor_id',
+ fake_get_flavor_access_by_flavor_id)
+
+ def _verify_flavor_list(self, result, expected):
+ # result already sorted by flavor_id
+ self.assertEqual(len(result), len(expected))
+
+ for d1, d2 in zip(result, expected):
+ self.assertEqual(d1['id'], d2['id'])
+
+ def test_list_flavor_access_public(self):
+ # query os-flavor-access on public flavor should return 404
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors/os-flavor-access',
+ use_admin_context=True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.flavor_access_controller.index,
+ self.req, '1')
+
+ def test_list_flavor_access_private(self):
+ expected = {'flavor_access': [
+ {'flavor_id': '2', 'tenant_id': 'proj2'},
+ {'flavor_id': '2', 'tenant_id': 'proj3'}]}
+ result = self.flavor_access_controller.index(self.req, '2')
+ self.assertEqual(result, expected)
+
+ def test_list_flavor_with_admin_default_proj1(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors',
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj1'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_default_proj2(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors',
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj2'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_true(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=true',
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_false(self):
+ expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=false',
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_false_proj2(self):
+ expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=false',
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj2'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_none(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
+ {'id': '3'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=none',
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_default(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors',
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_true(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=true',
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_false(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=false',
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_none(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors?is_public=none',
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_show(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.show(self.req, resp, '0')
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
+ resp.obj['flavor'])
+ self.flavor_action_controller.show(self.req, resp, '2')
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
+ resp.obj['flavor'])
+
+ def test_detail(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.detail(self.req, resp)
+ self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
+ {'id': '2', 'os-flavor-access:is_public': False}],
+ resp.obj['flavors'])
+
+ def test_create(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.create(self.req, {}, resp)
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
+ resp.obj['flavor'])
+
+ def test_add_tenant_access(self):
+ def stub_add_flavor_access(flavorid, projectid, ctxt=None):
+ self.assertEqual('3', flavorid, "flavorid")
+ self.assertEqual("proj2", projectid, "projectid")
+ self.stubs.Set(flavors, 'add_flavor_access',
+ stub_add_flavor_access)
+ expected = {'flavor_access':
+ [{'flavor_id': '3', 'tenant_id': 'proj3'}]}
+ body = {'addTenantAccess': {'tenant': 'proj2'}}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors/2/action',
+ use_admin_context=True)
+ result = self.flavor_action_controller.\
+ _addTenantAccess(req, '3', body)
+ self.assertEqual(result, expected)
+
+ def test_add_tenant_access_with_already_added_access(self):
+ def stub_add_flavor_access(flavorid, projectid, ctxt=None):
+ raise exception.FlavorAccessExists(flavor_id=flavorid,
+ project_id=projectid)
+ self.stubs.Set(flavors, 'add_flavor_access',
+ stub_add_flavor_access)
+ body = {'addTenantAccess': {'tenant': 'proj2'}}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors/2/action',
+ use_admin_context=True)
+ self.assertRaises(exc.HTTPConflict,
+ self.flavor_action_controller._addTenantAccess,
+ self.req, '3', body)
+
+ def test_remove_tenant_access_with_bad_access(self):
+ def stub_remove_flavor_access(flavorid, projectid, ctxt=None):
+ raise exception.FlavorAccessNotFound(flavor_id=flavorid,
+ project_id=projectid)
+ self.stubs.Set(flavors, 'remove_flavor_access',
+ stub_remove_flavor_access)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+ req = fakes.HTTPRequest.blank('/v3/fake/flavors/2/action',
+ use_admin_context=True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.flavor_action_controller._removeTenantAccess,
+ self.req, '3', body)
+
+
+class FlavorAccessSerializerTest(test.TestCase):
+ def test_serializer_empty(self):
+ serializer = flavor_access.FlavorAccessTemplate()
+ text = serializer.serialize(dict(flavor_access=[]))
+ tree = etree.fromstring(text)
+ self.assertEqual(len(tree), 0)
+
+ def test_serializer(self):
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<flavor_access>'
+ '<access tenant_id="proj2" flavor_id="2"/>'
+ '<access tenant_id="proj3" flavor_id="2"/>'
+ '</flavor_access>')
+ access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
+ {'flavor_id': '2', 'tenant_id': 'proj3'}]
+
+ serializer = flavor_access.FlavorAccessTemplate()
+ text = serializer.serialize(dict(flavor_access=access_list))
+ self.assertEqual(text, expected)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_disabled.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_disabled.py
new file mode 100644
index 000000000..5ff7f4035
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_disabled.py
@@ -0,0 +1,101 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import flavor_disabled
+from nova.compute import flavors
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "disabled": False,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '20',
+ "disabled": True,
+ },
+}
+
+
+def fake_flavor_get_by_flavor_id(flavorid):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_flavor_get_all(*args, **kwargs):
+ return FAKE_FLAVORS
+
+
+class FlavorDisabledTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = '%s:' % flavor_disabled.FlavorDisabled.alias
+
+ def setUp(self):
+ super(FlavorDisabledTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(flavors, "get_all_flavors",
+ fake_flavor_get_all)
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ app = fakes.wsgi_app_v3(init_only=('servers', 'flavors',
+ 'os-flavor-disabled'))
+ return req.get_response(app)
+
+ def _get_flavor(self, body):
+ return jsonutils.loads(body).get('flavor')
+
+ def _get_flavors(self, body):
+ return jsonutils.loads(body).get('flavors')
+
+ def assertFlavorDisabled(self, flavor, disabled):
+ self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)
+
+ def test_show(self):
+ res = self._make_request('/v3/flavors/1')
+ self.assertEqual(res.status_int, 200, res.body)
+ self.assertFlavorDisabled(self._get_flavor(res.body), 'False')
+
+ def test_detail(self):
+ res = self._make_request('/v3/flavors/detail')
+
+ self.assertEqual(res.status_int, 200, res.body)
+ flavors = self._get_flavors(res.body)
+ self.assertFlavorDisabled(flavors[0], 'False')
+ self.assertFlavorDisabled(flavors[1], 'True')
+
+
+class FlavorDisabledXmlTest(FlavorDisabledTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % flavor_disabled.FlavorDisabled.namespace
+
+ def _get_flavor(self, body):
+ return etree.XML(body)
+
+ def _get_flavors(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_images.py b/nova/tests/api/openstack/compute/plugins/v3/test_images.py
new file mode 100644
index 000000000..a35dc6e51
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_images.py
@@ -0,0 +1,1336 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests of the new image services, both as a service layer,
+and as a WSGI layer
+"""
+
+import urlparse
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute import images
+from nova.api.openstack.compute.views import images as images_view
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova.image import glance
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.tests import matchers
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+
+
+class ImagesControllerTest(test.TestCase):
+ """
+ Test of the OpenStack API /images application controller w/Glance.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(ImagesControllerTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ fakes.stub_out_compute_api_backup(self.stubs)
+ fakes.stub_out_glance(self.stubs)
+
+ self.controller = images.Controller()
+
+ def test_get_image(self):
+ fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
+ actual_image = self.controller.show(fake_req, '124')
+
+ href = "http://localhost/v2/fake/images/124"
+ bookmark = "http://localhost/fake/images/124"
+ alternate = "%s/fake/images/124" % glance.generate_glance_url()
+ server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
+ server_href = "http://localhost/v2/fake/servers/" + server_uuid
+ server_bookmark = "http://localhost/fake/servers/" + server_uuid
+
+ expected_image = {
+ "image": {
+ "id": "124",
+ "name": "queued snapshot",
+ "updated": NOW_API_FORMAT,
+ "created": NOW_API_FORMAT,
+ "status": "SAVING",
+ "progress": 25,
+ "minDisk": 0,
+ "minRam": 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "metadata": {
+ "instance_uuid": server_uuid,
+ "user_id": "fake",
+ },
+ "links": [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark,
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate
+ }],
+ },
+ }
+
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
+
+ def test_get_image_with_custom_prefix(self):
+ self.flags(osapi_compute_link_prefix='https://zoo.com:42',
+ osapi_glance_link_prefix='http://circus.com:34')
+ fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
+ actual_image = self.controller.show(fake_req, '124')
+ href = "https://zoo.com:42/v2/fake/images/124"
+ bookmark = "https://zoo.com:42/fake/images/124"
+ alternate = "http://circus.com:34/fake/images/124"
+ server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
+ server_href = "https://zoo.com:42/v2/fake/servers/" + server_uuid
+ server_bookmark = "https://zoo.com:42/fake/servers/" + server_uuid
+
+ expected_image = {
+ "image": {
+ "id": "124",
+ "name": "queued snapshot",
+ "updated": NOW_API_FORMAT,
+ "created": NOW_API_FORMAT,
+ "status": "SAVING",
+ "progress": 25,
+ "minDisk": 0,
+ "minRam": 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "metadata": {
+ "instance_uuid": server_uuid,
+ "user_id": "fake",
+ },
+ "links": [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark,
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate
+ }],
+ },
+ }
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
+
+ def test_get_image_404(self):
+ fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, fake_req, 'unknown')
+
+ def test_get_image_details(self):
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
+ response = self.controller.detail(request)
+ response_list = response["images"]
+
+ server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
+ server_href = "http://localhost/v2/fake/servers/" + server_uuid
+ server_bookmark = "http://localhost/fake/servers/" + server_uuid
+ alternate = "%s/fake/images/%s"
+
+ expected = [{
+ 'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 10,
+ 'minRam': 128,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/123",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate % (glance.generate_glance_url(), 123),
+ }],
+ },
+ {
+ 'id': '124',
+ 'name': 'queued snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 25,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/124",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate % (glance.generate_glance_url(), 124),
+ }],
+ },
+ {
+ 'id': '125',
+ 'name': 'saving snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 50,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/125",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/125",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/125" % glance.generate_glance_url()
+ }],
+ },
+ {
+ 'id': '126',
+ 'name': 'active snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/126",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/126",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/126" % glance.generate_glance_url()
+ }],
+ },
+ {
+ 'id': '127',
+ 'name': 'killed snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ERROR',
+ 'progress': 0,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/127",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/127",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/127" % glance.generate_glance_url()
+ }],
+ },
+ {
+ 'id': '128',
+ 'name': 'deleted snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'DELETED',
+ 'progress': 0,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/128",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/128",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/128" % glance.generate_glance_url()
+ }],
+ },
+ {
+ 'id': '129',
+ 'name': 'pending_delete snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'DELETED',
+ 'progress': 0,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/129",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/129",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/129" % glance.generate_glance_url()
+ }],
+ },
+ {
+ 'id': '130',
+ 'name': None,
+ 'metadata': {},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 0,
+ 'minRam': 0,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/130",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/130",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": "%s/fake/images/130" % glance.generate_glance_url()
+ }],
+ },
+ ]
+
+ self.assertThat(expected, matchers.DictListMatches(response_list))
+
+ def test_get_image_details_with_limit(self):
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
+ response = self.controller.detail(request)
+ response_list = response["images"]
+ response_links = response["images_links"]
+
+ server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
+ server_href = "http://localhost/v2/fake/servers/" + server_uuid
+ server_bookmark = "http://localhost/fake/servers/" + server_uuid
+ alternate = "%s/fake/images/%s"
+
+ expected = [{
+ 'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'minDisk': 10,
+ 'progress': 100,
+ 'minRam': 128,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/123",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate % (glance.generate_glance_url(), 123),
+ }],
+ },
+ {
+ 'id': '124',
+ 'name': 'queued snapshot',
+ 'metadata': {
+ u'instance_uuid': server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'minDisk': 0,
+ 'progress': 25,
+ 'minRam': 0,
+ 'server': {
+ 'id': server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/124",
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": alternate % (glance.generate_glance_url(), 124),
+ }],
+ }]
+
+ self.assertThat(expected, matchers.DictListMatches(response_list))
+
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/v2/fake/images', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+
+ self.assertThat({'limit': ['2'], 'marker': ['124']},
+ matchers.DictMatches(params))
+
+ def test_image_detail_filter_with_name(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'name': 'testname'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
+ '?name=testname')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_with_status(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'status': 'active'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
+ '?status=ACTIVE')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_with_property(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'property-test': '3'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
+ '?property-test=3')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_server_href(self):
+ image_service = self.mox.CreateMockAnything()
+ uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
+ ref = 'http://localhost:8774/servers/' + uuid
+ url = '/v2/fake/images/detail?server=' + ref
+ filters = {'property-instance_uuid': uuid}
+ request = fakes.HTTPRequest.blank(url)
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_server_uuid(self):
+ image_service = self.mox.CreateMockAnything()
+ uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
+ url = '/v2/fake/images/detail?server=' + uuid
+ filters = {'property-instance_uuid': uuid}
+ request = fakes.HTTPRequest.blank(url)
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_changes_since(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'changes-since': '2011-01-24T17:08Z'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
+ '?changes-since=2011-01-24T17:08Z')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_with_type(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'property-image_type': 'BASE'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail?type=BASE')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_filter_not_supported(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {'status': 'active'}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail?status='
+ 'ACTIVE&UNSUPPORTEDFILTER=testname')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_no_filters(self):
+ image_service = self.mox.CreateMockAnything()
+ filters = {}
+ request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
+ context = request.environ['nova.context']
+ image_service.detail(context, filters=filters).AndReturn([])
+ self.mox.ReplayAll()
+ controller = images.Controller(image_service=image_service)
+ controller.detail(request)
+
+ def test_image_detail_invalid_marker(self):
+ class InvalidImageService(object):
+
+ def detail(self, *args, **kwargs):
+ raise exception.Invalid('meow')
+
+ request = fakes.HTTPRequest.blank('/v2/images?marker=invalid')
+ controller = images.Controller(image_service=InvalidImageService())
+ self.assertRaises(webob.exc.HTTPBadRequest, controller.detail, request)
+
+ def test_generate_alternate_link(self):
+ view = images_view.ViewBuilder()
+ request = fakes.HTTPRequest.blank('/v2/fake/images/1')
+ generated_url = view._get_alternate_link(request, 1)
+ actual_url = "%s/fake/images/1" % glance.generate_glance_url()
+ self.assertEqual(generated_url, actual_url)
+
+ def test_delete_image(self):
+ request = fakes.HTTPRequest.blank('/v2/fake/images/124')
+ request.method = 'DELETE'
+ response = self.controller.delete(request, '124')
+ self.assertEqual(response.status_int, 204)
+
+ def test_delete_deleted_image(self):
+ """If you try to delete a deleted image, you get back 403 Forbidden."""
+
+ deleted_image_id = 128
+ # see nova.tests.api.openstack.fakes:_make_image_fixtures
+
+ request = fakes.HTTPRequest.blank(
+ '/v2/fake/images/%s' % deleted_image_id)
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
+ request, '%s' % deleted_image_id)
+
+ def test_delete_image_not_found(self):
+ request = fakes.HTTPRequest.blank('/v2/fake/images/300')
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, '300')
+
+
+class ImageXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
+ SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
+ SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
+ IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
+ IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
+ IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
+
+ def test_xml_declaration(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 10,
+ 'minDisk': 100,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_zero_metadata(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {},
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_image_no_metadata_key(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_no_server(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+        self.assertIsNone(server_root)
+
+ def test_show_with_min_ram(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 256,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minRam']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_with_min_disk(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minDisk': 5,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minDisk']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images_index')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_with_links(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ],
+ 'images_links': [
+ {
+ 'rel': 'next',
+ 'href': self.IMAGE_NEXT % (2, 2),
+ }
+ ],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images_index')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ # Check images_links
+ images_links = root.findall('{0}link'.format(ATOMNS))
+ for i, link in enumerate(fixture['images_links']):
+ for key, value in link.items():
+ self.assertEqual(images_links[i].get(key), value)
+
+ def test_index_zero_images(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixtures = {
+ 'images': [],
+ }
+
+ output = serializer.serialize(fixtures)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images_index')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 0)
+
+ def test_detail(self):
+ serializer = images.ImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': '2',
+ 'name': 'Image2',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'SAVING',
+ 'progress': 80,
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py
new file mode 100644
index 000000000..783275ea2
--- /dev/null
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py
@@ -0,0 +1,84 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute.contrib import server_diagnostics
+from nova.api.openstack import wsgi
+from nova.compute import api as compute_api
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+UUID = 'abc'
+
+
+def fake_get_diagnostics(self, _context, instance_uuid):
+ return {'data': 'Some diagnostic info'}
+
+
+def fake_instance_get(self, _context, instance_uuid):
+ if instance_uuid != UUID:
+ raise Exception("Invalid UUID")
+ return {'uuid': instance_uuid}
+
+
+class ServerDiagnosticsTest(test.TestCase):
+
+ def setUp(self):
+ super(ServerDiagnosticsTest, self).setUp()
+ self.flags(verbose=True,
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_diagnostics'])
+ self.stubs.Set(compute_api.API, 'get_diagnostics',
+ fake_get_diagnostics)
+ self.stubs.Set(compute_api.API, 'get', fake_instance_get)
+
+ self.router = compute.APIRouter(init_only=('servers', 'diagnostics'))
+
+ def test_get_diagnostics(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/%s/diagnostics' % UUID)
+ res = req.get_response(self.router)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(output, {'data': 'Some diagnostic info'})
+
+
+class TestServerDiagnosticsXMLSerializer(test.TestCase):
+ namespace = wsgi.XMLNS_V11
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def test_index_serializer(self):
+ serializer = server_diagnostics.ServerDiagnosticsTemplate()
+ exemplar = dict(diag1='foo', diag2='bar')
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('diagnostics', self._tag(tree))
+ self.assertEqual(len(tree), len(exemplar))
+ for child in tree:
+ tag = self._tag(child)
+            self.assertIn(tag, exemplar)
+ self.assertEqual(child.text, exemplar[tag])
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index b9dc72b1f..3dbc7bfea 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -32,14 +32,18 @@ from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
+from nova import utils
TEST_LIMITS = [
- limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
- limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
- limits.Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
- limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
- limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
+ limits.Limit("GET", "/delayed", "^/delayed", 1,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "/servers", "^/servers", 3,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "/servers", "^/servers", 5,
+ utils.TIME_UNITS['MINUTE']),
]
NS = {
'atom': 'http://www.w3.org/2005/Atom',
@@ -312,7 +316,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
req, 1, {})
-class TestLimiter(limits.Limiter):
+class MockLimiter(limits.Limiter):
pass
@@ -331,12 +335,12 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
super(LimitMiddlewareTest, self).setUp()
_limits = '(GET, *, .*, 1, MINUTE)'
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
- "%s.TestLimiter" %
+ "%s.MockLimiter" %
self.__class__.__module__)
def test_limit_class(self):
# Test that middleware selected correct limiter class.
- assert isinstance(self.app._limiter, TestLimiter)
+ assert isinstance(self.app._limiter, MockLimiter)
def test_good_request(self):
# Test successful GET request through middleware.
@@ -485,8 +489,8 @@ class ParseLimitsTest(BaseLimitTestSuite):
self.assertEqual([t.value for t in l], expected)
# ...and the units...
- expected = [limits.PER_MINUTE, limits.PER_HOUR,
- limits.PER_SECOND, limits.PER_DAY]
+ expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
+ utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
self.assertEqual([t.unit for t in l], expected)
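
The switch above from the limits.PER_* constants to utils.TIME_UNITS assumes a simple name-to-seconds mapping. A minimal sketch of that mapping, for orientation only (the real table lives in nova/utils.py and may carry more entries):

    # Assumed shape of utils.TIME_UNITS: unit name -> window length in seconds.
    TIME_UNITS = {
        'SECOND': 1,
        'MINUTE': 60,
        'HOUR': 3600,
        'DAY': 86400,
    }

    # e.g. limits.Limit("POST", "*", ".*", 7, TIME_UNITS['MINUTE'])
    # expresses "at most 7 POST requests per 60-second window".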
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 8eebec613..82bb6b868 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -3253,34 +3253,8 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
- def test_create_instance_with_config_drive_as_id(self):
- self.ext_mgr.extensions = {'os-config-drive': 'fake'}
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/v2/fake/flavors/3'
- body = {
- 'server': {
- 'name': 'config_drive_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- 'personality': {},
- 'config_drive': image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
def test_create_instance_with_bad_config_drive(self):
+ # Test with an image href as config drive value.
self.ext_mgr.extensions = {'os-config-drive': 'fake'}
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/v2/fake/flavors/3'
@@ -3294,7 +3268,7 @@ class ServersControllerCreateTest(test.TestCase):
'open': 'stack',
},
'personality': {},
- 'config_drive': 'asdf',
+ 'config_drive': image_href,
},
}
@@ -3302,7 +3276,6 @@ class ServersControllerCreateTest(test.TestCase):
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
-
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 6c30b9a9e..d0239885d 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -464,7 +464,8 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=datetime.datetime.utcnow(),
- terminated_at=datetime.datetime.utcnow()):
+ terminated_at=datetime.datetime.utcnow(),
+ availability_zone=''):
if user_id is None:
user_id = 'fake_user'
@@ -535,7 +536,7 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
"scheduled_at": timeutils.utcnow(),
"launched_at": launched_at,
"terminated_at": terminated_at,
- "availability_zone": "",
+ "availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": "",
"locked": False,
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e2783641c..4fbf805f1 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -73,6 +73,11 @@ from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
+from nova.virt.event import EVENT_LIFECYCLE_PAUSED
+from nova.virt.event import EVENT_LIFECYCLE_RESUMED
+from nova.virt.event import EVENT_LIFECYCLE_STARTED
+from nova.virt.event import EVENT_LIFECYCLE_STOPPED
+from nova.virt.event import LifecycleEvent
from nova.virt import fake
from nova.volume import cinder
@@ -1083,6 +1088,103 @@ class ComputeTestCase(BaseTestCase):
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
+ def test_allocate_network_succeeds_after_retries(self):
+ # Undo setUp() stubs as this is a true unit test
+ self.stubs.UnsetAll()
+ self.flags(network_allocate_retries=8)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+ self.mox.StubOutWithMock(time, 'sleep')
+
+ instance = {}
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+ final_result = 'meow'
+
+ expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
+
+ for sleep_time in expected_sleep_times:
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ conductor_api=self.compute.conductor_api,
+ security_groups=sec_groups).AndRaise(
+ test.TestingException())
+ time.sleep(sleep_time)
+
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ conductor_api=self.compute.conductor_api,
+ security_groups=sec_groups).AndReturn(final_result)
+
+ self.mox.ReplayAll()
+
+ res = self.compute._allocate_network_async(self.context, instance,
+ req_networks,
+ macs,
+ sec_groups,
+ is_vpn)
+ self.assertEqual(final_result, res)
+
+ def test_allocate_network_fails(self):
+ # Undo setUp() stubs as this is a true unit test
+ self.stubs.UnsetAll()
+ self.flags(network_allocate_retries=0)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+
+ instance = {}
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ conductor_api=self.compute.conductor_api,
+ security_groups=sec_groups).AndRaise(test.TestingException())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute._allocate_network_async,
+ self.context, instance, req_networks, macs,
+ sec_groups, is_vpn)
+
+ def test_allocate_network_neg_conf_value_treated_as_zero(self):
+ # Undo setUp() stubs as this is a true unit test
+ self.stubs.UnsetAll()
+ self.flags(network_allocate_retries=-1)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+
+ instance = {}
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+
+ # Only attempted once.
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ conductor_api=self.compute.conductor_api,
+ security_groups=sec_groups).AndRaise(test.TestingException())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute._allocate_network_async,
+ self.context, instance, req_networks, macs,
+ sec_groups, is_vpn)
+
def test_run_instance_dealloc_network_instance_not_found(self):
"""spawn network deallocate test.
@@ -5315,6 +5417,38 @@ class ComputeTestCase(BaseTestCase):
self._test_sync_to_stop(power_state.RUNNING, vs, ps,
stop=False)
+ def _test_lifecycle_event(self, lifecycle_event, power_state):
+ instance = self._create_fake_instance()
+ uuid = instance['uuid']
+
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+        if power_state is not None:
+ self.compute._sync_instance_power_state(
+ mox.IgnoreArg(),
+ mox.ContainsKeyValue('uuid', uuid),
+ power_state)
+ self.mox.ReplayAll()
+ self.compute.handle_events(LifecycleEvent(uuid, lifecycle_event))
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_lifecycle_events(self):
+ self._test_lifecycle_event(EVENT_LIFECYCLE_STOPPED,
+ power_state.SHUTDOWN)
+ self._test_lifecycle_event(EVENT_LIFECYCLE_STARTED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(EVENT_LIFECYCLE_PAUSED,
+ power_state.PAUSED)
+ self._test_lifecycle_event(EVENT_LIFECYCLE_RESUMED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(-1, None)
+
+ def test_lifecycle_event_non_existent_instance(self):
+ # No error raised for non-existent instance because of inherent race
+ # between database updates and hypervisor events. See bug #1180501.
+ event = LifecycleEvent('does-not-exist', EVENT_LIFECYCLE_STOPPED)
+ self.compute.handle_events(event)
+
class ComputeAPITestCase(BaseTestCase):
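
The lifecycle tests above imply a small translation table from hypervisor lifecycle events to nova power states; events outside the table (the -1 case) trigger no power-state sync at all. A sketch of the assumed mapping, exercised via _test_lifecycle_event():

    from nova.compute import power_state
    from nova.virt import event as virtevent

    # Assumed translation; the driver's real table may carry more entries.
    LIFECYCLE_TO_POWER_STATE = {
        virtevent.EVENT_LIFECYCLE_STOPPED: power_state.SHUTDOWN,
        virtevent.EVENT_LIFECYCLE_STARTED: power_state.RUNNING,
        virtevent.EVENT_LIFECYCLE_PAUSED: power_state.PAUSED,
        virtevent.EVENT_LIFECYCLE_RESUMED: power_state.RUNNING,
    }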
@@ -9592,42 +9726,31 @@ class CheckConfigDriveTestCase(test.TestCase):
def setUp(self):
super(CheckConfigDriveTestCase, self).setUp()
self.compute_api = compute.API()
- self.context = context.RequestContext(
- 'fake_user_id', 'fake_project_id')
-
- self.called = called = {'show': False}
-
- def fake_get_remote_image_service(context, image_id):
- class FakeGlance(object):
- def show(self, context, image_id):
- called['show'] = True
-
- return FakeGlance(), image_id
-
- self.stubs.Set(glance, 'get_remote_image_service',
- fake_get_remote_image_service)
- def tearDown(self):
- self.stubs.UnsetAll()
- super(CheckConfigDriveTestCase, self).tearDown()
-
- def assertCheck(self, expected, config_drive):
+ def _assertCheck(self, expected, config_drive):
self.assertEqual(expected,
- self.compute_api._check_config_drive(
- self.context, config_drive))
-
- def test_value_is_none(self):
- self.assertFalse(self.called['show'])
- self.assertCheck((None, None), None)
- self.assertFalse(self.called['show'])
-
- def test_bool_string_or_id(self):
- self.assertCheck((None, True), "true")
- self.assertCheck((None, True), 1)
- self.assertCheck((None, True), 't')
-
- def test_value_is_image_id(self):
- self.assertCheck(("fake-uuid", None), "fake-uuid")
+ self.compute_api._check_config_drive(config_drive))
+
+ def _assertInvalid(self, config_drive):
+ self.assertRaises(exception.ConfigDriveInvalidValue,
+ self.compute_api._check_config_drive,
+ config_drive)
+
+ def test_config_drive_false_values(self):
+ self._assertCheck('', None)
+ self._assertCheck('', '')
+ self._assertCheck('', 'False')
+ self._assertCheck('', 'f')
+ self._assertCheck('', '0')
+
+ def test_config_drive_true_values(self):
+ self._assertCheck(True, 'True')
+ self._assertCheck(True, 't')
+ self._assertCheck(True, '1')
+
+ def test_config_drive_bogus_values_raise(self):
+ self._assertInvalid('asd')
+ self._assertInvalid(uuidutils.generate_uuid())
class CheckRequestedImageTestCase(test.TestCase):
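
The reworked CheckConfigDriveTestCase above expects _check_config_drive to normalize boolean-like strings and to reject anything else (image ids are no longer accepted). A standalone sketch of that contract; the real method raises exception.ConfigDriveInvalidValue and its internals may differ:

    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
    FALSE_STRINGS = ('', '0', 'f', 'false', 'off', 'n', 'no')

    def check_config_drive(config_drive):
        if config_drive is None:
            return ''
        value = str(config_drive).lower()
        if value in FALSE_STRINGS:
            return ''      # normalized: no config drive requested
        if value in TRUE_STRINGS:
            return True    # normalized: attach a config drive
        raise ValueError('invalid config_drive value: %r' % config_drive)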
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 7df829a53..609c2164c 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -246,9 +246,8 @@ class _BaseTestCase(object):
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
self.mox.ReplayAll()
- result = self.conductor.aggregate_metadata_delete(self.context,
- aggregate,
- 'fake')
+ self.conductor.aggregate_metadata_delete(self.context, aggregate,
+ 'fake')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index deaf8d035..0b49210fa 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -286,7 +286,7 @@ class DbApiTestCase(DbTestCase):
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- inst3 = self.create_instance_with_args()
+ self.create_instance_with_args()
db.instance_destroy(self.context, inst1['uuid'])
result = db.instance_get_all_by_filters(self.context,
{'deleted': True})
@@ -296,8 +296,8 @@ class DbApiTestCase(DbTestCase):
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
- inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- inst3 = self.create_instance_with_args()
+ self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ self.create_instance_with_args()
db.instance_destroy(self.context, inst1['uuid'])
result = db.instance_get_all_by_filters(self.context,
{'deleted': True,
@@ -948,8 +948,8 @@ class AggregateDBApiTestCase(test.TestCase):
values2 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
- a3 = _create_aggregate(context=ctxt, values=values2)
+ _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate(context=ctxt, values=values2)
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
@@ -958,9 +958,9 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
- a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
- a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
@@ -970,8 +970,8 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
- a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['foo.openstack.org'], metadata={'good': 'value'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
@@ -988,9 +988,9 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
- a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
- a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['foo.openstack.org'], metadata={'good': 'value'})
r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
self.assertEqual(r1, {'foo.openstack.org': set(['value'])})
@@ -1464,7 +1464,7 @@ class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_reservation_expire(self):
self.values['expire'] = datetime.datetime.utcnow() + datetime.\
timedelta(days=1)
- reservations = self._quota_reserve()
+ self._quota_reserve()
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1',
@@ -1628,8 +1628,7 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_security_group_get(self):
security_group1 = self._create_security_group({})
- security_group2 = self._create_security_group(
- {'name': 'fake_sec_group2'})
+ self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances'])
@@ -1742,8 +1741,8 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
{'name': 'fake2', 'project_id': 'fake_proj1'},
{'name': 'fake3', 'project_id': 'fake_proj2'},
]
- security_groups = [self._create_security_group(vals)
- for vals in values]
+ for vals in values:
+ self._create_security_group(vals)
real = []
for project in ('fake_proj1', 'fake_proj2'):
@@ -1776,7 +1775,7 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.ctxt.project_id,
'default'))
- default_group = db.security_group_ensure_default(self.ctxt)
+ db.security_group_ensure_default(self.ctxt)
self.assertTrue(db.security_group_exists(self.ctxt,
self.ctxt.project_id,
@@ -1896,7 +1895,7 @@ class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_service_get(self):
service1 = self._create_service({})
- service2 = self._create_service({'host': 'some_other_fake_host'})
+ self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
@@ -1921,7 +1920,7 @@ class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
- service2 = self._create_service({'host': 'host2', 'topic': 'topic2'})
+ self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
@@ -3693,24 +3692,24 @@ class VolumeUsageDBApiTestCase(test.TestCase):
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
- wr_req=30, wr_bytes=40,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- user_id='fake-user-uuid1',
- availability_zone='fake-az')
- vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid2',
- project_id='fake-project-uuid2',
- user_id='fake-user-uuid2',
- availability_zone='fake-az')
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
- wr_req=3000, wr_bytes=4000,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- user_id='fake-user-uuid1',
- availability_zone='fake-az')
+ db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
+ wr_req=30, wr_bytes=40,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ user_id='fake-user-uuid1',
+ availability_zone='fake-az')
+ db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid2',
+ project_id='fake-project-uuid2',
+ user_id='fake-user-uuid2',
+ availability_zone='fake-az')
+ db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
+ wr_req=3000, wr_bytes=4000,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ user_id='fake-user-uuid1',
+ availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
@@ -3732,44 +3731,44 @@ class VolumeUsageDBApiTestCase(test.TestCase):
timeutils.utcnow().AndReturn(now3)
self.mox.ReplayAll()
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az')
+ db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
- wr_req=400, wr_bytes=500,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az',
- update_totals=True)
+ db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
+ wr_req=400, wr_bytes=500,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az',
+ update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
- wr_req=500, wr_bytes=600,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- availability_zone='fake-az',
- user_id='fake-user-uuid')
+ db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
+ wr_req=500, wr_bytes=600,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
- vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
- wr_req=600, wr_bytes=700,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az',
- update_totals=True)
+ db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
+ wr_req=600, wr_bytes=700,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az',
+ update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
@@ -4714,7 +4713,7 @@ class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.ctxt, 'p1', 'nonexitent_resource')
def test_quota_usage_get(self):
- reservations = _quota_reserve(self.ctxt, 'p1')
+ _quota_reserve(self.ctxt, 'p1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'res0')
expected = {'resource': 'res0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
@@ -4722,7 +4721,7 @@ class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
- reservations = _quota_reserve(self.ctxt, 'p1')
+ _quota_reserve(self.ctxt, 'p1')
expected = {'project_id': 'p1',
'res0': {'in_use': 0, 'reserved': 0},
'res1': {'in_use': 1, 'reserved': 1},
@@ -4735,8 +4734,7 @@ class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.ctxt, 'p1', 'resource', in_use=42)
def test_quota_usage_update(self):
- reservations = _quota_reserve(self.ctxt, 'p1')
- until_refresh = datetime.datetime.now() + datetime.timedelta(days=1)
+ _quota_reserve(self.ctxt, 'p1')
db.quota_usage_update(self.ctxt, 'p1', 'res0', in_use=42, reserved=43)
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'res0')
expected = {'resource': 'res0', 'project_id': 'p1',
@@ -4792,7 +4790,7 @@ class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
- qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
+ db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
@@ -4951,6 +4949,19 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
+ def test_compute_node_update_override_updated_at(self):
+ # Update the record once so updated_at is set.
+ first = db.compute_node_update(self.ctxt, self.item['id'],
+ {'free_ram_mb': '12'})
+ self.assertIsNotNone(first['updated_at'])
+
+ # Update a second time. Make sure that the updated_at value we send
+ # is overridden.
+ second = db.compute_node_update(self.ctxt, self.item['id'],
+ {'updated_at': first.updated_at,
+ 'free_ram_mb': '13'})
+ self.assertNotEqual(first['updated_at'], second['updated_at'])
+
def test_compute_node_stat_unchanged(self):
# don't update unchanged stat values:
stats = self.item['stats']
diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py
index 3674a7c45..af206632d 100644
--- a/nova/tests/db/test_migration_utils.py
+++ b/nova/tests/db/test_migration_utils.py
@@ -19,7 +19,7 @@ import warnings
from migrate.changeset import UniqueConstraint
from sqlalchemy.dialects import mysql
from sqlalchemy import Boolean, Index, Integer, DateTime, String
-from sqlalchemy import MetaData, Table, Column
+from sqlalchemy import MetaData, Table, Column, ForeignKey
from sqlalchemy.engine import reflection
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.exc import SAWarning
@@ -514,3 +514,28 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
# but sqlalchemy will set it to NullType.
self.assertTrue(isinstance(table.c.foo.type, NullType))
self.assertTrue(isinstance(table.c.deleted.type, Boolean))
+
+ def test_drop_unique_constraint_in_sqlite_fk_recreate(self):
+ engine = self.engines['sqlite']
+ meta = MetaData()
+ meta.bind = engine
+ parent_table = Table('table0', meta,
+ Column('id', Integer, primary_key=True),
+ Column('foo', Integer))
+ parent_table.create()
+ table_name = 'table1'
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('baz', Integer),
+ Column('bar', Integer, ForeignKey("table0.id")),
+ UniqueConstraint('baz', name='constr1'))
+ table.create()
+ utils.drop_unique_constraint(engine, table_name, 'constr1', 'baz')
+
+ insp = reflection.Inspector.from_engine(engine)
+ f_keys = insp.get_foreign_keys(table_name)
+ self.assertEqual(len(f_keys), 1)
+ f_key = f_keys[0]
+ self.assertEqual(f_key['referred_table'], 'table0')
+ self.assertEqual(f_key['referred_columns'], ['id'])
+ self.assertEqual(f_key['constrained_columns'], ['bar'])
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py
index a9ea29e69..7e28fb1d5 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/db/test_migrations.py
@@ -806,8 +806,6 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
def _check_153(self, engine, data):
fake_types, fake_instances = data
# NOTE(danms): Fetch all the tables and data from scratch after change
- instances = db_utils.get_table(engine, 'instances')
- instance_types = db_utils.get_table(engine, 'instance_types')
sys_meta = db_utils.get_table(engine, 'instance_system_metadata')
# Collect all system metadata, indexed by instance_uuid
@@ -1073,7 +1071,6 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
for result in results:
the_id = result['id']
key = result['key']
- value = result['value']
original = data[the_id]
if key == 'instance_type_baz':
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 63589848b..da43115d0 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -133,7 +133,9 @@ policy_data = """
"compute_extension:fixed_ips": "",
"compute_extension:v3:os-fixed-ips": "",
"compute_extension:flavor_access": "",
+ "compute_extension:v3:os-flavor-access": "",
"compute_extension:flavor_disabled": "",
+ "compute_extension:v3:os-flavor-disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 380b69079..00a989a70 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -3184,7 +3184,7 @@ class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
- uuid = self._post_server()
+ self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
@@ -3412,18 +3412,18 @@ class ConfigDriveSampleJsonTest(ServersSampleBase):
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
- # config drive can be an uuid or empty value
- subs['cdrive'] = '(%s)?' % subs['uuid']
+        # config drive can be a string representing True or an empty value
+ subs['cdrive'] = '.*'
self._verify_response('server-config-drive-get-resp', subs,
response, 200)
def test_config_drive_detail(self):
- uuid = self._post_server()
+ self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
- # config drive can be an uuid or empty value
- subs['cdrive'] = '(%s)?' % subs['uuid']
+        # config drive can be a string representing True or an empty value
+ subs['cdrive'] = '.*'
self._verify_response('servers-config-drive-details-resp',
subs, response, 200)
@@ -3496,7 +3496,7 @@ class FlavorAccessSampleJsonTests(ApiSampleTestBase):
def test_flavor_access_add_tenant(self):
self._create_flavor()
- response = self._add_tenant()
+ self._add_tenant()
def test_flavor_access_remove_tenant(self):
self._create_flavor()
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 5c7f3828d..e8368d06a 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -861,7 +861,6 @@ class LinuxNetworkTestCase(test.TestCase):
self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None)
net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net)
- one_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net)
two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net)
diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py
index bb3d91f55..38a27b51c 100644
--- a/nova/tests/network/test_network_info.py
+++ b/nova/tests/network/test_network_info.py
@@ -372,7 +372,7 @@ class NetworkInfoTests(test.TestCase):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
{'address': 'bb:bb:bb:bb:bb:bb'})])
- deserialized = model.NetworkInfo.hydrate(ninfo)
+ model.NetworkInfo.hydrate(ninfo)
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index d97dcae57..2ddeb72bf 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -714,7 +714,6 @@ class TestQuantumv2(test.TestCase):
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.delete_port(port_data[0]['id'])
- nets = [port_data[0]['network_id']]
quantumv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
@@ -1056,7 +1055,6 @@ class TestQuantumv2(test.TestCase):
def test_allocate_floating_ip_with_pool_id(self):
api = quantumapi.API()
- pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
@@ -1100,7 +1098,6 @@ class TestQuantumv2(test.TestCase):
def test_release_floating_ip_associated(self):
api = quantumapi.API()
address = self.fip_associated['floating_ip_address']
- fip_id = self.fip_associated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
@@ -1144,7 +1141,6 @@ class TestQuantumv2(test.TestCase):
api = quantumapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
- fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
@@ -1178,7 +1174,6 @@ class TestQuantumv2(test.TestCase):
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
- zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'network_id': network_id}
diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py
index 40ad8bb81..03a270386 100644
--- a/nova/tests/objects/test_objects.py
+++ b/nova/tests/objects/test_objects.py
@@ -178,6 +178,31 @@ class TestUtils(test.TestCase):
self.assertEqual(utils.dt_deserializer(None, None), None)
self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo')
+ def test_obj_to_primitive_list(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ pass
+ mylist = MyList()
+ mylist.objects = [1, 2, 3]
+ self.assertEqual([1, 2, 3], base.obj_to_primitive(mylist))
+
+ def test_obj_to_primitive_dict(self):
+ myobj = MyObj()
+ myobj.foo = 1
+ myobj.bar = 'foo'
+ self.assertEqual({'foo': 1, 'bar': 'foo'},
+ base.obj_to_primitive(myobj))
+
+ def test_obj_to_primitive_recursive(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ pass
+
+ mylist = MyList()
+ mylist.objects = [MyObj(), MyObj()]
+ for i, value in enumerate(mylist):
+ value.foo = i
+ self.assertEqual([{'foo': 0}, {'foo': 1}],
+ base.obj_to_primitive(mylist))
+
class _BaseTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 48d4db1fd..8832f5c2b 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -536,6 +536,64 @@ class HostFiltersTestCase(test.NoDBTestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
+ def test_aggregate_ram_filter_value_error(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateRamFilter']()
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': self.context,
+ 'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
+ 'capabilities': capabilities, 'service': service})
+ self._create_aggregate_with_host(name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'ram_allocation_ratio': 'XXX'})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_default_value(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateRamFilter']()
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': self.context,
+ 'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+ 'capabilities': capabilities, 'service': service})
+ # False: fallback to default flag w/o aggregates
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._create_aggregate_with_host(name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'ram_allocation_ratio': '2.0'})
+ # True: use ratio from aggregates
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_conflict_values(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateRamFilter']()
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': self.context,
+ 'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+ 'capabilities': capabilities, 'service': service})
+ self._create_aggregate_with_host(name='fake_aggregate1',
+ hosts=['host1'],
+ metadata={'ram_allocation_ratio': '1.5'})
+ self._create_aggregate_with_host(name='fake_aggregate2',
+ hosts=['host1'],
+ metadata={'ram_allocation_ratio': '2.0'})
+ # use the minimum ratio from aggregates
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
+
def test_disk_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
@@ -1310,6 +1368,52 @@ class HostFiltersTestCase(test.NoDBTestCase):
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ def test_aggregate_core_filter_value_error(self):
+ filt_cls = self.class_map['AggregateCoreFilter']()
+ filter_properties = {'context': self.context,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 7})
+ self._create_aggregate_with_host(name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'cpu_allocation_ratio': 'XXX'})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 2, host.limits['vcpu'])
+
+ def test_aggregate_core_filter_default_value(self):
+ filt_cls = self.class_map['AggregateCoreFilter']()
+ filter_properties = {'context': self.context,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ # False: fallback to default flag w/o aggregates
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._create_aggregate_with_host(name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'cpu_allocation_ratio': '3'})
+ # True: use ratio from aggregates
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 3, host.limits['vcpu'])
+
+ def test_aggregate_core_filter_conflict_values(self):
+ filt_cls = self.class_map['AggregateCoreFilter']()
+ filter_properties = {'context': self.context,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=1)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ self._create_aggregate_with_host(name='fake_aggregate1',
+ hosts=['host1'],
+ metadata={'cpu_allocation_ratio': '2'})
+ self._create_aggregate_with_host(name='fake_aggregate2',
+ hosts=['host1'],
+ metadata={'cpu_allocation_ratio': '3'})
+ # use the minimum ratio from aggregates
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 2, host.limits['vcpu'])
+
@staticmethod
def _make_zone_request(zone, is_admin=False):
ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
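
The Aggregate*Filter tests above all encode the same selection rule for allocation ratios: aggregate metadata overrides the configured flag, unparsable metadata falls back to the flag, and when several aggregates disagree the most conservative (smallest) ratio wins. A compact sketch of that rule (helper name is illustrative):

    def effective_allocation_ratio(aggregate_values, default_ratio):
        ratios = []
        for value in aggregate_values:
            try:
                ratios.append(float(value))
            except (TypeError, ValueError):
                # Bad metadata such as 'XXX' is ignored; the flag still applies.
                pass
        # No usable aggregate metadata: fall back to the configured flag.
        # Conflicting aggregates: pick the most restrictive (minimum) ratio.
        return min(ratios) if ratios else default_ratio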
diff --git a/nova/tests/servicegroup/test_mc_servicegroup.py b/nova/tests/servicegroup/test_mc_servicegroup.py
index 9d5601ff8..9f27ecc31 100644
--- a/nova/tests/servicegroup/test_mc_servicegroup.py
+++ b/nova/tests/servicegroup/test_mc_servicegroup.py
@@ -103,15 +103,9 @@ class MemcachedServiceGroupTestCase(test.TestCase):
ServiceFixture(host3, self._binary, self._topic)).serv
serv3.start()
- service_ref1 = db.service_get_by_args(self._ctx,
- host1,
- self._binary)
- service_ref2 = db.service_get_by_args(self._ctx,
- host2,
- self._binary)
- service_ref3 = db.service_get_by_args(self._ctx,
- host3,
- self._binary)
+ db.service_get_by_args(self._ctx, host1, self._binary)
+ db.service_get_by_args(self._ctx, host2, self._binary)
+ db.service_get_by_args(self._ctx, host3, self._binary)
host1key = str("%s:%s" % (self._topic, host1))
host2key = str("%s:%s" % (self._topic, host2))
@@ -198,9 +192,7 @@ class MemcachedServiceGroupTestCase(test.TestCase):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
+ db.service_get_by_args(self._ctx, self._host, self._binary)
self.servicegroup_api = servicegroup.API()
# updating model_disconnected
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
index eb6ddd4bf..716f62e4e 100644
--- a/nova/tests/servicegroup/test_zk_driver.py
+++ b/nova/tests/servicegroup/test_zk_driver.py
@@ -40,7 +40,7 @@ class ZKServiceGroupTestCase(test.TestCase):
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
- _unused = zk.ZooKeeperDriver()
+ zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
index 3923dd9a1..0c58cd08a 100644
--- a/nova/tests/test_availability_zones.py
+++ b/nova/tests/test_availability_zones.py
@@ -179,8 +179,8 @@ class AvailabilityZoneTestCases(test.TestCase):
disabled=False)
service4 = self._create_service_with_topic('compute', 'host4',
disabled=True)
- service5 = self._create_service_with_topic('compute', 'host5',
- disabled=True)
+ self._create_service_with_topic('compute', 'host5',
+ disabled=True)
self._add_to_aggregate(service1, self.agg)
self._add_to_aggregate(service2, self.agg)
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 98cc1c3f9..eefc1bab6 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -153,7 +153,7 @@ class CinderTestCase(test.TestCase):
self.fake_client_factory.assert_called(*args, **kwargs)
def test_context_with_catalog(self):
- volume = self.api.get(self.context, '1234')
+ self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEquals(
self.fake_client_factory.client.client.management_url,
@@ -163,7 +163,7 @@ class CinderTestCase(test.TestCase):
self.flags(
cinder_endpoint_template='http://other_host:8776/v1/%(project_id)s'
)
- volume = self.api.get(self.context, '1234')
+ self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEquals(
self.fake_client_factory.client.client.management_url,
@@ -183,7 +183,7 @@ class CinderTestCase(test.TestCase):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(cinder_api_insecure=True)
- volume = self.api.get(self.context, '1234')
+ self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEquals(
self.fake_client_factory.client.client.verify_cert, False)
@@ -191,7 +191,7 @@ class CinderTestCase(test.TestCase):
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cinder_ca_certificates_file=cacert)
- volume = self.api.get(self.context, '1234')
+ self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEquals(
self.fake_client_factory.client.client.verify_cert, cacert)
@@ -199,7 +199,7 @@ class CinderTestCase(test.TestCase):
def test_cinder_http_retries(self):
retries = 42
self.flags(cinder_http_retries=retries)
- volume = self.api.get(self.context, '1234')
+ self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEquals(
self.fake_client_factory.client.client.retries, retries)
diff --git a/nova/tests/test_flavors.py b/nova/tests/test_flavors.py
index bd3f805cd..6edbddb21 100644
--- a/nova/tests/test_flavors.py
+++ b/nova/tests/test_flavors.py
@@ -69,11 +69,8 @@ class InstanceTypeTestCase(test.TestCase):
project_id = 'fake'
ctxt = context.RequestContext(user_id, project_id, is_admin=True)
flavor_id = 'flavor1'
- type_ref = flavors.create('some flavor', 256, 1, 120, 100,
- flavorid=flavor_id)
- access_ref = flavors.add_flavor_access(flavor_id,
- project_id,
- ctxt=ctxt)
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
+ flavors.add_flavor_access(flavor_id, project_id, ctxt=ctxt)
self.assertRaises(exception.FlavorAccessExists,
flavors.add_flavor_access,
flavor_id, project_id, ctxt)
@@ -92,10 +89,8 @@ class InstanceTypeTestCase(test.TestCase):
project_id = 'fake'
ctxt = context.RequestContext(user_id, project_id, is_admin=True)
flavor_id = 'flavor1'
- type_ref = flavors.create('some flavor', 256, 1, 120, 100,
- flavorid=flavor_id)
- access_ref = flavors.add_flavor_access(flavor_id, project_id,
- ctxt)
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
+ flavors.add_flavor_access(flavor_id, project_id, ctxt)
flavors.remove_flavor_access(flavor_id, project_id, ctxt)
projects = flavors.get_flavor_access_by_flavor_id(flavor_id,
@@ -107,8 +102,7 @@ class InstanceTypeTestCase(test.TestCase):
project_id = 'fake'
ctxt = context.RequestContext(user_id, project_id, is_admin=True)
flavor_id = 'flavor1'
- type_ref = flavors.create('some flavor', 256, 1, 120, 100,
- flavorid=flavor_id)
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
self.assertRaises(exception.FlavorAccessNotFound,
flavors.remove_flavor_access,
flavor_id, project_id, ctxt=ctxt)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 8cdc3e7af..2aece7ac8 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -71,6 +71,7 @@ INSTANCES = (
'info_cache': {'network_info': []},
'hostname': 'test.novadomain',
'display_name': 'my_displayname',
+ 'metadata': {}
},
)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index be3669958..37009f3df 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -227,7 +227,7 @@ class QuotaIntegrationTestCase(test.TestCase):
timeutils.advance_time_seconds(80)
- result = quota.QUOTAS.expire(self.context)
+ quota.QUOTAS.expire(self.context)
assertInstancesReserved(0)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 609280261..ada649549 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -193,6 +193,92 @@ class GetFromPathTestCase(test.TestCase):
self.assertEquals(['b_1'], f(input, "a/b"))
+class GetMyIP4AddressTestCase(test.TestCase):
+ def test_get_my_ipv4_address_with_no_ipv4(self):
+ response = """172.16.0.0/16 via 172.16.251.13 dev tun1
+172.16.251.1 via 172.16.251.13 dev tun1
+172.16.251.13 dev tun1 proto kernel scope link src 172.16.251.14
+172.24.0.0/16 via 172.16.251.13 dev tun1
+192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1"""
+
+ def fake_execute(*args, **kwargs):
+ return response, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '127.0.0.1')
+
+ def test_get_my_ipv4_address_bad_process(self):
+ def fake_execute(*args, **kwargs):
+ raise processutils.ProcessExecutionError()
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '127.0.0.1')
+
+ def test_get_my_ipv4_address_with_single_interface(self):
+ response_route = """default via 192.168.1.1 dev wlan0 proto static
+192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.137 metric 9
+"""
+ response_addr = """
+1: lo inet 127.0.0.1/8 scope host lo
+3: wlan0 inet 192.168.1.137/24 brd 192.168.1.255 scope global wlan0
+"""
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '192.168.1.137')
+
+ def test_get_my_ipv4_address_with_multi_ipv4_on_single_interface(self):
+ response_route = """
+172.18.56.0/24 dev customer proto kernel scope link src 172.18.56.22
+169.254.0.0/16 dev customer scope link metric 1031
+default via 172.18.56.1 dev customer
+"""
+ response_addr = (""
+"31: customer inet 172.18.56.22/24 brd 172.18.56.255 scope global"
+" customer\n"
+"31: customer inet 172.18.56.32/24 brd 172.18.56.255 scope global "
+"secondary customer")
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '172.18.56.22')
+
+ def test_get_my_ipv4_address_with_multiple_interfaces(self):
+ response_route = """
+169.1.9.0/24 dev eth1 proto kernel scope link src 169.1.9.10
+172.17.248.0/21 dev eth0 proto kernel scope link src 172.17.255.9
+169.254.0.0/16 dev eth0 scope link metric 1002
+169.254.0.0/16 dev eth1 scope link metric 1003
+default via 172.17.248.1 dev eth0 proto static
+"""
+ response_addr = """
+1: lo inet 127.0.0.1/8 scope host lo
+2: eth0 inet 172.17.255.9/21 brd 172.17.255.255 scope global eth0
+3: eth1 inet 169.1.9.10/24 scope global eth1
+"""
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '172.17.255.9')
+
+
class GenericUtilsTestCase(test.TestCase):
def test_parse_server_string(self):
result = utils.parse_server_string('::1')
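Taken together, the GetMyIP4AddressTestCase fixtures pin down a simple contract: find the device named on the default route in the 'ip route' output, return the first global IPv4 address listed for that device in the 'ip addr' output, and fall back to 127.0.0.1 when the commands fail or no default route is present. A rough standalone sketch of that parsing, with made-up names and without the error handling around utils.execute, might look like this:

def my_ipv4_from_outputs(route_output, addr_output):
    # route_output / addr_output stand in for the stdout of the two
    # stubbed utils.execute() calls in the tests above.
    device = None
    for line in route_output.splitlines():
        fields = line.split()
        if fields[:1] == ['default'] and 'dev' in fields:
            device = fields[fields.index('dev') + 1]
            break
    if device is None:
        return '127.0.0.1'
    for line in addr_output.splitlines():
        fields = line.split()
        if device in fields and 'inet' in fields:
            return fields[fields.index('inet') + 1].split('/')[0]
    return '127.0.0.1'

For the single-interface fixtures above this returns '192.168.1.137', and for the fixture with no default route it falls back to '127.0.0.1'.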
diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
index 3c658a7f5..5636e1706 100644
--- a/nova/tests/virt/libvirt/test_libvirt.py
+++ b/nova/tests/virt/libvirt/test_libvirt.py
@@ -26,6 +26,7 @@ import re
import shutil
import tempfile
+from eventlet import greenthread
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
@@ -43,6 +44,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
+from nova.openstack.common import processutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_network
@@ -77,6 +79,7 @@ CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
+CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
@@ -286,8 +289,9 @@ class LibvirtConnTestCase(test.TestCase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
- self.flags(instances_path='')
- self.flags(libvirt_snapshots_directory='')
+ temp_dir = self.useFixture(fixtures.TempDir()).path
+ self.flags(instances_path=temp_dir)
+ self.flags(libvirt_snapshots_directory=temp_dir)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
@@ -343,6 +347,9 @@ class LibvirtConnTestCase(test.TestCase):
'extra_specs': {},
'system_metadata': sys_meta}
+ def relpath(self, path):
+ return os.path.relpath(path, CONF.instances_path)
+
def tearDown(self):
nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
@@ -589,8 +596,8 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- # make configdrive.enabled_for() return True
- instance_ref['config_drive'] = 'ANY_ID'
+ # make configdrive.required_by() return True
+ instance_ref['config_drive'] = True
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
@@ -2078,8 +2085,8 @@ class LibvirtConnTestCase(test.TestCase):
else:
suffix = ''
if expect_kernel:
- check = (lambda t: t.find('./os/kernel').text.split(
- '/')[1], 'kernel' + suffix)
+ check = (lambda t: self.relpath(t.find('./os/kernel').text).
+ split('/')[1], 'kernel' + suffix)
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
@@ -2094,8 +2101,8 @@ class LibvirtConnTestCase(test.TestCase):
check_list.append(check)
if expect_ramdisk:
- check = (lambda t: t.find('./os/initrd').text.split(
- '/')[1], 'ramdisk' + suffix)
+ check = (lambda t: self.relpath(t.find('./os/initrd').text).
+ split('/')[1], 'ramdisk' + suffix)
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
@@ -2146,8 +2153,9 @@ class LibvirtConnTestCase(test.TestCase):
check = (lambda t: t.findall('./devices/serial')[1].get(
'type'), 'pty')
check_list.append(check)
- check = (lambda t: t.findall('./devices/serial/source')[0].get(
- 'path').split('/')[1], 'console.log')
+ check = (lambda t: self.relpath(t.findall(
+ './devices/serial/source')[0].get('path')).
+ split('/')[1], 'console.log')
check_list.append(check)
else:
check = (lambda t: t.find('./devices/console').get(
@@ -2159,16 +2167,16 @@ class LibvirtConnTestCase(test.TestCase):
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
- (lambda t: t.findall('./devices/disk/source')[0].get(
- 'file').split('/')[1], 'disk.rescue'),
- (lambda t: t.findall('./devices/disk/source')[1].get(
- 'file').split('/')[1], 'disk')]
+ (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
+ get('file')).split('/')[1], 'disk.rescue'),
+ (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
+ get('file')).split('/')[1], 'disk')]
else:
- common_checks += [(lambda t: t.findall(
- './devices/disk/source')[0].get('file').split('/')[1],
+ common_checks += [(lambda t: self.relpath(t.findall(
+ './devices/disk/source')[0].get('file')).split('/')[1],
'disk')]
- common_checks += [(lambda t: t.findall(
- './devices/disk/source')[1].get('file').split('/')[1],
+ common_checks += [(lambda t: self.relpath(t.findall(
+ './devices/disk/source')[1].get('file')).split('/')[1],
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
@@ -2683,6 +2691,80 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(self.context, instance_ref['uuid'])
+ def test_get_instance_disk_info_excludes_volumes(self):
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume1'/>"
+ "<target dev='vdc' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume2'/>"
+ "<target dev='vdd' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ GB = 1024 * 1024 * 1024
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': [
+ {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
+ {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'],
+ block_device_info=info)
+ info = jsonutils.loads(info)
+ self.assertEquals(info[0]['type'], 'raw')
+ self.assertEquals(info[0]['path'], '/test/disk')
+ self.assertEquals(info[0]['disk_size'], 10737418240)
+ self.assertEquals(info[0]['backing_file'], "")
+ self.assertEquals(info[0]['over_committed_disk_size'], 0)
+ self.assertEquals(info[1]['type'], 'qcow2')
+ self.assertEquals(info[1]['path'], '/test/disk.local')
+ self.assertEquals(info[1]['virt_disk_size'], 21474836480)
+ self.assertEquals(info[1]['backing_file'], "file")
+ self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)
+
+ db.instance_destroy(self.context, instance_ref['uuid'])
+
def test_spawn_with_network_info(self):
# Preparing mocks
def fake_none(*args, **kwargs):
@@ -3165,6 +3247,90 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
+ def test_reboot_different_ids(self):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+
+ self.flags(libvirt_wait_soft_reboot_seconds=1)
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ self.reboot_create_called = False
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+ mock_domain.shutdown()
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
+ mock_domain.ID().AndReturn('some_other_fake_id')
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_create_domain(**kwargs):
+ self.reboot_create_called = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_create_domain', fake_create_domain)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+ conn.reboot(None, instance, [])
+ self.assertTrue(self.reboot_create_called)
+
+ def test_reboot_same_ids(self):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+
+ self.flags(libvirt_wait_soft_reboot_seconds=1)
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ self.reboot_hard_reboot_called = False
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+ mock_domain.shutdown()
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_hard_reboot(*args, **kwargs):
+ self.reboot_hard_reboot_called = True
+
+ def fake_sleep(interval):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(greenthread, 'sleep', fake_sleep)
+ self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+ conn.reboot(None, instance, [])
+ self.assertTrue(self.reboot_hard_reboot_called)
+
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
@@ -3931,6 +4097,88 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
+ def test_create_domain_define_xml_fails(self):
+ """
+ Tests that the xml is logged when defining the domain fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+
+ def fake_defineXML(xml):
+ self.assertEquals(fake_xml, xml)
+ raise libvirt.libvirtError('virDomainDefineXML() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg):
+ self.log_error_called = True
+ self.assertTrue(fake_xml in msg)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock(defineXML=fake_defineXML)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_with_flags_fails(self):
+ """
+ Tests that the xml is logged when creating the domain with flags fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_createWithFlags(launch_flags):
+ raise libvirt.libvirtError('virDomainCreateWithFlags() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg):
+ self.log_error_called = True
+ self.assertTrue(fake_xml in msg)
+
+ self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain,
+ domain=fake_domain)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_enable_hairpin_fails(self):
+ """
+ Tests that the xml is logged when enabling hairpin mode for the domain
+ fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_enable_hairpin(launch_flags):
+ raise processutils.ProcessExecutionError('error')
+
+ self.log_error_called = False
+
+ def fake_error(msg):
+ self.log_error_called = True
+ self.assertTrue(fake_xml in msg)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ conn._create_domain,
+ domain=fake_domain,
+ power_on=False)
+ self.assertTrue(self.log_error_called)
+
class HostStateTestCase(test.TestCase):
@@ -4945,10 +5193,9 @@ class LibvirtDriverTestCase(test.TestCase):
inst['host'] = 'host1'
inst['root_gb'] = 10
inst['ephemeral_gb'] = 20
- inst['config_drive'] = 1
+ inst['config_drive'] = True
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
- inst['config_drive_id'] = 1
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = sys_meta
@@ -4963,7 +5210,8 @@ class LibvirtDriverTestCase(test.TestCase):
self.counter = 0
self.checked_shared_storage = False
- def fake_get_instance_disk_info(instance, xml=None):
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
return '[]'
def fake_destroy(instance):
@@ -5015,7 +5263,8 @@ class LibvirtDriverTestCase(test.TestCase):
'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
- def fake_get_instance_disk_info(instance, xml=None):
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
return disk_info_text
def fake_destroy(instance):
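The two new reboot tests capture the soft-reboot fallback this change exercises: after the guest is asked to shut down, the driver polls the domain, and only when the domain comes back under a new domain ID in a shut-down or crashed state is it recreated; if the ID never changes before the timeout, a hard reboot is performed instead. A condensed, hypothetical sketch of that control flow (the helper names below are illustrative, not the LibvirtDriver API):

SHUTOFF, CRASHED = 'shutoff', 'crashed'  # stand-ins for libvirt power states

def reboot_with_soft_fallback(dom, wait_seconds, create_domain, hard_reboot,
                              sleep=lambda seconds: None):
    old_id = dom.ID()
    dom.shutdown()
    for _ in range(wait_seconds):
        state = dom.info()[0]
        new_id = dom.ID()
        if new_id != old_id:
            # The guest came back under a new domain ID; restart it only
            # if it actually ended up shut down or crashed.
            if state in (SHUTOFF, CRASHED):
                create_domain()
            return 'soft'
        sleep(1)
    hard_reboot()
    return 'hard'

With the mocks above, the different-IDs case takes the create_domain branch and the same-IDs case falls through to hard_reboot, which is what the two assertions check.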
diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py
index 5c2df9854..6cc9bef43 100644
--- a/nova/tests/virt/test_virt_drivers.py
+++ b/nova/tests/virt/test_virt_drivers.py
@@ -60,7 +60,7 @@ class _FakeDriverBackendTestCase(object):
# So that the _supports_direct_io does the test based
# on the current working directory, instead of the
# default instances_path which doesn't exist
- self.flags(instances_path='')
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# Put fakelibvirt in place
if 'libvirt' in sys.modules:
@@ -106,7 +106,8 @@ class _FakeDriverBackendTestCase(object):
def fake_make_drive(_self, _path):
pass
- def fake_get_instance_disk_info(_self, instance, xml=None):
+ def fake_get_instance_disk_info(_self, instance, xml=None,
+ block_device_info=None):
return '[]'
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi.py b/nova/tests/virt/vmwareapi/test_vmwareapi.py
index 62db10535..afda26b0d 100644
--- a/nova/tests/virt/vmwareapi/test_vmwareapi.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi.py
@@ -531,7 +531,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._create_instance_in_the_db()
self._create_vm()
vnc_dict = self.conn.get_vnc_console(self.instance)
- self.assertEquals(vnc_dict['host'], "test_url")
+ self.assertEquals(vnc_dict['host'], "ha-host")
self.assertEquals(vnc_dict['port'], 5910)
def test_host_ip_addr(self):
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py b/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
index 123a314c1..0456dfece 100644
--- a/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
@@ -16,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from collections import namedtuple
+
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
@@ -33,9 +35,11 @@ class fake_session(object):
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
+ fake.reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
+ fake.reset()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
@@ -54,3 +58,95 @@ class VMwareVMUtilTestCase(test.TestCase):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
+
+ def test_get_host_ref_from_id(self):
+
+ fake_host_sys = fake.HostSystem(
+ fake.ManagedObjectReference("HostSystem", "host-123"))
+
+ fake_host_id = fake_host_sys.obj.value
+ fake_host_name = "ha-host"
+
+ ref = vm_util.get_host_ref_from_id(
+ fake_session([fake_host_sys]), fake_host_id, ['name'])
+
+ self.assertIsInstance(ref, fake.HostSystem)
+ self.assertEqual(fake_host_id, ref.obj.value)
+
+ host_name = vm_util.get_host_name_from_host_ref(ref)
+
+ self.assertEquals(fake_host_name, host_name)
+
+ def test_get_host_name_for_vm(self):
+
+ fake_vm = fake.ManagedObject(
+ "VirtualMachine", fake.ManagedObjectReference(
+ "vm-123", "VirtualMachine"))
+ fake_vm.propSet.append(
+ fake.Property('name', 'vm-123'))
+
+ vm_ref = vm_util.get_vm_ref_from_name(
+ fake_session([fake_vm]), 'vm-123')
+
+ self.assertIsNotNone(vm_ref)
+
+ fake_results = [
+ fake.ObjectContent(
+ None, [
+ fake.Property('runtime.host',
+ fake.ManagedObjectReference(
+ 'host-123', 'HostSystem'))
+ ])]
+
+ host_id = vm_util.get_host_id_from_vm_ref(
+ fake_session(fake_results), vm_ref)
+
+ self.assertEqual('host-123', host_id)
+
+ def test_property_from_property_set(self):
+
+ ObjectContent = namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = namedtuple('Property', ['name', 'val'])
+ MoRef = namedtuple('Val', ['value'])
+
+ results_good = [
+ ObjectContent(propSet=[
+ DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
+ ObjectContent(propSet=[
+ DynamicProperty(name='foo', val=MoRef(value='bar1')),
+ DynamicProperty(
+ name='runtime.host', val=MoRef(value='host-123')),
+ DynamicProperty(name='foo', val=MoRef(value='bar2')),
+ ]),
+ ObjectContent(propSet=[
+ DynamicProperty(
+ name='something', val=MoRef(value='thing'))]), ]
+
+ results_bad = [
+ ObjectContent(propSet=[
+ DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
+ ObjectContent(propSet=[
+ DynamicProperty(name='foo', val='bar1'),
+ DynamicProperty(name='foo', val='bar2'), ]),
+ ObjectContent(propSet=[
+ DynamicProperty(
+ name='something', val=MoRef(value='thing'))]), ]
+
+ prop = vm_util.property_from_property_set(
+ 'runtime.host', results_good)
+ self.assertIsNotNone(prop)
+ value = prop.val.value
+ self.assertEqual('host-123', value)
+
+ prop2 = vm_util.property_from_property_set(
+ 'runtime.host', results_bad)
+ self.assertIsNone(prop2)
+
+ prop3 = vm_util.property_from_property_set('foo', results_good)
+ self.assertIsNotNone(prop3)
+ val3 = prop3.val.value
+ self.assertEqual('bar1', val3)
+
+ prop4 = vm_util.property_from_property_set('foo', results_bad)
+ self.assertIsNotNone(prop4)
+ self.assertEqual('bar1', prop4.val)
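The test_property_from_property_set assertions describe a first-match lookup: walk each ObjectContent's propSet in order and return the first property whose name matches, or None when no content carries it. A minimal standalone illustration of that behaviour (not the vm_util implementation itself):

from collections import namedtuple

ObjectContent = namedtuple('ObjectContent', ['propSet'])
DynamicProperty = namedtuple('Property', ['name', 'val'])

def first_property(property_name, object_contents):
    for content in object_contents:
        for prop in content.propSet:
            if prop.name == property_name:
                return prop
    return None

results = [
    ObjectContent(propSet=[DynamicProperty(name='name', val='vm-123')]),
    ObjectContent(propSet=[DynamicProperty(name='foo', val='bar1'),
                           DynamicProperty(name='foo', val='bar2')]),
]
assert first_property('foo', results).val == 'bar1'
assert first_property('runtime.host', results) is None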
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
index 68caab651..6884ab5a8 100644
--- a/nova/tests/virt/xenapi/test_vm_utils.py
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -266,7 +266,7 @@ class FetchVhdImageTestCase(test.TestCase):
self._apply_stubouts()
self._common_params_setup(True)
- vm_utils._add_bittorrent_params(self.params)
+ vm_utils._add_bittorrent_params(self.image_id, self.params)
vm_utils._fetch_using_dom0_plugin_with_retry(self.context,
self.session, self.image_id, "bittorrent", self.params,
@@ -289,7 +289,7 @@ class FetchVhdImageTestCase(test.TestCase):
self._common_params_setup(True)
self.mox.StubOutWithMock(self.session, 'call_xenapi')
- vm_utils._add_bittorrent_params(self.params)
+ vm_utils._add_bittorrent_params(self.image_id, self.params)
vm_utils._fetch_using_dom0_plugin_with_retry(self.context,
self.session, self.image_id, "bittorrent", self.params,