summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorTushar Patil <tushar.vitthal.patil@gmail.com>2011-09-08 11:47:56 -0700
committerTushar Patil <tushar.vitthal.patil@gmail.com>2011-09-08 11:47:56 -0700
commit229e5fe8a1bb94bdea17b2fd5868a0aac992e2b1 (patch)
tree303b3191cfa4550ee989475a9930918b56984313 /nova/tests
parent4d98408d4ec605ad9a591d5166f2b8ea6e723ecb (diff)
parentbb3b61b61dc4b45fba4bdab7ead69af239eac40a (diff)
Merged trunk
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/openstack/contrib/test_createserverext.py2
-rw-r--r--nova/tests/api/openstack/test_servers.py69
-rw-r--r--nova/tests/test_network.py7
-rw-r--r--nova/tests/test_quantum.py323
-rw-r--r--nova/tests/test_xenapi.py2
5 files changed, 395 insertions, 8 deletions
diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index ba8fb925e..078b72d67 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -112,6 +112,8 @@ class CreateserverextTest(test.TestCase):
return [{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID,
+ 'user_id': 'fake',
+ 'project_id': 'fake',
'created_at': "",
'updated_at': ""}]
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 1591ea56c..2ef687709 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -347,6 +347,8 @@ class ServersTest(test.TestCase):
"server": {
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
@@ -446,6 +448,8 @@ class ServersTest(test.TestCase):
expected = minidom.parseString("""
<server id="1"
uuid="%(expected_uuid)s"
+ userId="fake"
+ tenantId="fake"
xmlns="http://docs.openstack.org/compute/api/v1.1"
xmlns:atom="http://www.w3.org/2005/Atom"
name="server1"
@@ -515,6 +519,8 @@ class ServersTest(test.TestCase):
"server": {
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
@@ -610,6 +616,8 @@ class ServersTest(test.TestCase):
"server": {
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
@@ -1199,6 +1207,26 @@ class ServersTest(test.TestCase):
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], 100)
+ def test_tenant_id_filter_converts_to_project_id_for_admin(self):
+ def fake_get_all(context, filters=None):
+ self.assertNotEqual(filters, None)
+ self.assertEqual(filters['project_id'], 'faketenant')
+ self.assertFalse(filters.get('tenant_id'))
+ return [stub_instance(100)]
+
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_filters',
+ fake_get_all)
+ self.flags(allow_admin_api=True)
+
+ req = webob.Request.blank('/v1.1/fake/servers?tenant_id=faketenant')
+ # Use admin context
+ context = nova.context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=context))
+ res_dict = json.loads(res.body)
+ # Failure in fake_get_all returns non 200 status code
+ self.assertEqual(res.status_int, 200)
+
def test_get_servers_allows_flavor_v1_1(self):
def fake_get_all(compute_self, context, search_opts=None):
self.assertNotEqual(search_opts, None)
@@ -1455,6 +1483,8 @@ class ServersTest(test.TestCase):
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': image_ref,
+ 'user_id': 'fake',
+ 'project_id': 'fake',
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": self.config_drive,
@@ -3103,7 +3133,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase):
"name": "new-server-test",
"imageRef": "1",
"flavorRef": "1",
- "networks": []
+ "networks": [],
}}
self.assertEquals(request['body'], expected)
@@ -3330,6 +3360,8 @@ class TestServerInstanceCreation(test.TestCase):
self.injected_files = None
return [{'id': '1234', 'display_name': 'fakeinstance',
+ 'user_id': 'fake',
+ 'project_id': 'fake',
'uuid': FAKE_UUID}]
def set_admin_password(self, *args, **kwargs):
@@ -3583,10 +3615,14 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
def test_ami_no_ramdisk(self):
- """If an ami is missing a ramdisk it should raise NotFound"""
+ """If an ami is missing a ramdisk, return kernel ID and None for
+ ramdisk ID
+ """
image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
'properties': {'kernel_id': 1}}
- self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
+ kernel_id, ramdisk_id = self._get_k_r(image_meta)
+ self.assertEqual(kernel_id, 1)
+ self.assertEqual(ramdisk_id, None)
def test_ami_kernel_ramdisk_present(self):
"""Return IDs if both kernel and ramdisk are present"""
@@ -3621,8 +3657,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"created_at": created_at,
"updated_at": updated_at,
"admin_pass": "",
- "user_id": "",
- "project_id": "",
+ "user_id": "fake",
+ "project_id": "fake",
"image_ref": "5",
"kernel_id": "",
"ramdisk_id": "",
@@ -3647,7 +3683,6 @@ class ServersViewBuilderV11Test(test.TestCase):
"terminated_at": utils.utcnow(),
"availability_zone": "",
"display_name": "test_server",
- "display_description": "",
"locked": False,
"metadata": [],
"accessIPv4": "1.2.3.4",
@@ -3730,6 +3765,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"server": {
"id": 1,
"uuid": self.instance['uuid'],
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
@@ -3785,6 +3822,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"server": {
"id": 1,
"uuid": self.instance['uuid'],
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
@@ -3841,6 +3880,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"server": {
"id": 1,
"uuid": self.instance['uuid'],
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
@@ -3897,6 +3938,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"server": {
"id": 1,
"uuid": self.instance['uuid'],
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
@@ -3956,6 +3999,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"server": {
"id": 1,
"uuid": self.instance['uuid'],
+ "user_id": "fake",
+ "tenant_id": "fake",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
@@ -4024,6 +4069,8 @@ class ServerXMLSerializationTest(test.TestCase):
fixture = {
"server": {
"id": 1,
+ "user_id": "fake",
+ "tenant_id": "fake",
"uuid": FAKE_UUID,
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
@@ -4161,6 +4208,8 @@ class ServerXMLSerializationTest(test.TestCase):
"server": {
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
@@ -4361,6 +4410,8 @@ class ServerXMLSerializationTest(test.TestCase):
{
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
@@ -4416,6 +4467,8 @@ class ServerXMLSerializationTest(test.TestCase):
{
"id": 2,
"uuid": FAKE_UUID,
+ "user_id": 'fake',
+ "tenant_id": 'fake',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 100,
@@ -4535,6 +4588,8 @@ class ServerXMLSerializationTest(test.TestCase):
fixture = {
"server": {
"id": 1,
+ "user_id": "fake",
+ "tenant_id": "fake",
"uuid": FAKE_UUID,
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
@@ -4671,6 +4726,8 @@ class ServerXMLSerializationTest(test.TestCase):
"server": {
"id": 1,
"uuid": FAKE_UUID,
+ "user_id": "fake",
+ "tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 25ff940f0..2ae5a35e3 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -118,9 +118,14 @@ vifs = [{'id': 0,
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
- 'network_id': 0,
'network_id': 1,
'network': FakeModel(**networks[1]),
+ 'instance_id': 0},
+ {'id': 2,
+ 'address': 'DE:AD:BE:EF:00:02',
+ 'uuid': '00000000-0000-0000-0000-0000000000000002',
+ 'network_id': 2,
+ 'network': None,
'instance_id': 0}]
diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py
new file mode 100644
index 000000000..0feec9b99
--- /dev/null
+++ b/nova/tests/test_quantum.py
@@ -0,0 +1,323 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Nicira, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova.db.sqlalchemy.session import get_session
+from nova import exception
+from nova import ipv6
+from nova import log as logging
+from nova.network.quantum import manager as quantum_manager
+from nova import test
+from nova import utils
+
+LOG = logging.getLogger('nova.tests.quantum_network')
+
+
+# this class can be used for unit functional/testing on nova,
+# as it does not actually make remote calls to the Quantum service
+class FakeQuantumClientConnection(object):
+
+ def __init__(self):
+ self.nets = {}
+
+ def get_networks_for_tenant(self, tenant_id):
+ net_ids = []
+ for net_id, n in self.nets.items():
+ if n['tenant-id'] == tenant_id:
+ net_ids.append(net_id)
+ return net_ids
+
+ def create_network(self, tenant_id, network_name):
+
+ uuid = str(utils.gen_uuid())
+ self.nets[uuid] = {'net-name': network_name,
+ 'tenant-id': tenant_id,
+ 'ports': {}}
+ return uuid
+
+ def delete_network(self, tenant_id, net_id):
+ if self.nets[net_id]['tenant-id'] == tenant_id:
+ del self.nets[net_id]
+
+ def network_exists(self, tenant_id, net_id):
+ try:
+ return self.nets[net_id]['tenant-id'] == tenant_id
+ except KeyError:
+ return False
+
+ def _confirm_not_attached(self, interface_id):
+ for n in self.nets.values():
+ for p in n['ports'].values():
+ if p['attachment-id'] == interface_id:
+ raise Exception(_("interface '%s' is already attached" %
+ interface_id))
+
+ def create_and_attach_port(self, tenant_id, net_id, interface_id):
+ if not self.network_exists(tenant_id, net_id):
+ raise Exception(
+ _("network %(net_id)s does not exist for tenant %(tenant_id)"
+ % locals()))
+
+ self._confirm_not_attached(interface_id)
+ uuid = str(utils.gen_uuid())
+ self.nets[net_id]['ports'][uuid] = \
+ {"port-state": "ACTIVE",
+ "attachment-id": interface_id}
+
+ def detach_and_delete_port(self, tenant_id, net_id, port_id):
+ if not self.network_exists(tenant_id, net_id):
+ raise exception.NotFound(
+ _("network %(net_id)s does not exist "
+ "for tenant %(tenant_id)s" % locals()))
+ del self.nets[net_id]['ports'][port_id]
+
+ def get_port_by_attachment(self, tenant_id, attachment_id):
+ for net_id, n in self.nets.items():
+ if n['tenant-id'] == tenant_id:
+ for port_id, p in n['ports'].items():
+ if p['attachment-id'] == attachment_id:
+ return (net_id, port_id)
+
+ return (None, None)
+
+networks = [{'label': 'project1-net1',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': '2001:1db8::/64',
+ 'gateway_v6': '2001:1db8::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': None,
+ 'bridge_interface': None,
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': None,
+ 'vpn_public_address': None,
+ 'project_id': 'fake_project1',
+ 'priority': 1},
+ {'label': 'project2-net1',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '2001:1db9::/64',
+ 'gateway_v6': '2001:1db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': None,
+ 'bridge_interface': None,
+ 'gateway': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project2',
+ 'priority': 1},
+ {'label': "public",
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '10.0.0.0/24',
+ 'cidr_v6': '2001:1dba::/64',
+ 'gateway_v6': '2001:1dba::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': None,
+ 'bridge_interface': None,
+ 'gateway': '10.0.0.1',
+ 'broadcast': '10.0.0.255',
+ 'dns1': '10.0.0.1',
+ 'dns2': '10.0.0.2',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': None,
+ 'priority': 0},
+ {'label': "project2-net2",
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '9.0.0.0/24',
+ 'cidr_v6': '2001:1dbb::/64',
+ 'gateway_v6': '2001:1dbb::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': None,
+ 'bridge_interface': None,
+ 'gateway': '9.0.0.1',
+ 'broadcast': '9.0.0.255',
+ 'dns1': '9.0.0.1',
+ 'dns2': '9.0.0.2',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': "fake_project2",
+ 'priority': 2}]
+
+
+# this is a base class to be used by all other Quantum Test classes
+class QuantumTestCaseBase(object):
+
+ def test_create_and_delete_nets(self):
+ self._create_nets()
+ self._delete_nets()
+
+ def _create_nets(self):
+ for n in networks:
+ ctx = context.RequestContext('user1', n['project_id'])
+ self.net_man.create_networks(ctx,
+ label=n['label'], cidr=n['cidr'],
+ multi_host=n['multi_host'],
+ num_networks=1, network_size=256, cidr_v6=n['cidr_v6'],
+ gateway_v6=n['gateway_v6'], bridge=None,
+ bridge_interface=None, dns1=n['dns1'],
+ dns2=n['dns2'], project_id=n['project_id'],
+ priority=n['priority'])
+
+ def _delete_nets(self):
+ for n in networks:
+ ctx = context.RequestContext('user1', n['project_id'])
+ self.net_man.delete_network(ctx, n['cidr'])
+
+ def test_allocate_and_deallocate_instance_static(self):
+ self._create_nets()
+
+ project_id = "fake_project1"
+ ctx = context.RequestContext('user1', project_id)
+
+ instance_ref = db.api.instance_create(ctx,
+ {"project_id": project_id})
+ nw_info = self.net_man.allocate_for_instance(ctx,
+ instance_id=instance_ref['id'], host="",
+ instance_type_id=instance_ref['instance_type_id'],
+ project_id=project_id)
+
+ self.assertEquals(len(nw_info), 2)
+
+ # we don't know which order the NICs will be in until we
+ # introduce the notion of priority
+ # v4 cidr
+ self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
+ self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))
+
+ # v4 address
+ self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
+ self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))
+
+ # v6 cidr
+ self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
+ self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))
+
+ # v6 address
+ self.assertTrue(
+ nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
+ self.assertTrue(
+ nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))
+
+ self.net_man.deallocate_for_instance(ctx,
+ instance_id=instance_ref['id'],
+ project_id=project_id)
+
+ self._delete_nets()
+
+ def test_allocate_and_deallocate_instance_dynamic(self):
+ self._create_nets()
+ project_id = "fake_project2"
+ ctx = context.RequestContext('user1', project_id)
+
+ net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
+ requested_networks = [(net_id, None) for net_id in net_ids]
+
+ self.net_man.validate_networks(ctx, requested_networks)
+
+ instance_ref = db.api.instance_create(ctx,
+ {"project_id": project_id})
+ nw_info = self.net_man.allocate_for_instance(ctx,
+ instance_id=instance_ref['id'], host="",
+ instance_type_id=instance_ref['instance_type_id'],
+ project_id=project_id,
+ requested_networks=requested_networks)
+
+ self.assertEquals(len(nw_info), 2)
+
+ # we don't know which order the NICs will be in until we
+ # introduce the notion of priority
+ # v4 cidr
+ self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
+ nw_info[1][0]['cidr'].startswith("9."))
+ self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
+ nw_info[1][0]['cidr'].startswith("192."))
+
+ # v4 address
+ self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
+ nw_info[1][1]['ips'][0]['ip'].startswith("9."))
+ self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
+ nw_info[1][1]['ips'][0]['ip'].startswith("192."))
+
+ # v6 cidr
+ self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
+ nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
+ self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
+ nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))
+
+ # v6 address
+ self.assertTrue(
+ nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
+ nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
+ self.assertTrue(
+ nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
+ nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))
+
+ self.net_man.deallocate_for_instance(ctx,
+ instance_id=instance_ref['id'],
+ project_id=project_id)
+
+ self._delete_nets()
+
+ def test_validate_bad_network(self):
+ ctx = context.RequestContext('user1', 'fake_project1')
+ self.assertRaises(exception.NetworkNotFound,
+ self.net_man.validate_networks, ctx, [("", None)])
+
+
+class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase):
+
+ def setUp(self):
+ super(QuantumNovaIPAMTestCase, self).setUp()
+
+ self.net_man = quantum_manager.QuantumManager(
+ ipam_lib="nova.network.quantum.nova_ipam_lib",
+ q_conn=FakeQuantumClientConnection())
+
+ # Tests seem to create some networks by default, which
+ # we don't want. So we delete them.
+
+ ctx = context.RequestContext('user1', 'fake_project1').elevated()
+ for n in db.network_get_all(ctx):
+ db.network_delete_safe(ctx, n['id'])
+
+ # Other unit tests (e.g., test_compute.py) have a nasty
+    # habit of creating fixed IPs and not cleaning up, which
+ # can confuse these tests, so we remove all existing fixed
+ # ips before starting.
+ session = get_session()
+ result = session.query(models.FixedIp).all()
+ with session.begin():
+ for fip_ref in result:
+ session.delete(fip_ref)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 45dad3516..91b4161b0 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -494,6 +494,7 @@ class XenAPIVMTestCase(test.TestCase):
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
+ self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -611,7 +612,6 @@ class XenAPIVMTestCase(test.TestCase):
str(3 * 1024))
def test_rescue(self):
- self.flags(flat_injected=False)
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
conn.rescue(self.context, instance, None, [])