author     Vishvananda Ishaya <vishvananda@yahoo.com>   2010-08-29 18:53:47 -0700
committer  Vishvananda Ishaya <vishvananda@yahoo.com>   2010-08-29 18:53:47 -0700
commit     fab0bbaca8d6cf34f131c4426463bf5c76a0477f (patch)
tree       5166031c79cfb87905d1a12f3626ff95badb3d4c
parent     74e5e817905322e609870e60ce55863f35ce7893 (diff)
tests pass
-rwxr-xr-x  bin/nova-dhcpbridge              21
-rw-r--r--  nova/auth/manager.py              4
-rw-r--r--  nova/compute/service.py         174
-rw-r--r--  nova/db/api.py                   30
-rw-r--r--  nova/db/sqlalchemy/api.py         7
-rw-r--r--  nova/endpoint/cloud.py            2
-rw-r--r--  nova/manager.py                   2
-rw-r--r--  nova/service.py                  10
-rw-r--r--  nova/tests/cloud_unittest.py      4
-rw-r--r--  nova/tests/compute_unittest.py    3
-rw-r--r--  nova/tests/fake_flags.py         10
-rw-r--r--  nova/tests/network_unittest.py   14
-rw-r--r--  nova/tests/service_unittest.py   19
-rw-r--r--  nova/tests/volume_unittest.py    39
-rw-r--r--  nova/volume/driver.py             5
-rw-r--r--  nova/volume/manager.py            4
16 files changed, 103 insertions, 245 deletions
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 6747a3a0e..52ec2d497 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -34,23 +34,23 @@ from nova import db
from nova import flags
from nova import rpc
from nova import utils
-from nova.network import linux_net
-from nova.network import service
from nova import datastore # for redis_db flag
from nova.auth import manager # for auth flags
+from nova.network import manager # for network flags
FLAGS = flags.FLAGS
-
def add_lease(_mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
logging.debug("leasing ip")
- service.VlanNetworkService().lease_fixed_ip(ip_address)
+ network_manager = utils.import_object(FLAGS.network_manager)
+ network_manager.lease_fixed_ip(None, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "lease_fixed_ip",
- "args": {"address": ip_address}})
+ "args": {"context": None,
+ "address": ip_address}})
def old_lease(_mac, _ip_address, _hostname, _interface):
@@ -62,20 +62,24 @@ def del_lease(_mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
logging.debug("releasing ip")
- service.VlanNetworkService().release_fixed_ip(ip_address)
+ network_manager = utils.import_object(FLAGS.network_manager)
+ network_manager.release_fixed_ip(None, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "release_fixed_ip",
- "args": {"address": ip_address}})
+ "args": {"context": None,
+ "address": ip_address}})
def init_leases(interface):
"""Get the list of hosts for an interface."""
network_ref = db.network_get_by_bridge(None, interface)
- return linux_net.get_dhcp_hosts(None, network_ref['id'])
+ network_manager = utils.import_object(FLAGS.network_manager)
+ return network_manager.driver.get_dhcp_hosts(None, network_ref['id'])
def main():
+ global network_manager
"""Parse environment and arguments and call the approproate action."""
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
@@ -93,7 +97,6 @@ def main():
'..',
'_trial_temp',
'nova.sqlite'))
- print path
FLAGS.sql_connection = 'sqlite:///%s' % path
#FLAGS.sql_connection = 'mysql://root@localhost/test'
action = argv[1]
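
Note: the bridge callbacks above now load whatever class the network_manager flag names instead of hard-coding VlanNetworkService. A minimal sketch of the dotted-path loading pattern follows (hypothetical; nova's real utils.import_object may differ in error handling and module-vs-class behavior):

    import sys

    def import_class(import_str):
        """Return the class named by a dotted path like 'pkg.mod.Class'."""
        mod_str, _sep, class_str = import_str.rpartition('.')
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)

    def import_object(import_str):
        """Import a module by path, or import a class and instantiate it."""
        try:
            __import__(import_str)
            return sys.modules[import_str]
        except ImportError:
            return import_class(import_str)()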
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index a072a143b..62ec3f4e4 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -252,6 +252,7 @@ class AuthManager(object):
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
+ self.network_manager = utils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
@@ -525,7 +526,8 @@ class AuthManager(object):
if project_dict:
project = Project(**project_dict)
try:
- db.network_allocate(context, project.id)
+ self.network_manager.allocate_network(context,
+ project.id)
except:
drv.delete_project(project.id)
raise
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 877246ef6..9bf498d03 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -17,182 +17,16 @@
# under the License.
"""
-Compute Service:
-
- Runs on each compute host, managing the
- hypervisor using the virt module.
-
+Compute service allows rpc calls to the compute manager and reports state
+to the database.
"""
-import base64
-import logging
-import os
-
-from twisted.internet import defer
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import process
from nova import service
-from nova import utils
-from nova.compute import power_state
-from nova.network import service as network_service
-from nova.virt import connection as virt_connection
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('instances_path', utils.abspath('../instances'),
- 'where instances are stored on disk')
class ComputeService(service.Service):
"""
- Manages the running instances.
+ Compute Service automatically passes commands on to the Compute Manager
"""
- def __init__(self):
- """Load configuration options and connect to the hypervisor."""
- super(ComputeService, self).__init__()
- self._instances = {}
- self._conn = virt_connection.get_connection()
- # TODO(joshua): This needs to ensure system state, specifically
- # modprobe aoe
-
- def noop(self):
- """Simple test of an AMQP message call."""
- return defer.succeed('PONG')
-
- def update_state(self, instance_id, context):
- # FIXME(ja): include other fields from state?
- instance_ref = db.instance_get(context, instance_id)
- state = self._conn.get_info(instance_ref.name)['state']
- db.instance_state(context, instance_id, state)
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def run_instance(self, instance_id, context=None, **_kwargs):
- """Launch a new instance with specified options."""
- instance_ref = db.instance_get(context, instance_id)
- if instance_ref['str_id'] in self._conn.list_instances():
- raise exception.Error("Instance has already been created")
- logging.debug("Starting instance %s..." % (instance_id))
-
- network_service.setup_compute_network(context, instance_ref['project_id'])
- db.instance_update(context, instance_id, {'node_name': FLAGS.node_name})
-
- # TODO(vish) check to make sure the availability zone matches
- db.instance_state(context, instance_id, power_state.NOSTATE, 'spawning')
-
- try:
- yield self._conn.spawn(instance_ref)
- except:
- logging.exception("Failed to spawn instance %s" %
- instance_ref['str_id'])
- db.instance_state(context, instance_id, power_state.SHUTDOWN)
-
- self.update_state(instance_id, context)
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def terminate_instance(self, instance_id, context=None):
- """Terminate an instance on this machine."""
- logging.debug("Got told to terminate instance %s" % instance_id)
- instance_ref = db.instance_get(context, instance_id)
-
- if instance_ref['state'] == power_state.SHUTOFF:
- # self.datamodel.destroy() FIXME: RE-ADD?
- raise exception.Error('trying to destroy already destroyed'
- ' instance: %s' % instance_id)
-
- db.instance_state(
- context, instance_id, power_state.NOSTATE, 'shutting_down')
- yield self._conn.destroy(instance_ref)
-
- # FIXME(ja): should we keep it in a terminated state for a bit?
- db.instance_destroy(context, instance_id)
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def reboot_instance(self, instance_id, context=None):
- """Reboot an instance on this server.
-
- KVM doesn't support reboot, so we terminate and restart.
-
- """
- self.update_state(instance_id, context)
- instance_ref = db.instance_get(context, instance_id)
-
- # FIXME(ja): this is only checking the model state - not state on disk?
- if instance_ref['state'] != power_state.RUNNING:
- raise exception.Error(
- 'trying to reboot a non-running'
- 'instance: %s (state: %s excepted: %s)' %
- (instance_ref['str_id'],
- instance_ref['state'],
- power_state.RUNNING))
-
- logging.debug('rebooting instance %s' % instance_ref['str_id'])
- db.instance_state(
- context, instance_id, power_state.NOSTATE, 'rebooting')
- yield self._conn.reboot(instance_ref)
- self.update_state(instance_id, context)
-
- @exception.wrap_exception
- def get_console_output(self, instance_id, context=None):
- """Send the console output for an instance."""
- # FIXME: Abstract this for Xen
-
- logging.debug("Getting console output for %s" % (instance_id))
- instance_ref = db.instance_get(context, instance_id)
-
- if FLAGS.connection_type == 'libvirt':
- fname = os.path.abspath(os.path.join(FLAGS.instances_path,
- instance_ref['str_id'],
- 'console.log'))
- with open(fname, 'r') as f:
- output = f.read()
- else:
- output = 'FAKE CONSOLE OUTPUT'
-
- # TODO(termie): this stuff belongs in the API layer, no need to
- # munge the data we send to ourselves
- output = {"InstanceId" : instance_id,
- "Timestamp" : "2",
- "output" : base64.b64encode(output)}
- return output
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def attach_volume(self, instance_id=None, volume_id=None, mountpoint=None,
- context=None):
- """Attach a volume to an instance."""
- # TODO(termie): check that instance_id exists
- volume_ref = db.volume_get(context, volume_id)
- yield self._init_aoe()
- yield process.simple_execute(
- "sudo virsh attach-disk %s /dev/etherd/%s %s" %
- (instance_id,
- volume_ref['aoe_device'],
- mountpoint.rpartition('/dev/')[2]))
- db.volume_attached(context, volume_id)
- defer.returnValue(True)
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def detach_volume(self, instance_id, volume_id, context=None):
- """Detach a volume from an instance."""
- # despite the documentation, virsh detach-disk just wants the device
- # name without the leading /dev/
- # TODO(termie): check that instance_id exists
- volume_ref = db.volume_get(context, volume_id)
- target = volume_ref['mountpoint'].rpartition('/dev/')[2]
- yield process.simple_execute(
- "sudo virsh detach-disk %s %s " % (instance_id, target))
- db.volume_detached(context, volume_id)
- defer.returnValue(True)
+ pass
- @defer.inlineCallbacks
- def _init_aoe(self):
- # TODO(vish): these shell calls should move into a different layer.
- yield process.simple_execute("sudo aoe-discover")
- yield process.simple_execute("sudo aoe-stat")
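
Note: with its body reduced to pass, ComputeService depends on the base Service forwarding unknown attributes to the manager it wraps. A hedged sketch of that delegation (the real nova.service.Service also consults its own attributes first, as the __getattr__ hunk below shows; FakeComputeManager is illustrative):

    class FakeComputeManager(object):
        """Illustrative stand-in for a compute manager."""
        def run_instance(self, context, instance_id):
            print 'running instance %s' % instance_id

    class Service(object):
        def __init__(self, manager):
            self.manager = manager

        def __getattr__(self, key):
            # Invoked only when normal attribute lookup fails, so method
            # names arriving over rpc fall through to the manager.
            return getattr(self.manager, key)

    Service(FakeComputeManager()).run_instance(None, 42)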
diff --git a/nova/db/api.py b/nova/db/api.py
index 699118b16..80583de99 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -123,6 +123,16 @@ def fixed_ip_allocate(context, network_id):
return _impl.fixed_ip_allocate(context, network_id)
+def fixed_ip_create(context, network_id, address):
+ """Create a fixed ip from the values dictionary."""
+ return _impl.fixed_ip_create(context, network_id, address)
+
+
+def fixed_ip_deallocate(context, address):
+ """Deallocate a fixed ip by address."""
+ return _impl.fixed_ip_deallocate(context, address)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address."""
return _impl.fixed_ip_get_by_address(context, address)
@@ -133,21 +143,6 @@ def fixed_ip_get_network(context, address):
return _impl.fixed_ip_get_network(context, address)
-def fixed_ip_lease(context, address):
- """Lease a fixed ip by address."""
- return _impl.fixed_ip_lease(context, address)
-
-
-def fixed_ip_release(context, address):
- """Un-Lease a fixed ip by address."""
- return _impl.fixed_ip_release(context, address)
-
-
-def fixed_ip_deallocate(context, address):
- """Deallocate a fixed ip by address."""
- return _impl.fixed_ip_deallocate(context, address)
-
-
def fixed_ip_instance_associate(context, address, instance_id):
"""Associate a fixed ip to an instance by address."""
return _impl.fixed_ip_instance_associate(context, address, instance_id)
@@ -158,6 +153,11 @@ def fixed_ip_instance_disassociate(context, address):
return _impl.fixed_ip_instance_disassociate(context, address)
+def fixed_ip_update(context, address, values):
+ """Create a fixed ip from the values dictionary."""
+ return _impl.fixed_ip_update(context, address, values)
+
+
####################
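
Note: every function in nova/db/api.py, including the two added here, is a thin forwarder to a backend module held in _impl, so callers never import SQLAlchemy directly. A hedged sketch of the pattern (the backend path shown is an assumption; the real module picks it from a flag):

    import sys

    def _load_backend(path='nova.db.sqlalchemy.api'):  # hypothetical default
        __import__(path)
        return sys.modules[path]

    _impl = _load_backend()

    def fixed_ip_deallocate(context, address):
        """Deallocate a fixed ip by address."""
        return _impl.fixed_ip_deallocate(context, address)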
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b95346861..12455530d 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -149,6 +149,13 @@ def fixed_ip_instance_disassociate(context, address):
fixed_ip_ref.save()
+def fixed_ip_update(context, address, values):
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ for (key, value) in values.iteritems():
+ fixed_ip_ref[key] = value
+ fixed_ip_ref.save()
+
+
###################
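
Note: fixed_ip_update copies each key onto the row and saves it, so a caller passes plain column names. A hypothetical call (the address, column names, and values here are illustrative):

    db.fixed_ip_update(None, '10.0.0.5', {'leased': True,
                                          'instance_id': 42})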
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index ceff0f827..8ba10a5bb 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -60,7 +60,7 @@ class CloudController(object):
sent to the other nodes.
"""
def __init__(self):
- self.network_manager = utils.load_object(FLAGS.network_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
self.setup()
def __str__(self):
diff --git a/nova/manager.py b/nova/manager.py
index 4f212a41b..20b58bd13 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -25,7 +25,7 @@ from nova import flags
FLAGS = flags.FLAGS
-flags.DEFINE_string('db_driver', 'nova.db.api'
+flags.DEFINE_string('db_driver', 'nova.db.api',
'driver to use for volume creation')
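
Note: the bug fixed here is easy to miss. Without the comma, Python's implicit string literal concatenation fuses the two literals, so DEFINE_string received a single mangled name and no help text:

    name = 'nova.db.api' 'driver to use for volume creation'
    print name    # nova.db.apidriver to use for volume creation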
diff --git a/nova/service.py b/nova/service.py
index 59da6f04e..b20e24348 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -46,9 +46,10 @@ class Service(object, service.Service):
def __init__(self, manager, *args, **kwargs):
self.manager = manager
- super(self, Service).__init__(*args, **kwargs)
+ super(Service, self).__init__(*args, **kwargs)
def __getattr__(self, key):
+ print 'getattr'
try:
super(Service, self).__getattr__(key)
except AttributeError:
@@ -65,7 +66,7 @@ class Service(object, service.Service):
Args:
report_interval, defaults to flag
bin_name, defaults to basename of executable
- topic, defaults to basename - "nova-" part
+ topic, defaults to bin_name - "nova-" part
manager, defaults to FLAGS.<topic>_manager
"""
if not report_interval:
@@ -77,17 +78,15 @@ class Service(object, service.Service):
if not topic:
topic = bin_name.rpartition("nova-")[2]
if not manager:
- manager = FLAGS.get('%s_manager' % topic)
+ manager = FLAGS.get('%s_manager' % topic, None)
manager_ref = utils.import_object(manager)
logging.warn("Starting %s node" % topic)
service_ref = cls(manager_ref)
-
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % topic,
proxy=service_ref)
-
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (topic, FLAGS.node_name),
@@ -110,6 +109,7 @@ class Service(object, service.Service):
@defer.inlineCallbacks
def report_state(self, node_name, binary, context=None):
"""Update the state of this daemon in the datastore."""
+ print 'report_state'
try:
try:
daemon_ref = db.daemon_get_by_args(context, node_name, binary)
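
Note: the super() fix above is the classic argument-order mistake. super() takes the class first and the instance second; super(self, Service) raises TypeError because self is not a type:

    class Base(object):
        def __init__(self, *args, **kwargs):
            pass

    class Service(Base):
        def __init__(self, manager, *args, **kwargs):
            self.manager = manager
            super(Service, self).__init__(*args, **kwargs)  # class, then instance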
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 3501771cc..df2246aae 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -27,8 +27,8 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
+from nova import utils
from nova.auth import manager
-from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud
@@ -53,7 +53,7 @@ class CloudTestCase(test.BaseTestCase):
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a service
- self.compute = service.ComputeService()
+ self.compute = utils.import_class(FLAGS.compute_manager)
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.compute)
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index e85973837..28e51f387 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -27,7 +27,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.compute import service
FLAGS = flags.FLAGS
@@ -60,7 +59,7 @@ class ComputeConnectionTestCase(test.TrialTestCase):
super(ComputeConnectionTestCase, self).setUp()
self.flags(connection_type='fake',
fake_storage=True)
- self.compute = service.ComputeService()
+ self.compute = utils.import_object(FLAGS.compute_manager)
self.manager = manager.AuthManager()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 42a13e4e3..3114912ba 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,13 +20,19 @@ from nova import flags
FLAGS = flags.FLAGS
-FLAGS.connection_type = 'fake'
+flags.DECLARE('fake_storage', 'nova.volume.manager')
FLAGS.fake_storage = True
+FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
-FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 16
FLAGS.num_networks = 5
+FLAGS.fake_network = True
+flags.DECLARE('num_shelves', 'nova.volume.manager')
+flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.verbose = True
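
Note: the DECLARE calls let this flag file override flags that are defined in other modules without depending on import order. A hedged sketch of what flags.DECLARE might do (nova.flags' real implementation may differ; FLAGS is assumed bound as above):

    def DECLARE(name, module_string):
        __import__(module_string)       # runs the module's DEFINE_* calls
        if not hasattr(FLAGS, name):
            raise Exception('%s not defined by %s' % (name, module_string))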
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index d487c2e45..e3fe01fa2 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -49,14 +49,15 @@ class NetworkTestCase(test.TrialTestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
- self.service = service.VlanNetworkService()
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.context = None
for i in range(5):
name = 'project%s' % i
self.projects.append(self.manager.create_project(name,
'netuser',
name))
# create the necessary network data for the project
- self.service.set_network_host(self.projects[i].id)
+ self.network.set_network_host(self.context, self.projects[i].id)
instance_id = db.instance_create(None,
{'mac_address': utils.generate_mac()})
self.instance_id = instance_id
@@ -92,16 +93,17 @@ class NetworkTestCase(test.TrialTestCase):
db.floating_ip_get_by_address(None, ip_str)
except exception.NotFound:
db.floating_ip_create(None, ip_str, FLAGS.node_name)
- float_addr = self.service.allocate_floating_ip(self.projects[0].id)
+ float_addr = self.network.allocate_floating_ip(self.context,
+ self.projects[0].id)
fix_addr = self._create_address(0)
self.assertEqual(float_addr, str(pubnet[0]))
- self.service.associate_floating_ip(float_addr, fix_addr)
+ self.network.associate_floating_ip(self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(None, self.instance_id)
self.assertEqual(address, float_addr)
- self.service.disassociate_floating_ip(float_addr)
+ self.network.disassociate_floating_ip(self.context, float_addr)
address = db.instance_get_floating_address(None, self.instance_id)
self.assertEqual(address, None)
- self.service.deallocate_floating_ip(float_addr)
+ self.network.deallocate_floating_ip(self.context, float_addr)
db.fixed_ip_deallocate(None, fix_addr)
def test_allocate_deallocate_fixed_ip(self):
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 0b9d60024..e13fe62d1 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -30,10 +30,16 @@ from nova import flags
from nova import rpc
from nova import test
from nova import service
-
+from nova import manager
FLAGS = flags.FLAGS
+flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+ "Manager for testing")
+
+class FakeManager(manager.Manager):
+ """Fake manager for tests"""
+ pass
class ServiceTestCase(test.BaseTestCase):
"""Test cases for rpc"""
@@ -46,12 +52,12 @@ class ServiceTestCase(test.BaseTestCase):
self.mox.StubOutWithMock(
service.task, 'LoopingCall', use_mock_anything=True)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
- topic='run_tests.py',
+ topic='fake',
proxy=mox.IsA(service.Service)
).AndReturn(rpc.AdapterConsumer)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
- topic='run_tests.py.%s' % FLAGS.node_name,
+ topic='fake.%s' % FLAGS.node_name,
proxy=mox.IsA(service.Service)
).AndReturn(rpc.AdapterConsumer)
@@ -67,7 +73,7 @@ class ServiceTestCase(test.BaseTestCase):
rpc.AdapterConsumer.attach_to_twisted()
self.mox.ReplayAll()
- app = service.Service.create()
+ app = service.Service.create(bin_name='nova-fake')
self.assert_(app)
# We're testing sort of weird behavior in how report_state decides
@@ -82,7 +88,7 @@ class ServiceTestCase(test.BaseTestCase):
'binary': binary,
'report_count': 0,
'id': 1}
-
+ service.db.__getattr__('report_state')
service.db.daemon_get_by_args(None,
node_name,
binary).AndReturn(daemon_ref)
@@ -105,6 +111,7 @@ class ServiceTestCase(test.BaseTestCase):
'report_count': 0,
'id': 1}
+ service.db.__getattr__('report_state')
service.db.daemon_get_by_args(None,
node_name,
binary).AndRaise(exception.NotFound())
@@ -126,6 +133,7 @@ class ServiceTestCase(test.BaseTestCase):
'report_count': 0,
'id': 1}
+ service.db.__getattr__('report_state')
service.db.daemon_get_by_args(None,
node_name,
binary).AndRaise(Exception())
@@ -145,6 +153,7 @@ class ServiceTestCase(test.BaseTestCase):
'report_count': 0,
'id': 1}
+ service.db.__getattr__('report_state')
service.db.daemon_get_by_args(None,
node_name,
binary).AndReturn(daemon_ref)
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index a03e0e6e3..4504276e2 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -24,8 +24,7 @@ from nova import exception
from nova import db
from nova import flags
from nova import test
-from nova.compute import service as compute_service
-from nova.volume import service as volume_service
+from nova import utils
FLAGS = flags.FLAGS
@@ -35,10 +34,11 @@ class VolumeTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
- self.compute = compute_service.ComputeService()
+ self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake',
fake_storage=True)
- self.volume = volume_service.VolumeService()
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = None
def _create_volume(self, size='0'):
@@ -49,15 +49,15 @@ class VolumeTestCase(test.TrialTestCase):
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
- return db.volume_create(None, vol)
+ return db.volume_create(None, vol)['id']
@defer.inlineCallbacks
def test_run_create_volume(self):
volume_id = self._create_volume()
- yield self.volume.create_volume(volume_id)
+ yield self.volume.create_volume(self.context, volume_id)
self.assertEqual(volume_id, db.volume_get(None, volume_id).id)
- yield self.volume.delete_volume(volume_id)
+ yield self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
None,
@@ -70,7 +70,7 @@ class VolumeTestCase(test.TrialTestCase):
defer.returnValue(True)
try:
volume_id = self._create_volume('1001')
- yield self.volume.create_volume(volume_id)
+ yield self.volume.create_volume(self.context, volume_id)
self.fail("Should have thrown TypeError")
except TypeError:
pass
@@ -81,14 +81,15 @@ class VolumeTestCase(test.TrialTestCase):
total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
for i in xrange(total_slots):
volume_id = self._create_volume()
- yield self.volume.create_volume(volume_id)
+ yield self.volume.create_volume(self.context, volume_id)
vols.append(volume_id)
volume_id = self._create_volume()
- self.assertFailure(self.volume.create_volume(volume_id),
+ self.assertFailure(self.volume.create_volume(self.context,
+ volume_id),
db.NoMoreBlades)
db.volume_destroy(None, volume_id)
- for id in vols:
- yield self.volume.delete_volume(id)
+ for volume_id in vols:
+ yield self.volume.delete_volume(self.context, volume_id)
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
@@ -96,7 +97,7 @@ class VolumeTestCase(test.TrialTestCase):
instance_id = "storage-test"
mountpoint = "/dev/sdf"
volume_id = self._create_volume()
- yield self.volume.create_volume(volume_id)
+ yield self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
db.volume_attached(None, volume_id, instance_id, mountpoint)
else:
@@ -109,15 +110,16 @@ class VolumeTestCase(test.TrialTestCase):
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['mountpoint'], mountpoint)
- self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
+ self.assertFailure(self.volume.delete_volume(self.context, volume_id),
+ exception.Error)
if FLAGS.fake_tests:
db.volume_detached(None, volume_id)
else:
- rv = yield self.volume.detach_volume(instance_id,
+ rv = yield self.compute.detach_volume(instance_id,
volume_id)
self.assertEqual(vol['status'], "available")
- rv = self.volume.delete_volume(volume_id)
+ rv = self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.Error,
db.volume_get,
None,
@@ -142,14 +144,13 @@ class VolumeTestCase(test.TrialTestCase):
total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
for i in range(total_slots):
volume_id = self._create_volume()
- d = self.volume.create_volume(volume_id)
+ d = self.volume.create_volume(self.context, volume_id)
d.addCallback(_check)
d.addErrback(self.fail)
deferreds.append(d)
yield defer.DeferredList(deferreds)
for volume_id in volume_ids:
- vol = db.volume_get(None, volume_id)
- vol.delete()
+ self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 579472047..e0468b877 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -39,25 +39,20 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
class FakeAOEDriver(object):
- @defer.inlineCallbacks
def create_volume(self, volume_id, size):
logging.debug("Fake AOE: create_volume %s, %s", volume_id, size)
- @defer.inlineCallbacks
def delete_volume(self, volume_id):
logging.debug("Fake AOE: delete_volume %s", volume_id)
- @defer.inlineCallbacks
def create_export(self, volume_id, shelf_id, blade_id):
logging.debug("Fake AOE: create_export %s, %s, %s",
volume_id, shelf_id, blade_id)
- @defer.inlineCallbacks
def remove_export(self, volume_id, shelf_id, blade_id):
logging.debug("Fake AOE: remove_export %s, %s, %s",
volume_id, shelf_id, blade_id)
- @defer.inlineCallbacks
def ensure_exports(self):
logging.debug("Fake AOE: ensure_export")
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 0683703a1..7d8e1aca0 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -38,7 +38,7 @@ flags.DEFINE_string('storage_availability_zone',
'availability zone of this service')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver'
+flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver',
'Driver to use for volume creation')
flags.DEFINE_integer('num_shelves',
100,
@@ -60,7 +60,7 @@ class AOEManager(manager.Manager):
super(AOEManager, self).__init__(*args, **kwargs)
def _ensure_blades(self, context):
- total_blades = FLAGS.num_shelves, FLAGS.blades_per_shelf
+ total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
if self.db.export_device_count(context) >= total_blades:
return
for shelf_id in xrange(FLAGS.num_shelves):
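
Note: the _ensure_blades fix is another one-character bug. The comma built a tuple rather than a product, and under CPython 2's mixed-type ordering an int never compares >= a tuple, so the early return could never fire and export devices were re-created on every call:

    total_blades = 100, 16        # a tuple: (100, 16)
    print 9999 >= total_blades    # False: ints order below tuples in CPython 2
    total_blades = 100 * 16       # the intended count
    print 9999 >= total_blades    # True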