| author | Josh Kearney <josh.kearney@rackspace.com> | 2011-01-13 10:45:26 -0600 |
|---|---|---|
| committer | Josh Kearney <josh.kearney@rackspace.com> | 2011-01-13 10:45:26 -0600 |
| commit | f26032b24b4eb22b5a81646756f6a8071e367c24 | |
| tree | 73e587fe1c93884a56cb26ffee6e2799aeb339bb | |
| parent | 752bed3311f09e7a43e642231e1638b4252f74a6 | |
| parent | a60a9fcab59e5486e166d51d47c84d6b4a9ede26 | |
Merged trunk
| -rw-r--r-- | Authors | 1 |
| -rwxr-xr-x | bin/nova-manage | 2 |
| -rw-r--r-- | nova/api/ec2/cloud.py | 2 |
| -rw-r--r-- | nova/auth/manager.py | 2 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 2 |
| -rw-r--r-- | nova/virt/libvirt_conn.py | 48 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 4 |
| -rw-r--r-- | nova/volume/driver.py | 57 |
8 files changed, 90 insertions, 28 deletions
```diff
diff --git a/Authors b/Authors
--- a/Authors
+++ b/Authors
@@ -21,6 +21,7 @@ Jesse Andrews <anotherjesse@gmail.com>
 Joe Heck <heckj@mac.com>
 Joel Moore <joelbm24@gmail.com>
 Jonathan Bryce <jbryce@jbryce.com>
+Josh Durgin <joshd@hq.newdream.net>
 Josh Kearney <josh.kearney@rackspace.com>
 Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index 3f5957190..3e290567c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -77,12 +77,14 @@ from nova import crypto
 from nova import db
 from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import quota
 from nova import utils
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 
 
+logging.basicConfig()
 FLAGS = flags.FLAGS
 flags.DECLARE('fixed_range', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 832426b94..5c25aa076 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -590,7 +590,7 @@ class CloudController(object):
         # TODO(vish): Instance should be None at db layer instead of
         #             trying to lazy load, but for now we turn it into
         #             a dict to avoid an error.
-        return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
+        return {'volumeSet': [self._format_volume(context, dict(volume))]}
 
     def delete_volume(self, context, volume_id, **kwargs):
         self.volume_api.delete(context, volume_id)
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 6fb9b522f..de18a3838 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -721,7 +721,7 @@ class AuthManager(object):
         if project is None:
             project = user.id
         pid = Project.safe_id(project)
-        return self.__generate_rc(user.access, user.secret, pid, use_dmz)
+        return self.__generate_rc(user, pid, use_dmz)
 
     @staticmethod
     def __generate_rc(user, pid, use_dmz=True, host=None):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2e4f8fc39..39df21e30 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -765,12 +765,14 @@ def instance_get_by_id(context, instance_id):
     if is_admin_context(context):
         result = session.query(models.Instance).\
                          options(joinedload('security_groups')).\
+                         options(joinedload_all('fixed_ip.floating_ips')).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=can_read_deleted(context)).\
                          first()
     elif is_user_context(context):
         result = session.query(models.Instance).\
                          options(joinedload('security_groups')).\
+                         options(joinedload_all('fixed_ip.floating_ips')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=False).\
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 655c55fa1..c03046703 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -197,40 +197,27 @@ class LibvirtConnection(object):
             pass
             # If the instance is already terminated, we're still happy
 
-        done = event.Event()
-
         # We'll save this for when we do shutdown,
         # instead of destroy - but destroy returns immediately
         timer = utils.LoopingCall(f=None)
 
-        def _wait_for_shutdown():
+        while True:
             try:
                 state = self.get_info(instance['name'])['state']
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'], state)
                 if state == power_state.SHUTDOWN:
-                    timer.stop()
+                    break
             except Exception:
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'],
                                       power_state.SHUTDOWN)
-                timer.stop()
+                break
 
-        timer.f = _wait_for_shutdown
-        timer_done = timer.start(interval=0.5, now=True)
+        if cleanup:
+            self._cleanup(instance)
 
-        # NOTE(termie): this is strictly superfluous (we could put the
-        #               cleanup code in the timer), but this emulates the
-        #               previous model so I am keeping it around until
-        #               everything has been vetted a bit
-        def _wait_for_timer():
-            timer_done.wait()
-            if cleanup:
-                self._cleanup(instance)
-            done.send()
-
-        greenthread.spawn(_wait_for_timer)
-        return done
+        return True
 
     def _cleanup(self, instance):
         target = os.path.join(FLAGS.instances_path, instance['name'])
@@ -243,11 +230,24 @@ class LibvirtConnection(object):
     def attach_volume(self, instance_name, device_path, mountpoint):
         virt_dom = self._conn.lookupByName(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
-        xml = """<disk type='block'>
-                     <driver name='qemu' type='raw'/>
-                     <source dev='%s'/>
-                     <target dev='%s' bus='virtio'/>
-                 </disk>""" % (device_path, mount_device)
+        if device_path.startswith('/dev/'):
+            xml = """<disk type='block'>
+                         <driver name='qemu' type='raw'/>
+                         <source dev='%s'/>
+                         <target dev='%s' bus='virtio'/>
+                     </disk>""" % (device_path, mount_device)
+        elif ':' in device_path:
+            (protocol, name) = device_path.split(':')
+            xml = """<disk type='network'>
+                         <driver name='qemu' type='raw'/>
+                         <source protocol='%s' name='%s'/>
+                         <target dev='%s' bus='virtio'/>
+                     </disk>""" % (protocol,
+                                   name,
+                                   mount_device)
+        else:
+            raise exception.Invalid(_("Invalid device path %s") % device_path)
+
         virt_dom.attachDevice(xml)
 
     def _get_disk_xml(self, xml, device):
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 8681608e1..9da50377f 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -252,7 +252,7 @@ class VMOps(object):
             raise Exception(_("suspend: instance not present %s") %
                             instance_name)
         task = self._session.call_xenapi('Async.VM.suspend', vm)
-        self._wait_with_callback(task, callback)
+        self._wait_with_callback(instance.id, task, callback)
 
     def resume(self, instance, callback):
         """resume the specified instance"""
@@ -262,7 +262,7 @@ class VMOps(object):
             raise Exception(_("resume: instance not present %s") %
                             instance_name)
         task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
-        self._wait_with_callback(task, callback)
+        self._wait_with_callback(instance.id, task, callback)
 
     def rescue(self, instance, callback):
         """Rescue the specified instance"""
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 6bc925f3e..44bfeaf0c 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -49,6 +49,8 @@ flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',
                     'prefix for iscsi volumes')
 flags.DEFINE_string('iscsi_ip_prefix', '127.0',
                     'discover volumes on the ip that starts with this prefix')
+flags.DEFINE_string('rbd_pool', 'rbd',
+                    'the rbd pool in which volumes are stored')
 
 
 class VolumeDriver(object):
@@ -314,3 +316,58 @@ class FakeISCSIDriver(ISCSIDriver):
         """Execute that simply logs the command."""
         LOG.debug(_("FAKE ISCSI: %s"), cmd)
         return (None, None)
+
+
+class RBDDriver(VolumeDriver):
+    """Implements RADOS block device (RBD) volume commands"""
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met"""
+        (stdout, stderr) = self._execute("rados lspools")
+        pools = stdout.split("\n")
+        if not FLAGS.rbd_pool in pools:
+            raise exception.Error(_("rbd has no pool %s") %
+                                  FLAGS.rbd_pool)
+
+    def create_volume(self, volume):
+        """Creates a logical volume."""
+        if int(volume['size']) == 0:
+            size = 100
+        else:
+            size = int(volume['size']) * 1024
+        self._try_execute("rbd --pool %s --size %d create %s" %
+                          (FLAGS.rbd_pool,
+                           size,
+                           volume['name']))
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+        self._try_execute("rbd --pool %s rm %s" %
+                          (FLAGS.rbd_pool,
+                           volume['name']))
+
+    def local_path(self, volume):
+        """Returns the path of the rbd volume."""
+        # This is the same as the remote path
+        # since qemu accesses it directly.
+        return self.discover_volume(volume)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume"""
+        pass
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume"""
+        pass
+
+    def discover_volume(self, volume):
+        """Discover volume on a remote host"""
+        return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])
+
+    def undiscover_volume(self, volume):
+        """Undiscover volume on a remote host"""
+        pass
```
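A few notes on the merged changes. The rewritten `destroy()` in `nova/virt/libvirt_conn.py` drops the `LoopingCall`/greenthread machinery in favor of a plain synchronous poll that breaks once the domain reports `SHUTDOWN`, with a lookup failure treated as the domain already being gone (note the now-unused `timer = utils.LoopingCall(f=None)` assignment survives the rewrite as dead code). A minimal standalone sketch of that pattern; names here are illustrative, `get_state` stands in for `self.get_info(instance['name'])['state']`, and an explicit sleep stands in for the old 0.5-second timer interval:

```python
import time

SHUTDOWN = 'shutdown'  # stand-in for nova's power_state.SHUTDOWN


def wait_for_shutdown(get_state, interval=0.5):
    """Poll until the domain reports SHUTDOWN; an exception from the
    state lookup is taken to mean the domain is already gone."""
    while True:
        try:
            if get_state() == SHUTDOWN:
                return True
        except Exception:
            return True
        time.sleep(interval)


# Example: a fake domain that reaches SHUTDOWN on the third poll.
states = iter(['running', 'running', 'shutdown'])
assert wait_for_shutdown(lambda: next(states), interval=0)
```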
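The new `attach_volume()` branches on the device path: anything under `/dev/` keeps the original `<disk type='block'>` XML, while a `protocol:name` pair, such as the `rbd:pool/volume` strings returned by `RBDDriver.discover_volume()`, becomes a `<disk type='network'>` element. A standalone sketch of that dispatch, pulled out of `LibvirtConnection` and with a plain `ValueError` in place of nova's `exception.Invalid`:

```python
def disk_xml(device_path, mount_device):
    """Build libvirt <disk> XML for a local block device or for a
    'protocol:name' network device path."""
    if device_path.startswith('/dev/'):
        return """<disk type='block'>
                      <driver name='qemu' type='raw'/>
                      <source dev='%s'/>
                      <target dev='%s' bus='virtio'/>
                  </disk>""" % (device_path, mount_device)
    elif ':' in device_path:
        protocol, name = device_path.split(':')
        return """<disk type='network'>
                      <driver name='qemu' type='raw'/>
                      <source protocol='%s' name='%s'/>
                      <target dev='%s' bus='virtio'/>
                  </disk>""" % (protocol, name, mount_device)
    raise ValueError("Invalid device path %s" % device_path)


# e.g. an RBD volume attached as /dev/vdc inside the guest:
print(disk_xml('rbd:rbd/volume-00000001', 'vdc'))
```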
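`RBDDriver` shells out to the ceph command-line tools: `rados lspools` to verify that `FLAGS.rbd_pool` exists, and `rbd create` / `rbd rm` for the volume lifecycle. The export and discover hooks are no-ops (or just return the `rbd:pool/name` path) because qemu opens the RBD device directly. One detail worth noting in `create_volume()`: nova volume sizes are in GB while `rbd --size` takes MB, and a size of 0 falls back to a 100 MB volume. A sketch of just that command construction, with the pool passed as a parameter instead of read from `FLAGS`:

```python
def rbd_create_command(volume, pool='rbd'):
    """Build the `rbd create` command line used by create_volume()."""
    size_gb = int(volume['size'])
    size_mb = 100 if size_gb == 0 else size_gb * 1024  # GB -> MB
    return "rbd --pool %s --size %d create %s" % (pool, size_mb,
                                                  volume['name'])


assert (rbd_create_command({'size': 0, 'name': 'volume-00000001'}) ==
        "rbd --pool rbd --size 100 create volume-00000001")
assert (rbd_create_command({'size': 2, 'name': 'volume-00000002'}) ==
        "rbd --pool rbd --size 2048 create volume-00000002")
```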
