diff options
| author | Rick Harris <rconradharris@gmail.com> | 2011-10-10 17:58:56 -0500 |
|---|---|---|
| committer | Rick Harris <rconradharris@gmail.com> | 2011-10-13 18:46:38 -0500 |
| commit | 46d04831f5c290a40c14da415169749e2ef41383 (patch) | |
| tree | f3d20cf687ff074e719b861ab871bdc73c204e3f | |
| parent | 56be39aedb195576179e73c859db2271a9585496 (diff) | |
| download | nova-46d04831f5c290a40c14da415169749e2ef41383.tar.gz nova-46d04831f5c290a40c14da415169749e2ef41383.tar.xz nova-46d04831f5c290a40c14da415169749e2ef41383.zip | |
Xenapi driver can now generate swap from instance_type
Change-Id: I50268a85ccd62b019436a207c2b52b1901597564
| -rw-r--r-- | nova/compute/manager.py | 10 |
| -rw-r--r-- | nova/tests/db/fakes.py | 15 |
| -rw-r--r-- | nova/tests/test_xenapi.py | 29 |
| -rw-r--r-- | nova/utils.py | 34 |
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 122 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 22 |
6 files changed, 176 insertions(+), 56 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f4cad6180..686c9f1ed 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -465,16 +465,8 @@ class ComputeManager(manager.SchedulerDependentManager): # be fixed once we have no-db-messaging pass except: - # NOTE(sirp): 3-arg raise needed since Eventlet clears exceptions - # when switching between greenthreads. - type_, value, traceback = sys.exc_info() - try: + with utils.original_exception_raised(): _deallocate_network() - finally: - # FIXME(sirp): when/if - # https://github.com/jcrocholl/pep8/pull/27 merges, we can add - # a per-line disable flag here for W602 - raise type_, value, traceback @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def run_instance(self, context, instance_id, **kwargs): diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 6e6253c1d..d9bdb2351 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -352,33 +352,38 @@ def stub_out_db_instance_api(stubs, injected=True): vcpus=1, local_gb=0, flavorid=1, - rxtx_cap=1), + rxtx_cap=1, + swap=0), 'm1.small': dict(id=5, memory_mb=2048, vcpus=1, local_gb=20, flavorid=2, - rxtx_cap=2), + rxtx_cap=2, + swap=0), 'm1.medium': dict(id=1, memory_mb=4096, vcpus=2, local_gb=40, flavorid=3, - rxtx_cap=3), + rxtx_cap=3, + swap=0), 'm1.large': dict(id=3, memory_mb=8192, vcpus=4, local_gb=80, flavorid=4, - rxtx_cap=4), + rxtx_cap=4, + swap=0), 'm1.xlarge': dict(id=4, memory_mb=16384, vcpus=8, local_gb=160, flavorid=5, - rxtx_cap=5)} + rxtx_cap=5, + swap=0)} flat_network_fields = {'id': 'fake_flat', 'bridge': 'xenbr0', diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 225d51aba..1d0639221 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,6 +16,7 @@ """Test suite for XenAPI.""" +import contextlib import functools import json import os @@ -55,10 +56,30 @@ def stub_vm_utils_with_vdi_attached_here(function, should_return=True): """ 
@functools.wraps(function) def decorated_function(self, *args, **kwargs): - orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here - vm_utils.with_vdi_attached_here = lambda *x: should_return - function(self, *args, **kwargs) - vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here + @contextlib.contextmanager + def fake_vdi_attached_here(*args, **kwargs): + fake_dev = 'fakedev' + yield fake_dev + + def fake_stream_disk(*args, **kwargs): + pass + + def fake_is_vdi_pv(*args, **kwargs): + return should_return + + orig_vdi_attached_here = vm_utils.vdi_attached_here + orig_stream_disk = vm_utils._stream_disk + orig_is_vdi_pv = vm_utils._is_vdi_pv + try: + vm_utils.vdi_attached_here = fake_vdi_attached_here + vm_utils._stream_disk = fake_stream_disk + vm_utils._is_vdi_pv = fake_is_vdi_pv + return function(self, *args, **kwargs) + finally: + vm_utils._is_vdi_pv = orig_is_vdi_pv + vm_utils._stream_disk = orig_stream_disk + vm_utils.vdi_attached_here = orig_vdi_attached_here + return decorated_function diff --git a/nova/utils.py b/nova/utils.py index 1d2063798..2477349cb 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -19,6 +19,7 @@ """Utilities and helper functions.""" +import contextlib import datetime import functools import inspect @@ -931,3 +932,36 @@ def generate_glance_url(): # TODO(jk0): This will eventually need to take SSL into consideration # when supported in glance. return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + + +@contextlib.contextmanager +def original_exception_raised(): + """Run some code, then re-raise the original exception. + + This is needed because when Eventlet switches greenthreads, it clears the + exception context. This means if exception handler code blocks, we'll lose + the helpful exception traceback information. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. 
+ """ + type_, value, traceback = sys.exc_info() + try: + yield + finally: + raise type_, value, traceback + + +def make_dev_path(dev, partition=None, base='/dev'): + """Return a path to a particular device. + + >>> make_dev_path('xvdc') + /dev/xvdc + + >>> make_dev_path('xvdc', 1) + /dev/xvdc1 + """ + path = os.path.join(base, dev) + if partition: + path += str(partition) + return path diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 51f102689..d438498f5 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -20,6 +20,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ +import contextlib import json import os import pickle @@ -402,6 +403,50 @@ class VMHelper(HelperBase): session.wait_for_task(task, instance.id) @classmethod + def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb): + """ + Steps to programmatically generate swap: + + 1. Create VDI of desired swap size + + 2. Attach VDI to compute worker + + 3. Create swap partition + + 4. Create VBD between instance VM and swap VDI + """ + # 1. Create VDI + sr_ref = safe_find_sr(session) + name_label = instance.name + "-swap" + ONE_MEG = 1024 * 1024 + virtual_size = swap_mb * ONE_MEG + vdi_ref = cls.create_vdi( + session, sr_ref, name_label, virtual_size, read_only=False) + + try: + # 2. Attach VDI to compute worker (VBD hotplug) + with vdi_attached_here(session, vdi_ref, read_only=False) as dev: + # 3. Create swap partition + dev_path = utils.make_dev_path(dev) + utils.execute('parted', '--script', dev_path, + 'mklabel', 'msdos', run_as_root=True) + + partition_start = 0 + partition_end = swap_mb + utils.execute('parted', '--script', dev_path, 'mkpartfs', + 'primary', 'linux-swap', + str(partition_start), + str(partition_end), + run_as_root=True) + + # 4. 
Create VBD between instance VM and swap VDI + cls.create_vbd(session, vm_ref, vdi_ref, userdevice, + bootable=False) + except: + with utils.original_exception_raised(): + cls.destroy_vdi(session, vdi_ref) + + @classmethod def fetch_blank_disk(cls, session, instance_type_id): # Size the blank harddrive to suit the machine type: one_gig = 1024 * 1024 * 1024 @@ -573,10 +618,10 @@ class VMHelper(HelperBase): try: filename = None vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) - with_vdi_attached_here(session, vdi_ref, False, - lambda dev: - _stream_disk(dev, image_type, - virtual_size, image_file)) + + with vdi_attached_here(session, vdi_ref, read_only=False) as dev: + _stream_disk(dev, image_type, virtual_size, image_file) + if image_type in (ImageType.KERNEL, ImageType.RAMDISK): # We need to invoke a plugin for copying the # content of the VDI into the proper path. @@ -688,7 +733,8 @@ class VMHelper(HelperBase): is_pv = True elif disk_image_type == ImageType.DISK_RAW: # 2. RAW - is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) + with vdi_attached_here(session, vdi_ref, read_only=True) as dev: + is_pv = _is_vdi_pv(dev) elif disk_image_type == ImageType.DISK: # 3. 
Disk is_pv = True @@ -754,9 +800,8 @@ class VMHelper(HelperBase): if not mount_required: return - with_vdi_attached_here(session, vdi_ref, False, - lambda dev: _mounted_processing(dev, key, net, - metadata)) + with vdi_attached_here(session, vdi_ref, read_only=False) as dev: + _mounted_processing(dev, key, net, metadata) @classmethod def lookup_kernel_ramdisk(cls, session, vm): @@ -1024,14 +1069,16 @@ def remap_vbd_dev(dev): def _wait_for_device(dev): """Wait for device node to appear""" for i in xrange(0, FLAGS.block_device_creation_timeout): - if os.path.exists('/dev/%s' % dev): + dev_path = utils.make_dev_path(dev) + if os.path.exists(dev_path): return time.sleep(1) raise StorageError(_('Timeout waiting for device %s to be created') % dev) -def with_vdi_attached_here(session, vdi_ref, read_only, f): +@contextlib.contextmanager +def vdi_attached_here(session, vdi_ref, read_only=False): this_vm_ref = get_this_vm_ref(session) vbd_rec = {} vbd_rec['VM'] = this_vm_ref @@ -1052,22 +1099,24 @@ def with_vdi_attached_here(session, vdi_ref, read_only, f): try: LOG.debug(_('Plugging VBD %s ... '), vbd_ref) session.get_xenapi().VBD.plug(vbd_ref) - LOG.debug(_('Plugging VBD %s done.'), vbd_ref) - orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) - LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals()) - dev = remap_vbd_dev(orig_dev) - if dev != orig_dev: - LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' - 'remapping to %(dev)s') % locals()) - if dev != 'autodetect': - # NOTE(johannes): Unit tests will end up with a device called - # 'autodetect' which obviously won't exist. 
It's not ideal, - # but the alternatives were much messier - _wait_for_device(dev) - return f(dev) + try: + LOG.debug(_('Plugging VBD %s done.'), vbd_ref) + orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) + LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals()) + dev = remap_vbd_dev(orig_dev) + if dev != orig_dev: + LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' + 'remapping to %(dev)s') % locals()) + if dev != 'autodetect': + # NOTE(johannes): Unit tests will end up with a device called + # 'autodetect' which obviously won't exist. It's not ideal, + # but the alternatives were much messier + _wait_for_device(dev) + yield dev + finally: + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) + vbd_unplug_with_retry(session, vbd_ref) finally: - LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) - vbd_unplug_with_retry(session, vbd_ref) ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref) LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref) @@ -1118,7 +1167,8 @@ def get_this_vm_ref(session): def _is_vdi_pv(dev): LOG.debug(_("Running pygrub against %s"), dev) - output = os.popen('pygrub -qn /dev/%s' % dev) + dev_path = utils.make_dev_path(dev) + output = os.popen('pygrub -qn %s' % dev_path) for line in output.readlines(): #try to find kernel string m = re.search('(?<=kernel:)/.*(?:>)', line) @@ -1135,32 +1185,34 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('chown', os.getuid(), '/dev/%s' % dev, run_as_root=True) + dev_path = utils.make_dev_path(dev) + utils.execute('chown', os.getuid(), dev_path, run_as_root=True) - with open('/dev/%s' % dev, 'wb') as f: + with open(dev_path, 'wb') as f: f.seek(offset) for chunk in image_file: f.write(chunk) def _write_partition(virtual_size, dev): - dest = '/dev/%s' % dev + dev_path = utils.make_dev_path(dev) primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / 
SECTOR_SIZE) - 1 LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' - ' to %(dest)s...') % locals()) + ' to %(dev_path)s...') % locals()) def execute(*cmd, **kwargs): return utils.execute(*cmd, **kwargs) - execute('parted', '--script', dest, 'mklabel', 'msdos', run_as_root=True) - execute('parted', '--script', dest, 'mkpart', 'primary', + execute('parted', '--script', dev_path, 'mklabel', 'msdos', + run_as_root=True) + execute('parted', '--script', dev_path, 'mkpart', 'primary', '%ds' % primary_first, '%ds' % primary_last, run_as_root=True) - LOG.debug(_('Writing partition table %s done.'), dest) + LOG.debug(_('Writing partition table %s done.'), dev_path) def _mount_filesystem(dev_path, dir): @@ -1205,8 +1257,8 @@ def _find_guest_agent(base_dir, agent_rel_path): def _mounted_processing(device, key, net, metadata): """Callback which runs with the image VDI attached""" - - dev_path = '/dev/' + device + '1' # NB: Partition 1 hardcoded + # NB: Partition 1 hardcoded + dev_path = utils.make_dev_path(device, partition=1) tmpdir = tempfile.mkdtemp() try: # Mount only Linux filesystems, to avoid disturbing NTFS images diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index aab2a1119..4df0ad38c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -55,6 +55,11 @@ flags.DEFINE_integer('agent_version_timeout', 300, flags.DEFINE_string('xenapi_vif_driver', 'nova.virt.xenapi.vif.XenAPIBridgeDriver', 'The XenAPI VIF driver using XenServer Network APIs.') +flags.DEFINE_bool('xenapi_generate_swap', + False, + 'Whether to generate swap (False means fetching it' + ' from OVA)') + RESIZE_TOTAL_STEPS = 5 BUILD_TOTAL_STEPS = 4 @@ -339,10 +344,20 @@ class VMOps(object): # userdevice 1 is reserved for rescue and we've used '0' userdevice = 2 + ctx = nova_context.get_admin_context() + instance_type = db.instance_type_get(ctx, instance.instance_type_id) + swap_mb = instance_type['swap'] + generate_swap = swap_mb and 
FLAGS.xenapi_generate_swap + if generate_swap: + VMHelper.generate_swap(session=self._session, instance=instance, + vm_ref=vm_ref, userdevice=userdevice, + swap_mb=swap_mb) + userdevice += 1 + # Attach any other disks for vdi in vdis[1:]: - # vdi['vdi_type'] is either 'os' or 'swap', but we don't - # really care what it is right here. + if generate_swap and vdi['vdi_type'] == 'swap': + continue vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi['vdi_uuid']) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, @@ -445,7 +460,8 @@ class VMOps(object): resources = [] if spawn_error.args: last_arg = spawn_error.args[-1] - resources = last_arg + if isinstance(last_arg, list): + resources = last_arg if vdis: for vdi in vdis: resources.append(dict(vdi_type=vdi['vdi_type'], |
