-rw-r--r-- | nova/tests/glance/__init__.py                      |  20
-rw-r--r-- | nova/tests/glance/stubs.py                         |  37
-rw-r--r-- | nova/tests/test_xenapi.py                          | 106
-rw-r--r-- | nova/tests/xenapi/stubs.py                         |  24
-rw-r--r-- | nova/virt/xenapi/fake.py                           |  87
-rw-r--r-- | nova/virt/xenapi/vm_utils.py                       | 256
-rw-r--r-- | nova/virt/xenapi/vmops.py                          |   3
-rw-r--r-- | nova/virt/xenapi_conn.py                           |   3
-rw-r--r-- | plugins/xenserver/xenapi/etc/xapi.d/plugins/glance |  33
-rw-r--r-- | tools/pip-requires                                 |   1
10 files changed, 502 insertions(+), 68 deletions(-)
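
In short, this change adds a Glance-backed image path to the XenAPI driver alongside the existing objectstore plugin, selected by a new xenapi_image_service flag (defaulting to 'glance'). Condensed from the vm_utils.py hunks below, the new path looks roughly like this. This is a reading aid, not the verbatim implementation: FLAGS, the helper names and the MBR constant come from the patch, while the wrapper function itself is invented here.

    import glance.client

    def fetch_image_glance_sketch(session, image_id):
        # Pick the host's 'Local storage' SR (see find_sr() below).
        sr = find_sr(session)
        client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
        meta, image_file = client.get_image(image_id)
        virtual_size = int(meta['size'])
        # Non-raw disk images get 63 extra sectors up front for the MBR.
        vdi = VMHelper.create_vdi(session, sr, 'Glance image %s' % image_id,
                                  virtual_size + MBR_SIZE_BYTES, False)
        # Temporarily attach the VDI to this (service) VM and stream onto it.
        with_vdi_attached_here(session, vdi, False,
                               lambda dev: _stream_disk(dev, ImageType.DISK,
                                                        virtual_size,
                                                        image_file))
        return session.get_xenapi().VDI.get_uuid(vdi)
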
diff --git a/nova/tests/glance/__init__.py b/nova/tests/glance/__init__.py
new file mode 100644
index 000000000..ef9fa05a7
--- /dev/null
+++ b/nova/tests/glance/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`glance` -- Stubs for Glance
+=================================
+"""
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
new file mode 100644
index 000000000..f182b857a
--- /dev/null
+++ b/nova/tests/glance/stubs.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import StringIO
+
+import glance.client
+
+
+def stubout_glance_client(stubs, cls):
+    """Stubs out glance.client.Client"""
+    stubs.Set(glance.client, 'Client',
+              lambda *args, **kwargs: cls(*args, **kwargs))
+
+
+class FakeGlance(object):
+    def __init__(self, host, port=None, use_ssl=False):
+        pass
+
+    def get_image(self, image):
+        meta = {
+            'size': 0,
+        }
+        image_file = StringIO.StringIO('')
+        return meta, image_file
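
These stubs let the spawn tests below exercise the Glance code path without a Glance server: FakeGlance hands back empty metadata (size 0) and a zero-byte file. A minimal sketch of the wiring, assuming the stubout.StubOutForTesting harness that backs self.stubs in the test cases:

    import stubout

    from nova.tests.glance import stubs as glance_stubs

    stubs = stubout.StubOutForTesting()
    # After this, code calling glance.client.Client(host, port).get_image(id)
    # talks to the in-memory fake instead of the network.
    glance_stubs.stubout_glance_client(stubs, glance_stubs.FakeGlance)
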
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 261ee0fde..9f5b266f3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -34,6 +34,7 @@ from nova.virt.xenapi import volume_utils
 from nova.virt.xenapi.vmops import SimpleDH
 from nova.tests.db import fakes as db_fakes
 from nova.tests.xenapi import stubs
+from nova.tests.glance import stubs as glance_stubs
 
 FLAGS = flags.FLAGS
 
@@ -108,18 +109,16 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        xenapi_fake.create_vm(instance.name, 'Running')
+        vm = xenapi_fake.create_vm(instance.name, 'Running')
         result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
 
         def check():
             # check that the VM has a VBD attached to it
-            # Get XenAPI reference for the VM
-            vms = xenapi_fake.get_all('VM')
             # Get XenAPI record for VBD
             vbds = xenapi_fake.get_all('VBD')
             vbd = xenapi_fake.get_record('VBD', vbds[0])
             vm_ref = vbd['VM']
-            self.assertEqual(vm_ref, vms[0])
+            self.assertEqual(vm_ref, vm)
 
         check()
 
@@ -157,9 +156,14 @@ class XenAPIVMTestCase(test.TestCase):
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
         xenapi_fake.reset()
+        xenapi_fake.create_local_srs()
         db_fakes.stub_out_db_instance_api(self.stubs)
         xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        stubs.stubout_get_this_vm_uuid(self.stubs)
+        stubs.stubout_stream_disk(self.stubs)
+        glance_stubs.stubout_glance_client(self.stubs,
+                                           glance_stubs.FakeGlance)
         self.conn = xenapi_conn.get_connection(False)
 
     def test_list_instances_0(self):
@@ -207,40 +211,70 @@ class XenAPIVMTestCase(test.TestCase):
 
         check()
 
-    def test_spawn(self):
-        instance = self._create_instance()
+    def check_vm_record(self, conn):
+        instances = conn.list_instances()
+        self.assertEquals(instances, [1])
+
+        # Get Nova record for VM
+        vm_info = conn.get_info(1)
+
+        # Get XenAPI record for VM
+        vms = [rec for ref, rec
+               in xenapi_fake.get_all_records('VM').iteritems()
+               if not rec['is_control_domain']]
+        vm = vms[0]
+
+        # Check that m1.large above turned into the right thing.
+        instance_type = instance_types.INSTANCE_TYPES['m1.large']
+        mem_kib = long(instance_type['memory_mb']) << 10
+        mem_bytes = str(mem_kib << 10)
+        vcpus = instance_type['vcpus']
+        self.assertEquals(vm_info['max_mem'], mem_kib)
+        self.assertEquals(vm_info['mem'], mem_kib)
+        self.assertEquals(vm['memory_static_max'], mem_bytes)
+        self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
+        self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
+        self.assertEquals(vm['VCPUs_max'], str(vcpus))
+        self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+
+        # Check that the VM is running according to Nova
+        self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+        # Check that the VM is running according to XenAPI.
+        self.assertEquals(vm['power_state'], 'Running')
+
+    def _test_spawn(self, image_id, kernel_id, ramdisk_id):
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        values = {'name': 1,
+                  'id': 1,
+                  'project_id': self.project.id,
+                  'user_id': self.user.id,
+                  'image_id': image_id,
+                  'kernel_id': kernel_id,
+                  'ramdisk_id': ramdisk_id,
+                  'instance_type': 'm1.large',
+                  'mac_address': 'aa:bb:cc:dd:ee:ff',
+                  }
+        conn = xenapi_conn.get_connection(False)
+        instance = db.instance_create(values)
+        conn.spawn(instance)
+        self.check_vm_record(conn)
 
-        def check():
-            instances = self.conn.list_instances()
-            self.assertEquals(instances, [1])
-
-            # Get Nova record for VM
-            vm_info = self.conn.get_info(1)
-
-            # Get XenAPI record for VM
-            vms = xenapi_fake.get_all('VM')
-            vm = xenapi_fake.get_record('VM', vms[0])
-
-            # Check that m1.large above turned into the right thing.
-            instance_type = instance_types.INSTANCE_TYPES['m1.large']
-            mem_kib = long(instance_type['memory_mb']) << 10
-            mem_bytes = str(mem_kib << 10)
-            vcpus = instance_type['vcpus']
-            self.assertEquals(vm_info['max_mem'], mem_kib)
-            self.assertEquals(vm_info['mem'], mem_kib)
-            self.assertEquals(vm['memory_static_max'], mem_bytes)
-            self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
-            self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
-            self.assertEquals(vm['VCPUs_max'], str(vcpus))
-            self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
-
-            # Check that the VM is running according to Nova
-            self.assertEquals(vm_info['state'], power_state.RUNNING)
-
-            # Check that the VM is running according to XenAPI.
-            self.assertEquals(vm['power_state'], 'Running')
+    def test_spawn_raw_objectstore(self):
+        FLAGS.xenapi_image_service = 'objectstore'
+        self._test_spawn(1, None, None)
 
-        check()
+    def test_spawn_objectstore(self):
+        FLAGS.xenapi_image_service = 'objectstore'
+        self._test_spawn(1, 2, 3)
+
+    def test_spawn_raw_glance(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(1, None, None)
+
+    def test_spawn_glance(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(1, 2, 3)
 
     def tearDown(self):
         super(XenAPIVMTestCase, self).tearDown()
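
As a check on the unit conversions asserted in check_vm_record: Nova reports memory in KiB, while XenAPI stores byte counts as strings. For m1.large (nominally 8192 MB; that figure is an assumption here, the real value lives in nova.compute.instance_types):

    memory_mb = 8192                 # m1.large, assumed from INSTANCE_TYPES
    mem_kib = long(memory_mb) << 10  # 8388608, compared with vm_info['max_mem']
    mem_bytes = str(mem_kib << 10)   # '8589934592', compared with
                                     # vm['memory_static_max'] and friends
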
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 292bd9ba9..624995ada 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -115,6 +115,21 @@ def stub_out_get_target(stubs):
     stubs.Set(volume_utils, '_get_target', fake_get_target)
 
 
+def stubout_get_this_vm_uuid(stubs):
+    def f():
+        vms = [rec['uuid'] for ref, rec
+               in fake.get_all_records('VM').iteritems()
+               if rec['is_control_domain']]
+        return vms[0]
+    stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_stream_disk(stubs):
+    def f(_1, _2, _3, _4):
+        pass
+    stubs.Set(vm_utils, '_stream_disk', f)
+
+
 class FakeSessionForVMTests(fake.SessionBase):
     """ Stubs out a XenAPISession for VM tests """
     def __init__(self, uri):
@@ -124,7 +139,10 @@ class FakeSessionForVMTests(fake.SessionBase):
         return self.xenapi.network.get_all_records()
 
     def host_call_plugin(self, _1, _2, _3, _4, _5):
-        return ''
+        sr_ref = fake.get_all('SR')[0]
+        vdi_ref = fake.create_vdi('', False, sr_ref, False)
+        vdi_rec = fake.get_record('VDI', vdi_ref)
+        return '<string>%s</string>' % vdi_rec['uuid']
 
     def VM_start(self, _1, ref, _2, _3):
         vm = fake.get_record('VM', ref)
@@ -159,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase):
     def __init__(self, uri):
         super(FakeSessionForVolumeTests, self).__init__(uri)
 
-    def VBD_plug(self, _1, ref):
-        rec = fake.get_record('VBD', ref)
-        rec['currently-attached'] = True
-
     def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7,
                       _8, _9, _10, _11):
         valid_vdi = False
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 96d8f5fc8..4bfaf4b57 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -76,6 +76,7 @@ def reset():
     for c in _CLASSES:
         _db_content[c] = {}
     create_host('fake')
+    create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
 
 
 def create_host(name_label):
@@ -136,14 +137,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
 
 
 def create_vbd(vm_ref, vdi_ref):
-    vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+    vbd_rec = {
+        'VM': vm_ref,
+        'VDI': vdi_ref,
+        'currently_attached': False,
+        }
     vbd_ref = _create_object('VBD', vbd_rec)
     after_VBD_create(vbd_ref, vbd_rec)
     return vbd_ref
 
 
 def after_VBD_create(vbd_ref, vbd_rec):
-    """Create backref from VM to VBD when VBD is created"""
+    """Create read-only fields and backref from VM to VBD when VBD is
+    created."""
+    vbd_rec['currently_attached'] = False
+    vbd_rec['device'] = ''
     vm_ref = vbd_rec['VM']
     vm_rec = _db_content['VM'][vm_ref]
     vm_rec['VBDs'] = [vbd_ref]
@@ -152,9 +160,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
     vbd_rec['vm_name_label'] = vm_name_label
 
 
-def create_pbd(config, sr_ref, attached):
+def create_pbd(config, host_ref, sr_ref, attached):
     return _create_object('PBD', {
         'device-config': config,
+        'host': host_ref,
         'SR': sr_ref,
         'currently-attached': attached,
         })
@@ -167,6 +176,33 @@ def create_task(name_label):
         })
 
 
+def create_local_srs():
+    """Create an SR that looks like the one created on the local disk by
+    default by the XenServer installer.  Do this one per host."""
+    for host_ref in _db_content['host'].keys():
+        _create_local_sr(host_ref)
+
+
+def _create_local_sr(host_ref):
+    sr_ref = _create_object('SR', {
+        'name_label': 'Local storage',
+        'type': 'lvm',
+        'content_type': 'user',
+        'shared': False,
+        'physical_size': str(1 << 30),
+        'physical_utilisation': str(0),
+        'virtual_allocation': str(0),
+        'other_config': {
+            'i18n-original-value-name_label': 'Local storage',
+            'i18n-key': 'local-storage',
+            },
+        'VDIs': []
+        })
+    pbd_ref = create_pbd('', host_ref, sr_ref, True)
+    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+    return sr_ref
+
+
 def _create_object(table, obj):
     ref = str(uuid.uuid4())
     obj['uuid'] = str(uuid.uuid4())
@@ -179,9 +215,10 @@ def _create_sr(table, obj):
     # Forces fake to support iscsi only
     if sr_type != 'iscsi':
         raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+    host_ref = _db_content['host'].keys()[0]
     sr_ref = _create_object(table, obj[2])
     vdi_ref = create_vdi('', False, sr_ref, False)
-    pbd_ref = create_pbd('', sr_ref, True)
+    pbd_ref = create_pbd('', host_ref, sr_ref, True)
     _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
     _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
     _db_content['VDI'][vdi_ref]['SR'] = sr_ref
@@ -233,6 +270,20 @@ class SessionBase(object):
     def __init__(self, uri):
         self._session = None
 
+    def VBD_plug(self, _1, ref):
+        rec = get_record('VBD', ref)
+        if rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+        rec['currently_attached'] = True
+        rec['device'] = rec['userdevice']
+
+    def VBD_unplug(self, _1, ref):
+        rec = get_record('VBD', ref)
+        if not rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+        rec['currently_attached'] = False
+        rec['device'] = ''
+
     def xenapi_request(self, methodname, params):
         if methodname.startswith('login'):
             self._login(methodname, params)
@@ -289,6 +340,8 @@ class SessionBase(object):
             return lambda *params: self._getter(name, params)
         elif self._is_create(name):
             return lambda *params: self._create(name, params)
+        elif self._is_destroy(name):
+            return lambda *params: self._destroy(name, params)
         else:
             return None
@@ -299,10 +352,16 @@ class SessionBase(object):
                 bits[1].startswith(getter and 'get_' or 'set_'))
 
     def _is_create(self, name):
+        return self._is_method(name, 'create')
+
+    def _is_destroy(self, name):
+        return self._is_method(name, 'destroy')
+
+    def _is_method(self, name, meth):
         bits = name.split('.')
         return (len(bits) == 2 and
                 bits[0] in _CLASSES and
-                bits[1] == 'create')
+                bits[1] == meth)
 
     def _getter(self, name, params):
         self._check_session(params)
@@ -370,10 +429,9 @@ class SessionBase(object):
             _create_sr(cls, params) or _create_object(cls, params[1])
 
         # Call hook to provide any fixups needed (ex. creating backrefs)
-        try:
-            globals()["after_%s_create" % cls](ref, params[1])
-        except KeyError:
-            pass
+        after_hook = 'after_%s_create' % cls
+        if after_hook in globals():
+            globals()[after_hook](ref, params[1])
 
         obj = get_record(cls, ref)
 
@@ -383,6 +441,15 @@ class SessionBase(object):
 
         return ref
 
+    def _destroy(self, name, params):
+        self._check_session(params)
+        self._check_arg_count(params, 2)
+        table, _ = name.split('.')
+        ref = params[1]
+        if ref not in _db_content[table]:
+            raise Failure(['HANDLE_INVALID', table, ref])
+        del _db_content[table][ref]
+
     def _async(self, name, params):
         task_ref = create_task(name)
         task = _db_content['task'][task_ref]
@@ -420,7 +487,7 @@ class SessionBase(object):
             try:
                 return result[0]
             except IndexError:
-                return None
+                raise Failure(['UUID_INVALID', v, result, recs, k])
 
         return result
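
The fake session's new VBD_plug/VBD_unplug track attachment state and fail the way a real host would. A quick illustration of that behaviour, driving the fake directly (an assumed usage; the manual 'userdevice' assignment is only needed because this bypasses the VBD.create path that would normally populate it):

    from nova.virt.xenapi import fake

    fake.reset()
    fake.create_local_srs()
    session = fake.SessionBase('http://localhost/')

    vm_ref = fake.create_vm('demo', 'Running')
    sr_ref = fake.get_all('SR')[0]
    vdi_ref = fake.create_vdi('demo', False, sr_ref, False)
    vbd_ref = fake.create_vbd(vm_ref, vdi_ref)
    fake.get_record('VBD', vbd_ref)['userdevice'] = '0'

    session.VBD_plug('fake-session', vbd_ref)        # attaches, device = '0'
    try:
        session.VBD_plug('fake-session', vbd_ref)    # double plug is rejected
    except fake.Failure, e:
        assert e.details[0] == 'DEVICE_ALREADY_ATTACHED'
    session.VBD_unplug('fake-session', vbd_ref)      # detaches cleanly
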
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index eb0393d2a..b80ff4dba 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,11 +19,14 @@ Helper methods for operations related to the management of VM records and
 their attributes like VDIs, VIFs, as well as their lookup functions.
 """
 
+import os
 import pickle
+import re
 import urllib
 from xml.dom import minidom
 
 from eventlet import event
+import glance.client
 from nova import exception
 from nova import flags
 from nova import log as logging
@@ -47,17 +50,23 @@ XENAPI_POWER_STATE = {
     'Crashed': power_state.CRASHED}
 
 
+SECTOR_SIZE = 512
+MBR_SIZE_SECTORS = 63
+MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+KERNEL_DIR = '/boot/guest'
+
+
 class ImageType:
-        """
-        Enumeration class for distinguishing different image types
-            0 - kernel/ramdisk image (goes on dom0's filesystem)
-            1 - disk image (local SR, partitioned by objectstore plugin)
-            2 - raw disk image (local SR, NOT partitioned by plugin)
-        """
+    """
+    Enumeration class for distinguishing different image types
+        0 - kernel/ramdisk image (goes on dom0's filesystem)
+        1 - disk image (local SR, partitioned by objectstore plugin)
+        2 - raw disk image (local SR, NOT partitioned by plugin)
+    """
 
-        KERNEL_RAMDISK = 0
-        DISK = 1
-        DISK_RAW = 2
+    KERNEL_RAMDISK = 0
+    DISK = 1
+    DISK_RAW = 2
 
 
 class VMHelper(HelperBase):
@@ -207,6 +216,25 @@ class VMHelper(HelperBase):
         return vif_ref
 
     @classmethod
+    def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
+        """Create a VDI record and returns its reference."""
+        vdi_ref = session.get_xenapi().VDI.create(
+            {'name_label': name_label,
+             'name_description': '',
+             'SR': sr_ref,
+             'virtual_size': str(virtual_size),
+             'type': 'User',
+             'sharable': False,
+             'read_only': read_only,
+             'xenstore_data': {},
+             'other_config': {},
+             'sm_config': {},
+             'tags': []})
+        LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
+                  name_label, virtual_size, read_only, sr_ref)
+        return vdi_ref
+
+    @classmethod
     def create_snapshot(cls, session, instance_id, vm_ref, label):
         """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
         Snapshot VHD
@@ -256,15 +284,71 @@ class VMHelper(HelperBase):
     def fetch_image(cls, session, instance_id, image, user, project, type):
         """
         type is interpreted as an ImageType instance
+        Related flags:
+            xenapi_image_service = ['glance', 'objectstore']
+            glance_address = 'address for glance services'
+            glance_port = 'port for glance services'
         """
-        url = images.image_url(image)
         access = AuthManager().get_access_key(user, project)
+
+        if FLAGS.xenapi_image_service == 'glance':
+            return cls._fetch_image_glance(session, instance_id, image,
+                                           access, type)
+        else:
+            return cls._fetch_image_objectstore(session, instance_id, image,
+                                                access, user.secret, type)
+
+    @classmethod
+    def _fetch_image_glance(cls, session, instance_id, image, access, type):
+        sr = find_sr(session)
+        if sr is None:
+            raise exception.NotFound('Cannot find SR to write VDI to')
+
+        c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+
+        meta, image_file = c.get_image(image)
+        virtual_size = int(meta['size'])
+        vdi_size = virtual_size
+        LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+        if type == ImageType.DISK:
+            # Make room for MBR.
+            vdi_size += MBR_SIZE_BYTES
+
+        vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
+                             vdi_size, False)
+
+        with_vdi_attached_here(session, vdi, False,
+                               lambda dev:
+                               _stream_disk(dev, type,
+                                            virtual_size, image_file))
+        if (type == ImageType.KERNEL_RAMDISK):
+            #we need to invoke a plugin for copying VDI's
+            #content into proper path
+            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+            fn = "copy_kernel_vdi"
+            args = {}
+            args['vdi-ref'] = vdi
+            #let the plugin copy the correct number of bytes
+            args['image-size'] = str(vdi_size)
+            task = session.async_call_plugin('glance', fn, args)
+            filename = session.wait_for_task(instance_id, task)
+            #remove the VDI as it is not needed anymore
+            session.get_xenapi().VDI.destroy(vdi)
+            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+            return filename
+        else:
+            return session.get_xenapi().VDI.get_uuid(vdi)
+
+    @classmethod
+    def _fetch_image_objectstore(cls, session, instance_id, image, access,
+                                 secret, type):
+        url = images.image_url(image)
         LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
         fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
         args = {}
         args['src_url'] = url
         args['username'] = access
-        args['password'] = user.secret
+        args['password'] = secret
         args['add_partition'] = 'false'
         args['raw'] = 'false'
         if type != ImageType.KERNEL_RAMDISK:
@@ -276,14 +360,21 @@ class VMHelper(HelperBase):
         return uuid
 
     @classmethod
-    def lookup_image(cls, session, vdi_ref):
+    def lookup_image(cls, session, instance_id, vdi_ref):
+        if FLAGS.xenapi_image_service == 'glance':
+            return cls._lookup_image_glance(session, vdi_ref)
+        else:
+            return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+
+    @classmethod
+    def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
         LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
         fn = "is_vdi_pv"
         args = {}
         args['vdi-ref'] = vdi_ref
-        #TODO: Call proper function in plugin
         task = session.async_call_plugin('objectstore', fn, args)
-        pv_str = session.wait_for_task(task)
+        pv_str = session.wait_for_task(instance_id, task)
+        pv = None
         if pv_str.lower() == 'true':
             pv = True
         elif pv_str.lower() == 'false':
@@ -292,6 +383,23 @@ class VMHelper(HelperBase):
         return pv
 
     @classmethod
+    def _lookup_image_glance(cls, session, vdi_ref):
+        LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+
+        def is_vdi_pv(dev):
+            LOG.debug(_("Running pygrub against %s"), dev)
+            output = os.popen('pygrub -qn /dev/%s' % dev)
+            for line in output.readlines():
+                #try to find kernel string
+                m = re.search('(?<=kernel:)/.*(?:>)', line)
+                if m and m.group(0).find('xen') != -1:
+                    LOG.debug(_("Found Xen kernel %s") % m.group(0))
+                    return True
+            LOG.debug(_("No Xen kernel found.  Booting HVM."))
+            return False
+        return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+
+    @classmethod
     def lookup(cls, session, i):
         """Look the instance i up, and returns it if available"""
         vms = session.get_xenapi().VM.get_by_name_label(i)
@@ -464,3 +572,123 @@ def get_vdi_for_vm_safely(session, vm_ref):
     vdi_ref = vdi_refs[0]
     vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
     return vdi_ref, vdi_rec
+
+
+def find_sr(session):
+    host = session.get_xenapi_host()
+    srs = session.get_xenapi().SR.get_all()
+    for sr in srs:
+        sr_rec = session.get_xenapi().SR.get_record(sr)
+        if not ('i18n-key' in sr_rec['other_config'] and
+                sr_rec['other_config']['i18n-key'] == 'local-storage'):
+            continue
+        for pbd in sr_rec['PBDs']:
+            pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+            if pbd_rec['host'] == host:
+                return sr
+    return None
+
+
+def with_vdi_attached_here(session, vdi, read_only, f):
+    this_vm_ref = get_this_vm_ref(session)
+    vbd_rec = {}
+    vbd_rec['VM'] = this_vm_ref
+    vbd_rec['VDI'] = vdi
+    vbd_rec['userdevice'] = 'autodetect'
+    vbd_rec['bootable'] = False
+    vbd_rec['mode'] = read_only and 'RO' or 'RW'
+    vbd_rec['type'] = 'disk'
+    vbd_rec['unpluggable'] = True
+    vbd_rec['empty'] = False
+    vbd_rec['other_config'] = {}
+    vbd_rec['qos_algorithm_type'] = ''
+    vbd_rec['qos_algorithm_params'] = {}
+    vbd_rec['qos_supported_algorithms'] = []
+    LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
+    vbd = session.get_xenapi().VBD.create(vbd_rec)
+    LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+    try:
+        LOG.debug(_('Plugging VBD %s ... '), vbd)
+        session.get_xenapi().VBD.plug(vbd)
+        LOG.debug(_('Plugging VBD %s done.'), vbd)
+        return f(session.get_xenapi().VBD.get_device(vbd))
+    finally:
+        LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
+        vbd_unplug_with_retry(session, vbd)
+        ignore_failure(session.get_xenapi().VBD.destroy, vbd)
+        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+
+
+def vbd_unplug_with_retry(session, vbd):
+    """Call VBD.unplug on the given VBD, with a retry if we get
+    DEVICE_DETACH_REJECTED.  For reasons which I don't understand, we're
+    seeing the device still in use, even when all processes using the device
+    should be dead."""
+    while True:
+        try:
+            session.get_xenapi().VBD.unplug(vbd)
+            LOG.debug(_('VBD.unplug successful first time.'))
+            return
+        except VMHelper.XenAPI.Failure, e:
+            if (len(e.details) > 0 and
+                e.details[0] == 'DEVICE_DETACH_REJECTED'):
+                LOG.debug(_('VBD.unplug rejected: retrying...'))
+                time.sleep(1)
+            elif (len(e.details) > 0 and
+                  e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+                LOG.debug(_('VBD.unplug successful eventually.'))
+                return
+            else:
+                LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+                          e)
+                return
+
+
+def ignore_failure(func, *args, **kwargs):
+    try:
+        return func(*args, **kwargs)
+    except VMHelper.XenAPI.Failure, e:
+        LOG.error(_('Ignoring XenAPI.Failure %s'), e)
+        return None
+
+
+def get_this_vm_uuid():
+    with file('/sys/hypervisor/uuid') as f:
+        return f.readline().strip()
+
+
+def get_this_vm_ref(session):
+    return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
+
+
+def _stream_disk(dev, type, virtual_size, image_file):
+    offset = 0
+    if type == ImageType.DISK:
+        offset = MBR_SIZE_BYTES
+        _write_partition(virtual_size, dev)
+
+    with open('/dev/%s' % dev, 'wb') as f:
+        f.seek(offset)
+        for chunk in image_file:
+            f.write(chunk)
+
+
+def _write_partition(virtual_size, dev):
+    dest = '/dev/%s' % dev
+    mbr_last = MBR_SIZE_SECTORS - 1
+    primary_first = MBR_SIZE_SECTORS
+    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+    LOG.debug(_('Writing partition table %d %d to %s...'),
+              primary_first, primary_last, dest)
+
+    def execute(cmd, process_input=None, check_exit_code=True):
+        return utils.execute(cmd=cmd,
+                             process_input=process_input,
+                             check_exit_code=check_exit_code)
+
+    execute('parted --script %s mklabel msdos' % dest)
+    execute('parted --script %s mkpart primary %ds %ds' %
+            (dest, primary_first, primary_last))
+
+    LOG.debug(_('Writing partition table %s done.'), dest)
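
For reference, the geometry that _write_partition produces works out as follows; a worked example with an assumed 10 GiB image, using the constants defined at the top of vm_utils.py ('/dev/xvdX' stands in for whatever device the VDI attaches as):

    SECTOR_SIZE = 512
    MBR_SIZE_SECTORS = 63
    MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE   # 32256 bytes reserved

    virtual_size = 10 * 1024 ** 3                     # example: 10 GiB image
    primary_first = MBR_SIZE_SECTORS                  # data starts at sector 63
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
    # primary_last == 20971582, so the commands issued are:
    #   parted --script /dev/xvdX mklabel msdos
    #   parted --script /dev/xvdX mkpart primary 63s 20971582s
    # _stream_disk then seeks past the 32256-byte gap before writing the image.
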
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 5e414bab4..6c2fd6a68 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -85,7 +85,8 @@ class VMOps(object):
         #Have a look at the VDI and see if it has a PV kernel
         pv_kernel = False
         if not instance.kernel_id:
-            pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+            pv_kernel = VMHelper.lookup_image(self._session, instance.id,
+                                              vdi_ref)
         kernel = None
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(self._session, instance.id,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c10f73fe7..72ec6bddb 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -89,6 +89,9 @@ flags.DEFINE_float('xenapi_task_poll_interval',
                    'The interval used for polling of remote tasks '
                    '(Async.VM.start, etc).  Used only if '
                    'connection_type=xenapi.')
+flags.DEFINE_string('xenapi_image_service',
+                    'glance',
+                    'Where to get VM images: glance or objectstore.')
 flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
                    5.0,
                    'The interval used for polling of coalescing vhds.'
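
The flag is consulted at fetch and lookup time, so callers can switch services per run, as the tests above do. A hedged sketch of opting back into the old behaviour (FLAGS comes from nova.flags; the command-line equivalent would be --xenapi_image_service=objectstore):

    from nova import flags

    FLAGS = flags.FLAGS
    # 'glance' is the default declared above; 'objectstore' restores the
    # previous image-fetch path.
    FLAGS.xenapi_image_service = 'objectstore'
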
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index cc34a1ec9..aadacce57 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -18,7 +18,7 @@
 # under the License.
 #
-# XenAPI plugin for putting images into glance
+# XenAPI plugin for managing glance images
 #
 
 import base64
@@ -40,8 +40,36 @@ from pluginlib_nova import *
 configure_logging('glance')
 
 CHUNK_SIZE = 8192
+KERNEL_DIR = '/boot/guest'
 FILE_SR_PATH = '/var/run/sr-mount'
 
+
+def copy_kernel_vdi(session,args):
+    vdi = exists(args, 'vdi-ref')
+    size = exists(args,'image-size')
+    #Use the uuid as a filename
+    vdi_uuid=session.xenapi.VDI.get_uuid(vdi)
+    copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)}
+    filename=with_vdi_in_dom0(session, vdi, False,
+                            lambda dev:
+                            _copy_kernel_vdi('/dev/%s' % dev,copy_args))
+    return filename
+
+
+def _copy_kernel_vdi(dest,copy_args):
+    vdi_uuid=copy_args['vdi_uuid']
+    vdi_size=copy_args['vdi_size']
+    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid)
+    filename=KERNEL_DIR + '/' + vdi_uuid
+    #read data from /dev/ and write into a file on /boot/guest
+    of=open(filename,'wb')
+    f=open(dest,'rb')
+    #copy only vdi_size bytes
+    data=f.read(vdi_size)
+    of.write(data)
+    f.close()
+    of.close()
+    logging.debug("Done. Filename: %s",filename)
+    return filename
+
 def put_vdis(session, args):
     params = pickle.loads(exists(args, 'params'))
     vdi_uuids = params["vdi_uuids"]
@@ -128,4 +156,5 @@ def find_sr(session):
 
 
 if __name__ == '__main__':
-    XenAPIPlugin.dispatch({'put_vdis': put_vdis})
+    XenAPIPlugin.dispatch({'put_vdis': put_vdis,
+                           'copy_kernel_vdi': copy_kernel_vdi})
diff --git a/tools/pip-requires b/tools/pip-requires
index 3aa76c24d..a6676a5e9 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -26,3 +26,4 @@ Twisted>=10.1.0
 PasteDeploy
 paste
 netaddr
+glance