author     Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>  2011-01-21 20:04:02 +0900
committer  Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>  2011-01-21 20:04:02 +0900
commit     d55e281efef06dbbcfec9ef4aad4ed0bac9a9368 (patch)
tree       4ae24944e609ae20092e8e6a2219b6da963de4c4 /nova/virt
parent     3294d3f98cb78b169656711c73547e1cf0527432 (diff)
parent     14edbd55e667b16b8d46c0230b11ccd964f5742f (diff)
Merged with rev597
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/fake.py                48
-rw-r--r--  nova/virt/libvirt.xml.template   10
-rw-r--r--  nova/virt/libvirt_conn.py        25
-rw-r--r--  nova/virt/xenapi/fake.py         87
-rw-r--r--  nova/virt/xenapi/vm_utils.py    256
-rw-r--r--  nova/virt/xenapi/vmops.py         3
-rw-r--r--  nova/virt/xenapi_conn.py          3
7 files changed, 392 insertions, 40 deletions
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index a57a8f43b..f8b3c7807 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -310,6 +310,54 @@ class FakeConnection(object):
'username': 'fakeuser',
'password': 'fakepassword'}
+ def refresh_security_group_rules(self, security_group_id):
+ """This method is called after a change to security groups.
+
+ All security groups and their associated rules live in the datastore,
+ and calling this method should apply the updated rules to instances
+ that are members of the specified security group.
+
+ An error should be raised if the operation cannot complete.
+
+ """
+ return True
+
+ def refresh_security_group_members(self, security_group_id):
+ """This method is called when a security group is added to an instance.
+
+ This message is sent to the virtualization drivers on hosts that are
+ running an instance that belongs to a security group that has a rule
+ that references the security group identified by `security_group_id`.
+ It is the responsibility of this method to make sure any rules
+ that authorize traffic flow with members of the security group are
+ updated and any new members can communicate, and any removed members
+ cannot.
+
+ Scenario:
+ * we are running on host 'H0' and we have an instance 'i-0'.
+ * instance 'i-0' is a member of security group 'speaks-b'
+ * group 'speaks-b' has an ingress rule that authorizes group 'b'
+ * another host 'H1' runs an instance 'i-1'
+ * instance 'i-1' is a member of security group 'b'
+
+ When 'i-1' launches or terminates we will receive the message
+ to update members of group 'b', at which time we will make
+ any changes needed to the rules for instance 'i-0' to allow
+ or deny traffic coming from 'i-1', depending on whether it is
+ being added to or removed from the group.
+
+ In this scenario, 'i-1' could just as easily have been running on our
+ host 'H0' and this method would still have been called. The point is
+ that this method isn't called on the hosts where members of that
+ group are running (as is the case with
+ :method:`refresh_security_group_rules`) but on the hosts whose
+ instances reference the group in their rules.
+
+ An error should be raised if the operation cannot complete.
+
+ """
+ return True
+
class FakeInstance(object):
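
The two callbacks documented above split the work between hosts: refresh_security_group_rules reapplies a group's own rules where its members run, while refresh_security_group_members updates hosts whose instances merely reference the group. A minimal sketch of that division of labour; the rule_writer and db helpers are hypothetical stand-ins, not part of this patch:

# Hypothetical sketch, not part of this patch.
class SketchConnection(object):
    def __init__(self, rule_writer, db):
        self.rule_writer = rule_writer   # hypothetical firewall backend
        self.db = db                     # hypothetical datastore accessor

    def refresh_security_group_rules(self, security_group_id):
        # Reapply the group's (possibly changed) rules to every local
        # instance that is a member of the group.
        rules = self.db.rules_for_group(security_group_id)
        for instance in self.db.local_members(security_group_id):
            self.rule_writer.apply(instance, rules)
        return True

    def refresh_security_group_members(self, security_group_id):
        # A group we host references this group in an ingress rule;
        # refresh the member list so new members are allowed and
        # removed members are denied.
        members = self.db.member_addresses(security_group_id)
        for instance in self.db.local_referencing_instances(security_group_id):
            self.rule_writer.update_sources(instance, members)
        return True
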
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 3ec82e403..88bfbc668 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -18,10 +18,10 @@
#set $disk_prefix = 'vd'
#set $disk_bus = 'virtio'
<type>hvm</type>
- #end if
+ #end if
#if $getVar('rescue', False)
- <kernel>${basepath}/rescue-kernel</kernel>
- <initrd>${basepath}/rescue-ramdisk</initrd>
+ <kernel>${basepath}/kernel.rescue</kernel>
+ <initrd>${basepath}/ramdisk.rescue</initrd>
#else
#if $getVar('kernel', None)
<kernel>${kernel}</kernel>
@@ -47,7 +47,7 @@
#if $getVar('rescue', False)
<disk type='file'>
<driver type='${driver_type}'/>
- <source file='${basepath}/rescue-disk'/>
+ <source file='${basepath}/disk.rescue'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
<disk type='file'>
@@ -64,7 +64,7 @@
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
- <source file='${basepath}/local'/>
+ <source file='${basepath}/disk.local'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
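
For reference, with rescue=True the template above now expands to something like the following (illustrative values: basepath /var/lib/nova/instances/instance-00000001, driver_type qcow2, and the virtio disk_prefix/disk_bus set at the top of the template):

<kernel>/var/lib/nova/instances/instance-00000001/kernel.rescue</kernel>
<initrd>/var/lib/nova/instances/instance-00000001/ramdisk.rescue</initrd>
...
<disk type='file'>
  <driver type='qcow2'/>
  <source file='/var/lib/nova/instances/instance-00000001/disk.rescue'/>
  <target dev='vda' bus='virtio'/>
</disk>
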
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 55c193e20..8ad83731f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -350,7 +350,7 @@ class LibvirtConnection(object):
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(instance, xml, 'rescue-', rescue_images)
+ self._create_image(instance, xml, '.rescue', rescue_images)
self._conn.createXML(xml, 0)
timer = utils.LoopingCall(f=None)
@@ -532,23 +532,23 @@ class LibvirtConnection(object):
utils.execute('truncate %s -s %dG' % (target, local_gb))
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
+ def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
# syntactic nicety
- def basepath(fname='', prefix=prefix):
+ def basepath(fname='', suffix=suffix):
return os.path.join(FLAGS.instances_path,
inst['name'],
- prefix + fname)
+ fname + suffix)
# ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(prefix=''))
- utils.execute('chmod 0777 %s' % basepath(prefix=''))
+ utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('chmod 0777 %s' % basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
- # NOTE(vish): No need add the prefix to console.log
+ # NOTE(vish): No need to add the suffix to console.log
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
@@ -577,7 +577,7 @@ class LibvirtConnection(object):
root_fname = disk_images['image_id']
size = FLAGS.minimum_root_size
- if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
+ if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
root_fname += "_sm"
@@ -593,7 +593,7 @@ class LibvirtConnection(object):
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
- target=basepath('local'),
+ target=basepath('disk.local'),
fname="local_%s" % type_data['local_gb'],
cow=FLAGS.use_cow_images,
local_gb=type_data['local_gb'])
@@ -733,7 +733,8 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError("diagnostics are not supported for libvirt")
+ raise exception.APIError(_("diagnostics are not supported "
+ "for libvirt"))
def get_disks(self, instance_name):
"""
@@ -1133,6 +1134,10 @@ class NWFilterFirewall(FirewallDriver):
return
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
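
The prefix-to-suffix switch above changes the on-disk names inside an instance directory: what used to be rescue-disk and local now sit next to the files they shadow as disk.rescue and disk.local. A minimal sketch of the renamed basepath helper, with an illustrative instances_path:

# Sketch of the suffix-based naming; paths are illustrative.
import os

def basepath(instances_path, name, fname='', suffix=''):
    return os.path.join(instances_path, name, fname + suffix)

base = '/var/lib/nova/instances'
assert basepath(base, 'instance-1', 'disk', '.rescue') == \
    '/var/lib/nova/instances/instance-1/disk.rescue'
assert basepath(base, 'instance-1', 'disk.local') == \
    '/var/lib/nova/instances/instance-1/disk.local'
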
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 96d8f5fc8..4bfaf4b57 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -76,6 +76,7 @@ def reset():
for c in _CLASSES:
_db_content[c] = {}
create_host('fake')
+ create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
def create_host(name_label):
@@ -136,14 +137,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
def create_vbd(vm_ref, vdi_ref):
- vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+ vbd_rec = {
+ 'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'currently_attached': False,
+ }
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
- """Create backref from VM to VBD when VBD is created"""
+ """Create read-only fields and backref from VM to VBD when VBD is
+ created."""
+ vbd_rec['currently_attached'] = False
+ vbd_rec['device'] = ''
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'] = [vbd_ref]
@@ -152,9 +160,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
vbd_rec['vm_name_label'] = vm_name_label
-def create_pbd(config, sr_ref, attached):
+def create_pbd(config, host_ref, sr_ref, attached):
return _create_object('PBD', {
'device-config': config,
+ 'host': host_ref,
'SR': sr_ref,
'currently-attached': attached,
})
@@ -167,6 +176,33 @@ def create_task(name_label):
})
+def create_local_srs():
+ """Create an SR that looks like the one created on the local disk by
+ default by the XenServer installer. Do this once per host."""
+ for host_ref in _db_content['host'].keys():
+ _create_local_sr(host_ref)
+
+
+def _create_local_sr(host_ref):
+ sr_ref = _create_object('SR', {
+ 'name_label': 'Local storage',
+ 'type': 'lvm',
+ 'content_type': 'user',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage',
+ 'i18n-key': 'local-storage',
+ },
+ 'VDIs': []
+ })
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ return sr_ref
+
+
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
@@ -179,9 +215,10 @@ def _create_sr(table, obj):
# Forces fake to support iscsi only
if sr_type != 'iscsi':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+ host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
vdi_ref = create_vdi('', False, sr_ref, False)
- pbd_ref = create_pbd('', sr_ref, True)
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
@@ -233,6 +270,20 @@ class SessionBase(object):
def __init__(self, uri):
self._session = None
+ def VBD_plug(self, _1, ref):
+ rec = get_record('VBD', ref)
+ if rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+ rec['currently_attached'] = True
+ rec['device'] = rec['userdevice']
+
+ def VBD_unplug(self, _1, ref):
+ rec = get_record('VBD', ref)
+ if not rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+ rec['currently_attached'] = False
+ rec['device'] = ''
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -289,6 +340,8 @@ class SessionBase(object):
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
+ elif self._is_destroy(name):
+ return lambda *params: self._destroy(name, params)
else:
return None
@@ -299,10 +352,16 @@ class SessionBase(object):
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
+ return self._is_method(name, 'create')
+
+ def _is_destroy(self, name):
+ return self._is_method(name, 'destroy')
+
+ def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
- bits[1] == 'create')
+ bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
@@ -370,10 +429,9 @@ class SessionBase(object):
_create_sr(cls, params) or _create_object(cls, params[1])
# Call hook to provide any fixups needed (ex. creating backrefs)
- try:
- globals()["after_%s_create" % cls](ref, params[1])
- except KeyError:
- pass
+ after_hook = 'after_%s_create' % cls
+ if after_hook in globals():
+ globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
@@ -383,6 +441,15 @@ class SessionBase(object):
return ref
+ def _destroy(self, name, params):
+ self._check_session(params)
+ self._check_arg_count(params, 2)
+ table, _ = name.split('.')
+ ref = params[1]
+ if ref not in _db_content[table]:
+ raise Failure(['HANDLE_INVALID', table, ref])
+ del _db_content[table][ref]
+
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
@@ -420,7 +487,7 @@ class SessionBase(object):
try:
return result[0]
except IndexError:
- return None
+ raise Failure(['UUID_INVALID', v, result, recs, k])
return result
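
The fake session resolves XenAPI calls such as VBD.create and VBD.destroy purely by splitting the method name, which is what the new _is_method helper generalizes. A condensed, standalone sketch of that dispatch pattern (not the fake's real internals):

# Condensed sketch of SessionBase's name-based dispatch.
_CLASSES = ['VM', 'VBD', 'VDI', 'SR']   # abbreviated class list

def _is_method(name, meth):
    bits = name.split('.')
    return (len(bits) == 2 and
            bits[0] in _CLASSES and
            bits[1] == meth)

def dispatch(name):
    if _is_method(name, 'create'):
        return 'create handler for ' + name.split('.')[0]
    elif _is_method(name, 'destroy'):
        return 'destroy handler for ' + name.split('.')[0]
    return None

assert dispatch('VBD.create') is not None
assert dispatch('VBD.destroy') is not None
assert dispatch('VBD.frobnicate') is None
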
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index eb0393d2a..b80ff4dba 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,11 +19,14 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
+import os
import pickle
+import re
import urllib
from xml.dom import minidom
from eventlet import event
+import glance.client
from nova import exception
from nova import flags
from nova import log as logging
@@ -47,17 +50,23 @@ XENAPI_POWER_STATE = {
'Crashed': power_state.CRASHED}
+SECTOR_SIZE = 512
+MBR_SIZE_SECTORS = 63
+MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+KERNEL_DIR = '/boot/guest'
+
+
class ImageType:
- """
- Enumeration class for distinguishing different image types
- 0 - kernel/ramdisk image (goes on dom0's filesystem)
- 1 - disk image (local SR, partitioned by objectstore plugin)
- 2 - raw disk image (local SR, NOT partitioned by plugin)
- """
+ """
+ Enumeration class for distinguishing different image types
+ 0 - kernel/ramdisk image (goes on dom0's filesystem)
+ 1 - disk image (local SR, partitioned by objectstore plugin)
+ 2 - raw disk image (local SR, NOT partitioned by plugin)
+ """
- KERNEL_RAMDISK = 0
- DISK = 1
- DISK_RAW = 2
+ KERNEL_RAMDISK = 0
+ DISK = 1
+ DISK_RAW = 2
class VMHelper(HelperBase):
@@ -207,6 +216,25 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
+ def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
+ """Create a VDI record and returns its reference."""
+ vdi_ref = session.get_xenapi().VDI.create(
+ {'name_label': name_label,
+ 'name_description': '',
+ 'SR': sr_ref,
+ 'virtual_size': str(virtual_size),
+ 'type': 'User',
+ 'sharable': False,
+ 'read_only': read_only,
+ 'xenstore_data': {},
+ 'other_config': {},
+ 'sm_config': {},
+ 'tags': []})
+ LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
+ name_label, virtual_size, read_only, sr_ref)
+ return vdi_ref
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
""" Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
@@ -256,15 +284,71 @@ class VMHelper(HelperBase):
def fetch_image(cls, session, instance_id, image, user, project, type):
"""
type is interpreted as an ImageType instance
+ Related flags:
+ xenapi_image_service = ['glance', 'objectstore']
+ glance_host = 'address for glance services'
+ glance_port = 'port for glance services'
"""
- url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
+
+ if FLAGS.xenapi_image_service == 'glance':
+ return cls._fetch_image_glance(session, instance_id, image,
+ access, type)
+ else:
+ return cls._fetch_image_objectstore(session, instance_id, image,
+ access, user.secret, type)
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access, type):
+ sr = find_sr(session)
+ if sr is None:
+ raise exception.NotFound('Cannot find SR to write VDI to')
+
+ c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+
+ meta, image_file = c.get_image(image)
+ virtual_size = int(meta['size'])
+ vdi_size = virtual_size
+ LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+ if type == ImageType.DISK:
+ # Make room for MBR.
+ vdi_size += MBR_SIZE_BYTES
+
+ vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
+ vdi_size, False)
+
+ with_vdi_attached_here(session, vdi, False,
+ lambda dev:
+ _stream_disk(dev, type,
+ virtual_size, image_file))
+ if (type == ImageType.KERNEL_RAMDISK):
+ # we need to invoke a plugin to copy the VDI's
+ # content into the proper path
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi
+ # let the plugin copy the correct number of bytes
+ args['image-size'] = str(vdi_size)
+ task = session.async_call_plugin('glance', fn, args)
+ filename = session.wait_for_task(instance_id, task)
+ # remove the VDI as it is not needed anymore
+ session.get_xenapi().VDI.destroy(vdi)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+ return filename
+ else:
+ return session.get_xenapi().VDI.get_uuid(vdi)
+
+ @classmethod
+ def _fetch_image_objectstore(cls, session, instance_id, image, access,
+ secret, type):
+ url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
- args['password'] = user.secret
+ args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
if type != ImageType.KERNEL_RAMDISK:
@@ -276,14 +360,21 @@ class VMHelper(HelperBase):
return uuid
@classmethod
- def lookup_image(cls, session, vdi_ref):
+ def lookup_image(cls, session, instance_id, vdi_ref):
+ if FLAGS.xenapi_image_service == 'glance':
+ return cls._lookup_image_glance(session, vdi_ref)
+ else:
+ return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+
+ @classmethod
+ def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
- #TODO: Call proper function in plugin
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(task)
+ pv_str = session.wait_for_task(instance_id, task)
+ pv = None
if pv_str.lower() == 'true':
pv = True
elif pv_str.lower() == 'false':
@@ -292,6 +383,23 @@ class VMHelper(HelperBase):
return pv
@classmethod
+ def _lookup_image_glance(cls, session, vdi_ref):
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+
+ def is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ # try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+ return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+
+ @classmethod
def lookup(cls, session, i):
"""Look the instance i up, and returns it if available"""
vms = session.get_xenapi().VM.get_by_name_label(i)
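
The glance-backed PV lookup above boots nothing; it simply greps pygrub's output for a Xen kernel path. A quick illustration of what the regex accepts; the sample line is hypothetical, shaped to the pattern rather than captured from a real pygrub run:

# Hypothetical pygrub output line: 'kernel:' immediately followed by
# an absolute path with a trailing '>'.
import re

line = 'kernel:/boot/vmlinuz-2.6.32-xen>'
m = re.search('(?<=kernel:)/.*(?:>)', line)
assert m is not None
assert 'xen' in m.group(0)   # triggers the PV (paravirtual) branch
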
@@ -464,3 +572,123 @@ def get_vdi_for_vm_safely(session, vm_ref):
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec
+
+
+def find_sr(session):
+ host = session.get_xenapi_host()
+ srs = session.get_xenapi().SR.get_all()
+ for sr in srs:
+ sr_rec = session.get_xenapi().SR.get_record(sr)
+ if not ('i18n-key' in sr_rec['other_config'] and
+ sr_rec['other_config']['i18n-key'] == 'local-storage'):
+ continue
+ for pbd in sr_rec['PBDs']:
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+ if pbd_rec['host'] == host:
+ return sr
+ return None
+
+
+def with_vdi_attached_here(session, vdi, read_only, f):
+ this_vm_ref = get_this_vm_ref(session)
+ vbd_rec = {}
+ vbd_rec['VM'] = this_vm_ref
+ vbd_rec['VDI'] = vdi
+ vbd_rec['userdevice'] = 'autodetect'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = read_only and 'RO' or 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
+ vbd = session.get_xenapi().VBD.create(vbd_rec)
+ LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+ try:
+ LOG.debug(_('Plugging VBD %s ... '), vbd)
+ session.get_xenapi().VBD.plug(vbd)
+ LOG.debug(_('Plugging VBD %s done.'), vbd)
+ return f(session.get_xenapi().VBD.get_device(vbd))
+ finally:
+ LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
+ vbd_unplug_with_retry(session, vbd)
+ ignore_failure(session.get_xenapi().VBD.destroy, vbd)
+ LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+
+
+def vbd_unplug_with_retry(session, vbd):
+ """Call VBD.unplug on the given VBD, with a retry if we get
+ DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
+ seeing the device still in use, even when all processes using the device
+ should be dead."""
+ while True:
+ try:
+ session.get_xenapi().VBD.unplug(vbd)
+ LOG.debug(_('VBD.unplug successful first time.'))
+ return
+ except VMHelper.XenAPI.Failure, e:
+ if (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_DETACH_REJECTED'):
+ LOG.debug(_('VBD.unplug rejected: retrying...'))
+ time.sleep(1)
+ elif (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+ LOG.debug(_('VBD.unplug successful eventually.'))
+ return
+ else:
+ LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+ e)
+ return
+
+
+def ignore_failure(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except VMHelper.XenAPI.Failure, e:
+ LOG.error(_('Ignoring XenAPI.Failure %s'), e)
+ return None
+
+
+def get_this_vm_uuid():
+ with file('/sys/hypervisor/uuid') as f:
+ return f.readline().strip()
+
+
+def get_this_vm_ref(session):
+ return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
+
+
+def _stream_disk(dev, type, virtual_size, image_file):
+ offset = 0
+ if type == ImageType.DISK:
+ offset = MBR_SIZE_BYTES
+ _write_partition(virtual_size, dev)
+
+ with open('/dev/%s' % dev, 'wb') as f:
+ f.seek(offset)
+ for chunk in image_file:
+ f.write(chunk)
+
+
+def _write_partition(virtual_size, dev):
+ dest = '/dev/%s' % dev
+ mbr_last = MBR_SIZE_SECTORS - 1
+ primary_first = MBR_SIZE_SECTORS
+ primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+ LOG.debug(_('Writing partition table %d %d to %s...'),
+ primary_first, primary_last, dest)
+
+ def execute(cmd, process_input=None, check_exit_code=True):
+ return utils.execute(cmd=cmd,
+ process_input=process_input,
+ check_exit_code=check_exit_code)
+
+ execute('parted --script %s mklabel msdos' % dest)
+ execute('parted --script %s mkpart primary %ds %ds' %
+ (dest, primary_first, primary_last))
+
+ LOG.debug(_('Writing partition table %s done.'), dest)
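
The geometry constants at the top of vm_utils.py drive both the VDI sizing in _fetch_image_glance and the parted bounds in _write_partition. A worked check of the arithmetic, using an example 1 GiB image:

# Worked example of the partition arithmetic; the 1 GiB size is
# illustrative.
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE     # 32256 bytes

virtual_size = 1 << 30                              # example 1 GiB image
vdi_size = virtual_size + MBR_SIZE_BYTES            # extra room for the MBR

primary_first = MBR_SIZE_SECTORS                    # data starts at sector 63
primary_last = MBR_SIZE_SECTORS + (virtual_size // SECTOR_SIZE) - 1

assert virtual_size // SECTOR_SIZE == 2097152       # sectors in 1 GiB
assert (primary_last - primary_first + 1) * SECTOR_SIZE == virtual_size
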
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 5e414bab4..6c2fd6a68 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -85,7 +85,8 @@ class VMOps(object):
#Have a look at the VDI and see if it has a PV kernel
pv_kernel = False
if not instance.kernel_id:
- pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+ pv_kernel = VMHelper.lookup_image(self._session, instance.id,
+ vdi_ref)
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c98310dbc..c57c883c9 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -89,6 +89,9 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
+flags.DEFINE_string('xenapi_image_service',
+ 'glance',
+ 'Where to get VM images: glance or objectstore.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'