Diffstat (limited to 'nova')
-rw-r--r--  nova/api/ec2/cloud.py             1
-rw-r--r--  nova/api/openstack/servers.py    56
-rw-r--r--  nova/compute/api.py               1
-rw-r--r--  nova/compute/instance_types.py    2
-rw-r--r--  nova/tests/glance/stubs.py       40
-rw-r--r--  nova/tests/test_compute.py        8
-rw-r--r--  nova/tests/test_xenapi.py        71
-rw-r--r--  nova/tests/xenapi/stubs.py        6
-rw-r--r--  nova/virt/xenapi/vm_utils.py    201
-rw-r--r--  nova/virt/xenapi/vmops.py       107
-rw-r--r--  nova/virt/xenapi_conn.py          2
11 files changed, 410 insertions(+), 85 deletions(-)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 7458d307a..844ccbe5e 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -890,7 +890,6 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
- image = self.image_service.show(context, image_id)
image = self._format_image(context,
self.image_service.show(context,
image_id))
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 0e05a00a0..7d20f681b 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -51,7 +51,8 @@ def _translate_detail_keys(inst):
power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'}
+ power_state.CRASHED: 'error',
+ power_state.FAILED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
@@ -140,25 +141,6 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """
- Machine images are associated with Kernels and Ramdisk images via
- metadata stored in Glance as 'image_properties'
- """
- def lookup(param):
- _image_id = image_id
- try:
- return image['properties'][param]
- except KeyError:
- LOG.debug(
- _("%(param)s property not found for image %(_image_id)s") %
- locals())
- return None
-
- image_id = str(image_id)
- image = self._image_service.show(req.environ['nova.context'], image_id)
- return lookup('kernel_id'), lookup('ramdisk_id')
-
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@@ -382,3 +364,37 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """Retrevies kernel and ramdisk IDs from Glance
+
+ Only 'machine' (ami) type use kernel and ramdisk outside of the
+ image.
+ """
+ # FIXME(sirp): Since we're retrieving the kernel_id from an
+ # image_property, this means only Glance is supported.
+ # The BaseImageService needs to expose a consistent way of accessing
+ # kernel_id and ramdisk_id
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+
+ if image['status'] != 'active':
+ raise exception.Invalid(
+ _("Cannot build from image %(image_id)s, status not active") %
+ locals())
+
+ if image['type'] != 'machine':
+ return None, None
+
+ try:
+ kernel_id = image['properties']['kernel_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Kernel not found for image %(image_id)s") % locals())
+
+ try:
+ ramdisk_id = image['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Ramdisk not found for image %(image_id)s") % locals())
+
+ return kernel_id, ramdisk_id
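The relocated helper reduces to three outcomes: reject inactive images, return (None, None) for non-machine images, and raise NotFound when a 'machine' image lacks either property. A minimal sketch of that logic, using a plain dict in place of the real Glance response (the helper name and sample values are hypothetical; ValueError/LookupError stand in for exception.Invalid/exception.NotFound):

    def lookup_kernel_ramdisk(image):
        if image['status'] != 'active':
            raise ValueError('image status is not active')
        if image['type'] != 'machine':
            return None, None   # kernel/ramdisk live inside the image itself
        try:
            return (image['properties']['kernel_id'],
                    image['properties']['ramdisk_id'])
        except KeyError:
            raise LookupError('kernel or ramdisk property missing')

    image = {'status': 'active', 'type': 'machine',
             'properties': {'kernel_id': 7, 'ramdisk_id': 8}}
    assert lookup_kernel_ramdisk(image) == (7, 8)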
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c475e3bff..625778b66 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -129,6 +129,7 @@ class API(base.Base):
kernel_id = image.get('kernel_id', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdisk_id', None)
+ # FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..7a2a5baa3 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -45,6 +45,6 @@ def get_by_type(instance_type):
def get_by_flavor_id(flavor_id):
for instance_type, details in INSTANCE_TYPES.iteritems():
- if details['flavorid'] == flavor_id:
+ if details['flavorid'] == int(flavor_id):
return instance_type
return FLAGS.default_instance_type
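The int() coercion above matters because flavor ids can arrive as strings from the API layer (the new test further down passes "1"), and in Python '1' == 1 is False, so the old comparison never matched and the lookup always fell back to FLAGS.default_instance_type. A sketch with a stand-in table:

    INSTANCE_TYPES = {'m1.tiny': {'flavorid': 1}}

    def get_by_flavor_id(flavor_id):
        for name, details in INSTANCE_TYPES.items():
            if details['flavorid'] == int(flavor_id):   # the fix
                return name
        return 'm1.small'   # stand-in for FLAGS.default_instance_type

    assert ('1' == 1) is False                  # why the old code missed
    assert get_by_flavor_id('1') == 'm1.tiny'   # now matches either way
    assert get_by_flavor_id(1) == 'm1.tiny'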
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..3ff8d7ce5 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,40 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
+ IMAGE_MACHINE = 1
+ IMAGE_KERNEL = 2
+ IMAGE_RAMDISK = 3
+ IMAGE_RAW = 4
+ IMAGE_VHD = 5
+
+ IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'type': 'machine'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'type': 'kernel'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'type': 'ramdisk'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'type': 'raw'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'type': 'vhd'},
+ 'image_data': StringIO.StringIO('')}}
+
def __init__(self, host, port=None, use_ssl=False):
pass
- def get_image(self, image):
- meta = {
- 'size': 0,
- }
- image_file = StringIO.StringIO('')
- return meta, image_file
+ def get_image_meta(self, image_id):
+ return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+ def get_image(self, image_id):
+ image = self.IMAGE_FIXTURES[image_id]
+ return image['image_meta'], image['image_data']
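In tests, the expanded fixture can be exercised directly; a sketch of how a stubbed client call would look, assuming the stub module above is importable:

    from nova.tests.glance.stubs import FakeGlance

    client = FakeGlance('localhost')

    meta = client.get_image_meta(FakeGlance.IMAGE_VHD)
    assert meta['type'] == 'vhd'

    meta, data = client.get_image(FakeGlance.IMAGE_RAW)
    assert meta['name'] == 'fakeraw'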
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b049ac943..949b5e6eb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import instance_types
LOG = logging.getLogger('nova.tests.compute')
@@ -266,3 +267,10 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_get_by_flavor_id(self):
+ type = instance_types.get_by_flavor_id(1)
+ self.assertEqual(type, 'm1.tiny')
+
+ type = instance_types.get_by_flavor_id("1")
+ self.assertEqual(type, 'm1.tiny')
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2cbe58aab..b9bb6d5b4 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
@@ -284,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+
+ def test_spawn_vhd_glance(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, 2, 3)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
@@ -337,3 +344,63 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+ """
+ Unit tests for code that detects the ImageType
+ """
+ def setUp(self):
+ super(XenAPIDetermineDiskImageTestCase, self).setUp()
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ class FakeInstance(object):
+ pass
+
+ self.fake_instance = FakeInstance()
+ self.fake_instance.id = 42
+
+ def assert_disk_type(self, disk_type):
+ dt = vm_utils.VMHelper.determine_disk_image_type(
+ self.fake_instance)
+ self.assertEqual(disk_type, dt)
+
+ def test_instance_disk(self):
+ """
+ If a kernel is specified then the image type is DISK (aka machine)
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+ self.assert_disk_type(vm_utils.ImageType.DISK)
+
+ def test_instance_disk_raw(self):
+ """
+ If the kernel isn't specified, and we're not using Glance, then
+ DISK_RAW is assumed.
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_raw(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'raw'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_vhd(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'vhd'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 4fec2bd75..9beab86e7 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -177,6 +177,12 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+ def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
+ pass
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 564a25057..6d9aeb060 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -24,6 +24,7 @@ import pickle
import re
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
@@ -63,11 +64,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ Linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
@@ -276,29 +280,35 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+ # NOTE(sirp): Currently we only support uploading images as VHD; there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(instance_id, task)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,35 +318,80 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(instance_id, task)
- meta, image_file = c.get_image(image)
+ scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
+
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+ # VHD disk, it may be worth using the plugin for VHD, RAW, and DISK
+ # restores alike
+ sr_ref = safe_find_sr(session)
+
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
@@ -355,20 +410,87 @@ class VMHelper(HelperBase):
return session.get_xenapi().VDI.get_uuid(vdi)
@classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image's 'type' field to
+ determine the disk image type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_type2nova_type = {'machine': ImageType.DISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD,
+ 'kernel': ImageType.KERNEL_RAMDISK,
+ 'ramdisk': ImageType.KERNEL_RAMDISK}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ type_ = meta['type']
+ try:
+ return glance_type2nova_type[type_]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized image type '%(type_)s'") % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
+
+ @classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
uuid = session.wait_for_task(instance_id, task)
@@ -376,6 +498,9 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, instance_id, vdi_ref):
+ """
Determine if the VDI is using a PV kernel
+ """
if FLAGS.xenapi_image_service == 'glance':
return cls._lookup_image_glance(session, vdi_ref)
else:
@@ -587,7 +712,18 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
srs = session.get_xenapi().SR.get_all()
for sr in srs:
@@ -602,6 +738,18 @@ def find_sr(session):
return None
+def get_sr_path(session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
@@ -715,9 +863,9 @@ def _is_vdi_pv(dev):
return False
-def _stream_disk(dev, type, virtual_size, image_file):
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -746,3 +894,8 @@ def _write_partition(virtual_size, dev):
(dest, primary_first, primary_last))
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
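Taken together, the rules in determine_disk_image_type condense to a small decision function. A sketch, with plain strings standing in for the ImageType constants and explicit arguments in place of FLAGS and the Glance round-trip:

    GLANCE_TYPE_TO_NOVA = {'machine': 'DISK', 'raw': 'DISK_RAW',
                           'vhd': 'DISK_VHD', 'kernel': 'KERNEL_RAMDISK',
                           'ramdisk': 'KERNEL_RAMDISK'}

    def determine_disk_image_type(image_service, glance_type, kernel_id):
        # The real method also logs the detected format and raises
        # exception.NotFound for unrecognized Glance types.
        if image_service == 'glance':
            return GLANCE_TYPE_TO_NOVA[glance_type]
        return 'DISK' if kernel_id else 'DISK_RAW'

    assert determine_disk_image_type('glance', 'vhd', None) == 'DISK_VHD'
    assert determine_disk_image_type('objectstore', None, 'aki-1') == 'DISK'
    assert determine_disk_image_type('objectstore', None, None) == 'DISK_RAW'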
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2aa0dde70..bc39aa140 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -80,27 +80,33 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
- else:
- disk_image_type = ImageType.DISK_RAW
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+
vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
+
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
+
pv_kernel = False
- if not instance.kernel_id:
+ if disk_image_type == ImageType.DISK_RAW:
+ #Have a look at the VDI and see if it has a PV kernel
pv_kernel = VMHelper.lookup_image(self._session, instance.id,
vdi_ref)
+ elif disk_image_type == ImageType.DISK_VHD:
+ # TODO(sirp): Assuming PV for now; this will need to be
+ # configurable as Windows will use HVM.
+ pv_kernel = True
+
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
@@ -239,7 +245,8 @@ class VMOps(object):
VMHelper.upload_image(
self._session, instance.id, template_vdi_uuids, image_id)
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ self._destroy(instance, template_vm_ref, shutdown=False,
+ destroy_kernel_ramdisk=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -321,6 +328,9 @@ class VMOps(object):
locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
@@ -329,6 +339,9 @@ class VMOps(object):
def _destroy_vdis(self, instance, vm):
"""Destroys all VDIs associated with a VM """
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
if not vdis:
@@ -341,29 +354,56 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _destroy_kernel_ramdisk(self, instance, vm):
+ """
+ Three situations can occur:
+
+ 1. We have neither a kernel nor a ramdisk, in which case the image
+ is RAW or VHD and we can omit this step
+
+ 2. We have one but not the other, in which case we flag it as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+ "both" % locals()))
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(instance.id, task)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm)
+ self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
def destroy(self, instance):
"""
Destroy VM instance
@@ -371,26 +411,31 @@ class VMOps(object):
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
+ def _destroy(self, instance, vm, shutdown=True,
+ destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying kernel and ramdisk files (if necessary)
+ 4. Destroying the actual VM record
"""
if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
self._shutdown(instance, vm)
self._destroy_vdis(instance, vm)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm)
self._destroy_vm(instance, vm)
def _wait_with_callback(self, instance_id, task, callback):
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index fc56a4bae..60bfe0a9f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
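The new xenapi_sr_base_path flag is what get_sr_path (added in vm_utils.py above) joins with the SR's uuid; for file-based SRs this matches where XenServer mounts each repository in dom0, which is why the VHD plugin needs the path when copying .vhd files directly. A sketch with a made-up uuid:

    import os

    xenapi_sr_base_path = '/var/run/sr-mount'           # the flag's default
    sr_uuid = 'b1946ac9-2f6e-4b1a-8e6c-0d9ad6015b60'    # hypothetical SR uuid

    sr_path = os.path.join(xenapi_sr_base_path, sr_uuid)
    assert sr_path == '/var/run/sr-mount/b1946ac9-2f6e-4b1a-8e6c-0d9ad6015b60'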