 nova/compute/api.py                                                      |   8
 nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py |  51
 nova/db/sqlalchemy/models.py                                             |   2
 nova/tests/db/fakes.py                                                   |   1
 nova/tests/test_xenapi.py                                                |  98
 nova/virt/xenapi/vm_utils.py                                             | 163
 nova/virt/xenapi/vmops.py                                                |  21
 plugins/xenserver/xenapi/etc/xapi.d/plugins/glance                       |  11
 8 files changed, 276 insertions(+), 79 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 301f9ee18..62de27b73 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -125,6 +125,11 @@ class API(base.Base):
raise quota.QuotaError(msg, "MetadataLimitExceeded")
image = self.image_service.show(context, image_id)
+
+ os_type = None
+ if 'properties' in image and 'os_type' in image['properties']:
+ os_type = image['properties']['os_type']
+
if kernel_id is None:
kernel_id = image['properties'].get('kernel_id', None)
if ramdisk_id is None:
@@ -180,7 +185,8 @@ class API(base.Base):
'key_data': key_data,
'locked': False,
'metadata': metadata,
- 'availability_zone': availability_zone}
+ 'availability_zone': availability_zone,
+ 'os_type': os_type}
elevated = context.elevated()
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
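
The nested membership test added above is equivalent to a chained dict.get();
a minimal standalone sketch (the image dict here is invented for illustration):

    image = {'id': 1, 'properties': {'os_type': 'windows', 'kernel_id': 2}}
    os_type = image.get('properties', {}).get('os_type')  # None when absent
    assert os_type == 'windows'
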
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 000000000..eb3066894
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+instances_os_type = Column('os_type',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances.create_column(instances_os_type)
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.os_type == None)\
+ .values(os_type='linux'))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column('os_type')
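
On most backends the upgrade() above reduces to one ALTER plus one UPDATE; a
rough standalone sketch issuing the same statements directly through
SQLAlchemy (the SQLite URL is hypothetical):

    from sqlalchemy import create_engine

    engine = create_engine('sqlite:///nova.sqlite')  # hypothetical URL
    engine.execute('ALTER TABLE instances ADD COLUMN os_type VARCHAR(255)')
    # Backfill pre-existing rows, mirroring the .where(os_type == None):
    engine.execute("UPDATE instances SET os_type = 'linux' "
                   'WHERE os_type IS NULL')
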
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 6ef284e65..6bf058327 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -193,6 +193,8 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
+ os_type = Column(String(255))
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index d760dc456..142f6b1c6 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -77,6 +77,7 @@ def stub_out_db_instance_api(stubs):
'mac_address': values['mac_address'],
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
+ 'os_type': values['os_type']
}
return FakeModel(base_options)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index c26dc8639..cd125a301 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -18,6 +18,7 @@
Test suite for XenAPI
"""
+import functools
import stubout
from nova import db
@@ -41,6 +42,21 @@ from nova.tests.glance import stubs as glance_stubs
FLAGS = flags.FLAGS
+def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
+ """
+ vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
+ """
+    @functools.wraps(function)
+    def decorated_function(self, *args, **kwargs):
+        orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
+        vm_utils.with_vdi_attached_here = lambda *x: should_return
+        try:
+            function(self, *args, **kwargs)
+        finally:
+            # Restore the real implementation even if the test fails, so
+            # later tests are not left with the stub.
+            vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
+    return decorated_function
+
+
class XenAPIVolumeTestCase(test.TestCase):
"""
Unit tests for Volume operations
@@ -62,6 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'
}
def _create_volume(self, size='0'):
@@ -219,7 +236,7 @@ class XenAPIVMTestCase(test.TestCase):
check()
- def check_vm_record(self, conn):
+ def create_vm_record(self, conn, os_type):
instances = conn.list_instances()
self.assertEquals(instances, [1])
@@ -231,28 +248,63 @@ class XenAPIVMTestCase(test.TestCase):
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+ def check_vm_record(self, conn):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm['memory_static_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
- self.assertEquals(vm['VCPUs_max'], str(vcpus))
- self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+ self.assertEquals(self.vm_info['max_mem'], mem_kib)
+ self.assertEquals(self.vm_info['mem'], mem_kib)
+ self.assertEquals(self.vm['memory_static_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
+ self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
- self.assertEquals(vm['power_state'], 'Running')
+ self.assertEquals(self.vm['power_state'], 'Running')
+
+ def check_vm_params_for_windows(self):
+ self.assertEquals(self.vm['platform']['nx'], 'true')
+ self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_args'], '')
+ self.assertEquals(self.vm['PV_bootloader'], '')
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEquals(self.vm['PV_kernel'], '')
+ self.assertNotEquals(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
- instance_type="m1.large"):
+ instance_type="m1.large", os_type="linux"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1,
'id': 1,
@@ -263,10 +315,12 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': os_type
}
conn = xenapi_conn.get_connection(False)
instance = db.instance_create(values)
conn.spawn(instance)
+ self.create_vm_record(conn, os_type)
self.check_vm_record(conn)
def test_spawn_not_enough_memory(self):
@@ -283,24 +337,37 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
+ @stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_linux(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="linux")
+ self.check_vm_params_for_linux()
- def test_spawn_vhd_glance(self):
+ def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="windows")
+ self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ self.vm_info = None
+ self.vm = None
self.stubs.UnsetAll()
def _create_instance(self):
@@ -314,7 +381,8 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff'}
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance
@@ -372,6 +440,7 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'
}
stubs.stub_out_migration_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
@@ -410,6 +479,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
+ self.fake_instance.os_type = 'linux'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
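
Since stub_vm_utils_with_vdi_attached_here takes the test function as its
first positional argument, a test that wants should_return=False has to bind
the keyword first, e.g. with functools.partial; a hypothetical sketch:

    import functools

    @functools.partial(stub_vm_utils_with_vdi_attached_here,
                       should_return=False)
    def test_spawn_raw_glance_no_pv(self):  # hypothetical test name
        FLAGS.xenapi_image_service = 'glance'
        self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
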
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 4e6c71446..a1b85284f 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -80,10 +80,19 @@ class VMHelper(HelperBase):
"""
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
+ def create_vm(cls, session, instance, kernel, ramdisk,
+ use_pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
VM reference.
- the pv_kernel flag indicates whether the guest is HVM or PV
+        The use_pv_kernel flag indicates whether the guest uses a
+        paravirtualized kernel (True) or hardware virtualization (False).
+
+ There are 3 scenarios:
+
+ 1. Using paravirtualization, kernel passed in
+
+ 2. Using paravirtualization, kernel within the image
+
+ 3. Using hardware virtualization
"""
instance_type = instance_types.\
@@ -91,52 +100,62 @@ class VMHelper(HelperBase):
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
- 'name_label': instance.name,
- 'name_description': '',
+ 'actions_after_crash': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_shutdown': 'destroy',
+ 'affinity': '',
+ 'blocked_operations': {},
+ 'ha_always_run': False,
+ 'ha_restart_priority': '',
+ 'HVM_boot_params': {},
+ 'HVM_boot_policy': '',
'is_a_template': False,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'actions_after_shutdown': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_crash': 'destroy',
- 'PV_bootloader': '',
- 'PV_kernel': '',
- 'PV_ramdisk': '',
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_target': mem,
+ 'name_description': '',
+ 'name_label': instance.name,
+            'other_config': {'allowvssprovider': False},
+ 'PCI_bus': '',
+ 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
+ 'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
+ 'PV_bootloader': '',
'PV_bootloader_args': '',
+ 'PV_kernel': '',
'PV_legacy_args': '',
- 'HVM_boot_policy': '',
- 'HVM_boot_params': {},
- 'platform': {},
- 'PCI_bus': '',
+ 'PV_ramdisk': '',
'recommendations': '',
- 'affinity': '',
+ 'tags': [],
'user_version': '0',
- 'other_config': {},
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'xenstore_data': {}
}
- #Complete VM configuration record according to the image type
- #non-raw/raw with PV kernel/raw in HVM mode
- if instance.kernel_id:
- rec['PV_bootloader'] = ''
- rec['PV_kernel'] = kernel
- rec['PV_ramdisk'] = ramdisk
- rec['PV_args'] = 'root=/dev/xvda1'
- rec['PV_bootloader_args'] = ''
- rec['PV_legacy_args'] = ''
- else:
- if pv_kernel:
- rec['PV_args'] = 'noninteractive'
- rec['PV_bootloader'] = 'pygrub'
+
+ # Complete VM configuration record according to the image type
+ # non-raw/raw with PV kernel/raw in HVM mode
+ if use_pv_kernel:
+ rec['platform']['nx'] = 'false'
+ if instance.kernel_id:
+ # 1. Kernel explicitly passed in, use that
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
else:
- rec['HVM_boot_policy'] = 'BIOS order'
- rec['HVM_boot_params'] = {'order': 'dc'}
- rec['platform'] = {'acpi': 'true', 'apic': 'true',
- 'pae': 'true', 'viridian': 'true'}
+ # 2. Use kernel within the image
+ rec['PV_args'] = 'clocksource=jiffies'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ # 3. Using hardware virtualization
+ rec['platform']['nx'] = 'true'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['HVM_boot_policy'] = 'BIOS order'
+
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
instance_name = instance.name
@@ -319,7 +338,7 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
- def upload_image(cls, session, instance_id, vdi_uuids, image_id):
+ def upload_image(cls, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
@@ -328,15 +347,18 @@ class VMHelper(HelperBase):
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
+        os_type = instance.os_type or 'linux'
+
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
- 'sr_path': cls.get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session),
+ 'os_type': os_type}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
- session.wait_for_task(task, instance_id)
+ session.wait_for_task(task, instance.id)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
@@ -533,17 +555,33 @@ class VMHelper(HelperBase):
return uuid
@classmethod
- def lookup_image(cls, session, instance_id, vdi_ref):
+ def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
+ os_type):
"""
- Determine if VDI is using a PV kernel
+ Determine whether the VM will use a paravirtualized kernel or if it
+ will use hardware virtualization.
+
+ 1. Objectstore (any image type):
+       We use a plugin to figure out whether the VDI uses PV
+
+    2. Glance (VHD): decided by the os_type image property
+       ('windows' means HVM, anything else is treated as PV)
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
"""
if FLAGS.xenapi_image_service == 'glance':
- return cls._lookup_image_glance(session, vdi_ref)
+ # 2, 3, 4: Glance
+ return cls._determine_is_pv_glance(
+ session, vdi_ref, disk_image_type, os_type)
else:
- return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+            # 1. Objectstore
+ return cls._determine_is_pv_objectstore(session, instance_id,
+ vdi_ref)
@classmethod
- def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
+ def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
@@ -559,9 +597,38 @@ class VMHelper(HelperBase):
return pv
@classmethod
- def _lookup_image_glance(cls, session, vdi_ref):
+ def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
+ os_type):
+ """
+ For a Glance image, determine if we need paravirtualization.
+
+ The relevant scenarios are:
+    2. Glance (VHD): decided by the os_type image property
+       ('windows' means HVM, anything else is treated as PV)
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
+
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+        if disk_image_type == ImageType.DISK_VHD:
+            # 2. VHD: Windows images run HVM; anything else is treated as PV
+            is_pv = (os_type != 'windows')
+ elif disk_image_type == ImageType.DISK_RAW:
+ # 3. RAW
+ is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+ elif disk_image_type == ImageType.DISK:
+ # 4. Disk
+ is_pv = True
+ else:
+ raise exception.Error(_("Unknown image format %(disk_image_type)s")
+ % locals())
+
+ return is_pv
@classmethod
def lookup(cls, session, i):
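
Condensed, the Glance branch of determine_is_pv is a small decision table; a
runnable sketch with string stand-ins for the ImageType constants and the
pygrub probe passed in as a plain boolean:

    def is_pv_for_glance(disk_image_type, os_type, vdi_is_pv=False):
        if disk_image_type == 'DISK_VHD':   # 2. decided by os_type
            return os_type != 'windows'
        if disk_image_type == 'DISK_RAW':   # 3. normally probed via pygrub
            return vdi_is_pv
        if disk_image_type == 'DISK':       # 4. PV assumed
            return True
        raise ValueError('Unknown image format %s' % disk_image_type)

    assert is_pv_for_glance('DISK_VHD', 'windows') is False
    assert is_pv_for_glance('DISK_VHD', 'linux') is True
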
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 562ecd4d5..aa4372c3d 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -104,31 +104,26 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- kernel = ramdisk = pv_kernel = None
-
# Are we building from a pre-existing disk?
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
disk_image_type = VMHelper.determine_disk_image_type(instance)
- if disk_image_type == ImageType.DISK_RAW:
- # Have a look at the VDI and see if it has a PV kernel
- pv_kernel = VMHelper.lookup_image(self._session, instance.id,
- vdi_ref)
- elif disk_image_type == ImageType.DISK_VHD:
- # TODO(sirp): Assuming PV for now; this will need to be
- # configurable as Windows will use HVM.
- pv_kernel = True
+ kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+ ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
- vm_ref = VMHelper.create_vm(self._session,
- instance, kernel, ramdisk, pv_kernel)
+ use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+ vdi_ref, disk_image_type, instance.os_type)
+ vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
+ use_pv_kernel)
+
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=0, bootable=True)
@@ -266,7 +261,7 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
# call plugin to ship snapshot off to glance
VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
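
After this change the spawn path makes the PV/HVM decision in exactly one
place before the VM record is built; in outline (error handling elided):

    vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
    disk_image_type = VMHelper.determine_disk_image_type(instance)
    # kernel/ramdisk come from the two fetch_image calls shown above
    use_pv_kernel = VMHelper.determine_is_pv(session, instance.id, vdi_ref,
                                             disk_image_type,
                                             instance.os_type)
    vm_ref = VMHelper.create_vm(session, instance, kernel, ramdisk,
                                use_pv_kernel)
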
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 201b99fda..c996f6ef4 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -191,7 +191,7 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
os.link(source, link_name)
-def _upload_tarball(staging_path, image_id, glance_host, glance_port):
+def _upload_tarball(staging_path, image_id, glance_host, glance_port,
+                    os_type):
"""
Create a tarball of the image and then stream that into Glance
using chunked-transfer-encoded HTTP.
@@ -215,7 +215,10 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
'x-image-meta-is-public': 'True',
'x-image-meta-status': 'queued',
'x-image-meta-disk-format': 'vhd',
- 'x-image-meta-container-format': 'ovf'}
+ 'x-image-meta-container-format': 'ovf',
+ 'x-image-meta-property-os-type': os_type
+ }
+
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
@@ -337,11 +340,13 @@ def upload_vhd(session, args):
glance_host = params["glance_host"]
glance_port = params["glance_port"]
sr_path = params["sr_path"]
+ os_type = params["os_type"]
staging_path = _make_staging_area(sr_path)
try:
_prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
- _upload_tarball(staging_path, image_id, glance_host, glance_port)
+ _upload_tarball(staging_path, image_id, glance_host, glance_port,
+ os_type)
finally:
_cleanup_staging_area(staging_path)
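
On the Glance side the x-image-meta-property-* prefix is stripped and the
value lands in the image's properties dict, which is exactly what
nova.compute.api reads in the first hunk of this change; roughly:

    headers = {'x-image-meta-property-os-type': 'windows'}
    # after upload, image_service.show() returns something shaped like:
    image = {'id': 1, 'properties': {'os_type': 'windows'}}
    os_type = image.get('properties', {}).get('os_type')  # 'windows'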