author     Kevin L. Mitchell <kevin.mitchell@rackspace.com>  2011-07-07 16:47:42 -0500
committer  Kevin L. Mitchell <kevin.mitchell@rackspace.com>  2011-07-07 16:47:42 -0500
commit     fbf4d26bc9bd038eeea270846bdc8003abf3b527 (patch)
tree       30594078fa7858b55d97c9715120a39a2818ee68
parent     9f3a309eab3af46aa3f8ac2767b7edcdeb49e043 (diff)
parent     bc8f009f8ac6393301dd857339918d40b93be63d (diff)
pull-up from trunk
-rw-r--r--  nova/api/openstack/contrib/hosts.py                   114
-rw-r--r--  nova/compute/api.py                                     5
-rw-r--r--  nova/compute/manager.py                                 6
-rw-r--r--  nova/scheduler/api.py                                   5
-rw-r--r--  nova/scheduler/manager.py                               4
-rw-r--r--  nova/scheduler/zone_manager.py                         16
-rw-r--r--  nova/tests/test_hosts.py                              102
-rw-r--r--  nova/tests/test_xenapi.py                              42
-rw-r--r--  nova/tests/xenapi/stubs.py                             39
-rw-r--r--  nova/virt/driver.py                                     4
-rw-r--r--  nova/virt/fake.py                                       4
-rw-r--r--  nova/virt/hyperv.py                                     4
-rw-r--r--  nova/virt/libvirt/connection.py                         6
-rw-r--r--  nova/virt/vmwareapi_conn.py                             4
-rw-r--r--  nova/virt/xenapi/vm_utils.py                          165
-rw-r--r--  nova/virt/xenapi/vmops.py                             191
-rw-r--r--  nova/virt/xenapi_conn.py                                4
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance      4
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost    49
19 files changed, 665 insertions, 103 deletions
diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py
new file mode 100644
index 000000000..55e57e1a4
--- /dev/null
+++ b/nova/api/openstack/contrib/hosts.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The hosts admin extension."""
+
+import webob.exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.scheduler import api as scheduler_api
+
+
+LOG = logging.getLogger("nova.api.hosts")
+FLAGS = flags.FLAGS
+
+
+def _list_hosts(req, service=None):
+ """Returns a summary list of hosts, optionally filtering
+ by service type.
+ """
+ context = req.environ['nova.context']
+ hosts = scheduler_api.get_host_list(context)
+ if service:
+ hosts = [host for host in hosts
+ if host["service"] == service]
+ return hosts
+
+
+def check_host(fn):
+ """Makes sure that the host exists."""
+ def wrapped(self, req, id, service=None, *args, **kwargs):
+ listed_hosts = _list_hosts(req, service)
+ hosts = [h["host_name"] for h in listed_hosts]
+ if id in hosts:
+ return fn(self, req, id, *args, **kwargs)
+ else:
+ raise exception.HostNotFound(host=id)
+ return wrapped
+
+
+class HostController(object):
+ """The Hosts API controller for the OpenStack API."""
+ def __init__(self):
+ self.compute_api = compute.API()
+ super(HostController, self).__init__()
+
+ def index(self, req):
+ return {'hosts': _list_hosts(req)}
+
+ @check_host
+ def update(self, req, id, body):
+ for raw_key, raw_val in body.iteritems():
+ key = raw_key.lower().strip()
+ val = raw_val.lower().strip()
+ # NOTE(dabo): Right now only 'status' can be set, but other
+ # actions may follow.
+ if key == "status":
+ if val[:6] in ("enable", "disabl"):
+ return self._set_enabled_status(req, id,
+ enabled=(val.startswith("enable")))
+ else:
+ explanation = _("Invalid status: '%s'") % raw_val
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ else:
+ explanation = _("Invalid update setting: '%s'") % raw_key
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+
+ def _set_enabled_status(self, req, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ context = req.environ['nova.context']
+ state = "enabled" if enabled else "disabled"
+ LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
+ result = self.compute_api.set_host_enabled(context, host=host,
+ enabled=enabled)
+ return {"host": host, "status": result}
+
+
+class Hosts(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Hosts"
+
+ def get_alias(self):
+ return "os-hosts"
+
+ def get_description(self):
+ return "Host administration"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/hosts/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = [extensions.ResourceExtension('os-hosts', HostController(),
+ collection_actions={'update': 'PUT'}, member_actions={})]
+ return resources
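
For context, a client drives this extension with a PUT to the os-hosts resource. A minimal sketch (Python 2, to match the codebase); the endpoint URL, tenant and token below are hypothetical values, not part of this change:

    import json
    import urllib2

    BASE = "http://127.0.0.1:8774/v1.1/mytenant"  # hypothetical endpoint
    TOKEN = "99887766"                            # hypothetical auth token

    def set_host_status(host_name, status):
        """PUT {"status": "enable"|"disable"} to /os-hosts/<host_name>."""
        req = urllib2.Request("%s/os-hosts/%s" % (BASE, host_name),
                              data=json.dumps({"status": status}),
                              headers={"Content-Type": "application/json",
                                       "X-Auth-Token": TOKEN})
        req.get_method = lambda: "PUT"  # urllib2 POSTs when data is given
        return json.loads(urllib2.urlopen(req).read())

    # set_host_status("host_c1", "disable")
    # -> {"host": "host_c1", "status": "disabled"}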
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28459dc75..b0eedcd64 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -912,6 +912,11 @@ class API(base.Base):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ def set_host_enabled(self, context, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._call_compute_message("set_host_enabled", context,
+ instance_id=None, host=host, params={"enabled": enabled})
+
@scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index bbbddde0a..91a604934 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -875,6 +875,12 @@ class ComputeManager(manager.SchedulerDependentManager):
result))
@exception.wrap_exception
+ def set_host_enabled(self, context, instance_id=None, host=None,
+ enabled=None):
+ """Sets the specified host's ability to accept new instances."""
+ return self.driver.set_host_enabled(host, enabled)
+
+ @exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 0f4fc48c8..137b671c0 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None):
return rpc.call(context, queue, kwargs)
+def get_host_list(context):
+ """Return a list of hosts associated with this zone."""
+ return _call_scheduler('get_host_list', context)
+
+
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 6cb75aa8d..749d66cad 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager):
"""Poll child zones periodically to get status."""
self.zone_manager.ping(context)
+ def get_host_list(self, context=None):
+ """Get a list of hosts from the ZoneManager."""
+ return self.zone_manager.get_host_list()
+
def get_zone_list(self, context=None):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index ba7403c15..6093443a9 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -115,6 +115,18 @@ class ZoneManager(object):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
+ def get_host_list(self):
+ """Returns a list of dicts for each host that the Zone Manager
+ knows about. Each dict contains the host_name and the service
+ for that host.
+ """
+ all_hosts = self.service_states.keys()
+ ret = []
+ for host in all_hosts:
+ for svc in self.service_states[host]:
+ ret.append({"service": svc, "host_name": host})
+ return ret
+
def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
@@ -127,13 +139,15 @@ class ZoneManager(object):
combined = {} # { <service>_<cap> : (min, max), ... }
for host, host_dict in hosts_dict.iteritems():
for service_name, service_dict in host_dict.iteritems():
+ if not service_dict.get("enabled", True):
+ # Service is disabled; do not include it
+ continue
for cap, value in service_dict.iteritems():
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
max_value = max(max_value, value)
combined[key] = (min_value, max_value)
-
return combined
def _refresh_from_db(self, context):
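
get_host_list flattens the ZoneManager's service_states mapping, {host: {service: capabilities}}, into one dict per (host, service) pair. A toy illustration with made-up capability data:

    # Made-up service_states; only the keys matter to get_host_list().
    service_states = {
        "host_c1": {"compute": {"enabled": True, "vcpus": 16}},
        "host_v1": {"volume": {"enabled": True, "gigabytes": 2048}},
    }

    hosts = []
    for host in service_states:
        for svc in service_states[host]:
            hosts.append({"service": svc, "host_name": host})

    # hosts == [{"service": "compute", "host_name": "host_c1"},
    #           {"service": "volume", "host_name": "host_v1"}]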
diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py
new file mode 100644
index 000000000..548f81f8b
--- /dev/null
+++ b/nova/tests/test_hosts.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+import webob.exc
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova.api.openstack.contrib import hosts as os_hosts
+from nova.scheduler import api as scheduler_api
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.hosts')
+# Simulate the hosts returned by the zone manager.
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute"},
+ {"host_name": "host_c2", "service": "compute"},
+ {"host_name": "host_v1", "service": "volume"},
+ {"host_name": "host_v2", "service": "volume"}]
+
+
+def stub_get_host_list(req):
+ return HOST_LIST
+
+
+def stub_set_host_enabled(context, host, enabled):
+ # We'll simulate success and failure by assuming
+ # that 'host_c1' always succeeds, and 'host_c2'
+ # always fails
+ fail = (host == "host_c2")
+ status = "enabled" if (enabled ^ fail) else "disabled"
+ return status
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+
+class HostTestCase(test.TestCase):
+ """Test Case for hosts."""
+
+ def setUp(self):
+ super(HostTestCase, self).setUp()
+ self.controller = os_hosts.HostController()
+ self.req = FakeRequest()
+ self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list)
+ self.stubs.Set(self.controller.compute_api, 'set_host_enabled',
+ stub_set_host_enabled)
+
+ def test_list_hosts(self):
+ """Verify that the compute hosts are returned."""
+ hosts = os_hosts._list_hosts(self.req)
+ self.assertEqual(hosts, HOST_LIST)
+
+ compute_hosts = os_hosts._list_hosts(self.req, "compute")
+ expected = [host for host in HOST_LIST
+ if host["service"] == "compute"]
+ self.assertEqual(compute_hosts, expected)
+
+ def test_disable_host(self):
+ dis_body = {"status": "disable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=dis_body)
+ self.assertEqual(result_c1["status"], "disabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=dis_body)
+ self.assertEqual(result_c2["status"], "enabled")
+
+ def test_enable_host(self):
+ en_body = {"status": "enable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=en_body)
+ self.assertEqual(result_c1["status"], "enabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=en_body)
+ self.assertEqual(result_c2["status"], "disabled")
+
+ def test_bad_status_value(self):
+ bad_body = {"status": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_update_key(self):
+ bad_body = {"crazy": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_host(self):
+ self.assertRaises(exception.HostNotFound, self.controller.update,
+ self.req, "bogus_host_name", body={"status": "disable"})
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index af7f7f338..4cb7447d3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -381,6 +381,18 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
+ def _list_vdis(self):
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password)
+ return session.call_xenapi('VDI.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+ if vdi_ref not in start_list:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
architecture="x86-64", instance_id=1,
@@ -422,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase):
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
+ def test_spawn_fail_cleanup_1(self):
+ """Simulates an error while downloading an image.
+
+ Verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_fetch_image_glance_disk(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
+ def test_spawn_fail_cleanup_2(self):
+ """Simulates an error while creating VM record.
+
+ It verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_create_vm(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 151a3e909..66c79d465 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs):
stubs.Set(vm_utils, '_is_vdi_pv', f)
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs never have PV kernels"""
+
+ @classmethod
+ def f(cls, *args):
+ return False
+ stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_image_glance_disk(stubs):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake fetch_image_glance_disk")
+ stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake create_vm")
+ stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+
+
def stubout_loopingcall_start(stubs):
def fake_start(self, interval, now=True):
self.f(*self.args, **self.kw)
@@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase):
super(FakeSessionForVMTests, self).__init__(uri)
def host_call_plugin(self, _1, _2, plugin, method, _5):
+ # If the call is for 'copy_kernel_vdi' return None.
+ if method == 'copy_kernel_vdi':
+ return
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
vdi_rec = fake.get_record('VDI', vdi_ref)
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 1c9797973..3c4a073bf 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -249,3 +249,7 @@ class ComputeDriver(object):
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
raise NotImplementedError()
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5fe9d674f..ea0a59f21 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -514,3 +514,7 @@ class FakeConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
return self.host_status
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index f6783f3aa..5c1dc772d 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -499,3 +499,7 @@ class HyperVConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 0c6eaab84..e912c2bec 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1015,7 +1015,7 @@ class LibvirtConnection(driver.ComputeDriver):
'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
- if FLAGS.libvirt_type != 'lxc':
+ if FLAGS.libvirt_type not in ('lxc', 'uml'):
xml_info['vncserver_host'] = FLAGS.vncserver_host
xml_info['vnc_keymap'] = FLAGS.vnc_keymap
if not rescue:
@@ -1591,3 +1591,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 3c6345ec8..d80e14931 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -190,6 +190,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
return
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index f91958c57..71107aff4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -23,6 +23,7 @@ import json
import os
import pickle
import re
+import sys
import tempfile
import time
import urllib
@@ -71,17 +72,51 @@ KERNEL_DIR = '/boot/guest'
class ImageType:
"""
Enumeration class for distinguishing different image types
- 0 - kernel/ramdisk image (goes on dom0's filesystem)
- 1 - disk image (local SR, partitioned by objectstore plugin)
- 2 - raw disk image (local SR, NOT partitioned by plugin)
- 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ 0 - kernel image (goes on dom0's filesystem)
+ 1 - ramdisk image (goes on dom0's filesystem)
+ 2 - disk image (local SR, partitioned by objectstore plugin)
+ 3 - raw disk image (local SR, NOT partitioned by plugin)
+ 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
"""
- KERNEL_RAMDISK = 0
- DISK = 1
- DISK_RAW = 2
- DISK_VHD = 3
+ KERNEL = 0
+ RAMDISK = 1
+ DISK = 2
+ DISK_RAW = 3
+ DISK_VHD = 4
+
+ KERNEL_STR = "kernel"
+ RAMDISK_STR = "ramdisk"
+ DISK_STR = "os"
+ DISK_RAW_STR = "os_raw"
+ DISK_VHD_STR = "vhd"
+
+ @classmethod
+ def to_string(cls, image_type):
+ if image_type == ImageType.KERNEL:
+ return ImageType.KERNEL_STR
+ elif image_type == ImageType.RAMDISK:
+ return ImageType.RAMDISK_STR
+ elif image_type == ImageType.DISK:
+ return ImageType.DISK_STR
+ elif image_type == ImageType.DISK_RAW:
+ return ImageType.DISK_RAW_STR
+ elif image_type == ImageType.DISK_VHD:
+ return ImageType.DISK_VHD_STR
+
+ @classmethod
+ def from_string(cls, image_type_str):
+ if image_type_str == ImageType.KERNEL_STR:
+ return ImageType.KERNEL
+ elif image_type_str == ImageType.RAMDISK_STR:
+ return ImageType.RAMDISK
+ elif image_type_str == ImageType.DISK_STR:
+ return ImageType.DISK
+ elif image_type_str == ImageType.DISK_RAW_STR:
+ return ImageType.DISK_RAW
+ elif image_type_str == ImageType.DISK_VHD_STR:
+ return ImageType.DISK_VHD
class VMHelper(HelperBase):
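
The string forms are what fetch_image now reports as vdi_type, so to_string and from_string must be inverses. A quick sanity check (a sketch; assumes the import path of this module):

    from nova.virt.xenapi.vm_utils import ImageType

    for img in (ImageType.KERNEL, ImageType.RAMDISK, ImageType.DISK,
                ImageType.DISK_RAW, ImageType.DISK_VHD):
        # Each enum value should round-trip through its string form.
        assert ImageType.from_string(ImageType.to_string(img)) == img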
@@ -145,7 +180,6 @@ class VMHelper(HelperBase):
'VCPUs_max': vcpus,
'VCPUs_params': {},
'xenstore_data': {}}
-
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
@@ -240,6 +274,15 @@ class VMHelper(HelperBase):
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
+ def destroy_vdi(cls, session, vdi_ref):
+ try:
+ task = session.call_xenapi('Async.VDI.destroy', vdi_ref)
+ session.wait_for_task(task)
+ except cls.XenAPI.Failure as exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
+
+ @classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address,
dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
@@ -394,12 +437,12 @@ class VMHelper(HelperBase):
"""
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
-
sr_ref = safe_find_sr(session)
- # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
- # have the `uuid` module. To work around this, we generate the uuids
- # here (under Python 2.6+) and pass them as arguments
+ # NOTE(sirp): The Glance plugin runs under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
glance_host, glance_port = \
@@ -449,18 +492,20 @@ class VMHelper(HelperBase):
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
+ LOG.debug(_("Fetching image %(image)s") % locals())
+ LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
-
+ LOG.debug(_("Size for image %(image)s:" +
+ "%(virtual_size)d") % locals())
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- elif image_type == ImageType.KERNEL_RAMDISK and \
+ elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \
vdi_size > FLAGS.max_kernel_ramdisk_size:
max_size = FLAGS.max_kernel_ramdisk_size
raise exception.Error(
@@ -469,29 +514,45 @@ class VMHelper(HelperBase):
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
-
- with_vdi_attached_here(session, vdi_ref, False,
- lambda dev:
- _stream_disk(dev, image_type,
- virtual_size, image_file))
- if image_type == ImageType.KERNEL_RAMDISK:
- #we need to invoke a plugin for copying VDI's
- #content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
- fn = "copy_kernel_vdi"
- args = {}
- args['vdi-ref'] = vdi_ref
- #let the plugin copy the correct number of bytes
- args['image-size'] = str(vdi_size)
- task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(task, instance_id)
- #remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi_ref)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
- return filename
- else:
+ # From this point we have a VDI on Xen host;
+ # If anything goes wrong, we need to remember its uuid.
+ try:
+ filename = None
vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
- return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
+ with_vdi_attached_here(session, vdi_ref, False,
+ lambda dev:
+ _stream_disk(dev, image_type,
+ virtual_size, image_file))
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ # We need to invoke a plugin for copying the
+ # content of the VDI into the proper path.
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+ # Let the plugin copy the correct number of bytes.
+ args['image-size'] = str(vdi_size)
+ task = session.async_call_plugin('glance', fn, args)
+ filename = session.wait_for_task(task, instance_id)
+ # Remove the VDI as it is not needed anymore.
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=None,
+ file=filename)]
+ else:
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=None)]
+ except (cls.XenAPI.Failure, IOError, OSError) as e:
+ # We look for XenAPI and OS failures.
+ LOG.exception(_("instance %s: Failed to fetch glance image"),
+ instance_id, exc_info=sys.exc_info())
+ e.args = e.args + (
+ [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=filename)],)
+ raise e
@classmethod
def determine_disk_image_type(cls, instance):
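
The except clause above threads cleanup information to the caller by appending a list of resource dicts to the exception's args; VMOps._handle_spawn_error later pops that list back off. A stripped-down sketch of the pattern in isolation:

    def risky_step():
        try:
            raise IOError("stream failed")
        except IOError as e:
            # Append the resources the caller must clean up.
            e.args = e.args + ([dict(vdi_type="os",
                                     vdi_uuid="some-uuid",
                                     file=None)],)
            raise

    try:
        risky_step()
    except IOError as e:
        last_arg = e.args[-1]
        resources = last_arg if isinstance(last_arg, list) else []
        # Each item tells the handler which VDI or file to destroy.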
@@ -506,7 +567,8 @@ class VMHelper(HelperBase):
whether a kernel_id is specified.
"""
def log_disk_format(image_type):
- pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ pretty_format = {ImageType.KERNEL: 'KERNEL',
+ ImageType.RAMDISK: 'RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
@@ -519,8 +581,8 @@ class VMHelper(HelperBase):
def determine_from_glance():
glance_disk_format2nova_type = {
'ami': ImageType.DISK,
- 'aki': ImageType.KERNEL_RAMDISK,
- 'ari': ImageType.KERNEL_RAMDISK,
+ 'aki': ImageType.KERNEL,
+ 'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
image_ref = instance.image_ref
@@ -553,7 +615,7 @@ class VMHelper(HelperBase):
image_type):
"""Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
@@ -568,13 +630,13 @@ class VMHelper(HelperBase):
secret, image_type):
"""Fetch an image from objectstore.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type == ImageType.KERNEL_RAMDISK:
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
fn = 'get_kernel'
else:
fn = 'get_vdi'
@@ -584,15 +646,20 @@ class VMHelper(HelperBase):
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if image_type != ImageType.KERNEL_RAMDISK:
+ if image_type not in (ImageType.KERNEL, ImageType.RAMDISK):
args['add_partition'] = 'true'
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid_or_fn = session.wait_for_task(task, instance_id)
- if image_type != ImageType.KERNEL_RAMDISK:
- return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
- return uuid_or_fn
+ vdi_uuid = None
+ filename = None
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ filename = session.wait_for_task(task, instance_id)
+ else:
+ vdi_uuid = session.wait_for_task(task, instance_id)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=filename)]
@classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b116c8467..56718f8e8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -24,7 +24,9 @@ import json
import M2Crypto
import os
import pickle
+import random
import subprocess
+import sys
import time
import uuid
@@ -137,9 +139,18 @@ class VMOps(object):
return vdis
def spawn(self, instance, network_info):
- vdis = self._create_disks(instance)
- vm_ref = self._create_vm(instance, vdis, network_info)
- self._spawn(instance, vm_ref)
+ vdis = None
+ try:
+ vdis = self._create_disks(instance)
+ vm_ref = self._create_vm(instance, vdis, network_info)
+ self._spawn(instance, vm_ref)
+ except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
+ LOG.exception(_("instance %s: Failed to spawn"),
+ instance.id, exc_info=sys.exc_info())
+ LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
+ instance.id)
+ self._handle_spawn_error(vdis, spawn_error)
+ raise spawn_error
def spawn_rescue(self, instance):
"""Spawn a rescue instance."""
@@ -165,42 +176,64 @@ class VMOps(object):
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
-
kernel = None
- if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
ramdisk = None
- if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
- # Create the VM ref and attach the first disk
- first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdis[0]['vdi_uuid'])
-
- vm_mode = instance.vm_mode and instance.vm_mode.lower()
- if vm_mode == 'pv':
- use_pv_kernel = True
- elif vm_mode in ('hv', 'hvm'):
- use_pv_kernel = False
- vm_mode = 'hvm' # Normalize
- else:
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
- vm_mode = use_pv_kernel and 'pv' or 'hvm'
-
- if instance.vm_mode != vm_mode:
- # Update database with normalized (or determined) value
- db.instance_update(context.get_admin_context(),
- instance['id'], {'vm_mode': vm_mode})
-
- vm_ref = VMHelper.create_vm(self._session, instance,
- kernel, ramdisk, use_pv_kernel)
+ try:
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session, instance.id,
+ instance.kernel_id, user, project,
+ ImageType.KERNEL)[0]
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
+ instance.ramdisk_id, user, project,
+ ImageType.RAMDISK)[0]
+ # Create the VM ref and attach the first disk
+ first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdis[0]['vdi_uuid'])
+
+ vm_mode = instance.vm_mode and instance.vm_mode.lower()
+ if vm_mode == 'pv':
+ use_pv_kernel = True
+ elif vm_mode in ('hv', 'hvm'):
+ use_pv_kernel = False
+ vm_mode = 'hvm' # Normalize
+ else:
+ use_pv_kernel = VMHelper.determine_is_pv(self._session,
+ instance.id, first_vdi_ref, disk_image_type,
+ instance.os_type)
+ vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+ if instance.vm_mode != vm_mode:
+ # Update database with normalized (or determined) value
+ db.instance_update(context.get_admin_context(),
+ instance['id'], {'vm_mode': vm_mode})
+ vm_ref = VMHelper.create_vm(self._session, instance,
+ kernel and kernel.get('file', None) or None,
+ ramdisk and ramdisk.get('file', None) or None,
+ use_pv_kernel)
+ except (self.XenAPI.Failure, OSError, IOError) as vm_create_error:
+ # Collect VDI/file resources to clean up;
+ # These resources will be removed by _handle_spawn_error.
+ LOG.exception(_("instance %s: Failed to spawn - " +
+ "Unable to create VM"),
+ instance.id, exc_info=sys.exc_info())
+ last_arg = None
+ resources = []
+
+ if vm_create_error.args:
+ last_arg = vm_create_error.args[-1]
+ if isinstance(last_arg, list):
+ resources = last_arg
+ if resources is not last_arg:
+ vm_create_error.args = vm_create_error.args + (resources,)
+
+ if kernel:
+ resources.append(kernel)
+ if ramdisk:
+ resources.append(ramdisk)
+
+ raise vm_create_error
+
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
@@ -321,6 +354,47 @@ class VMOps(object):
return timer.start(interval=0.5, now=True)
+ def _handle_spawn_error(self, vdis, spawn_error):
+ # Extract resource list from spawn_error.
+ resources = []
+ if spawn_error.args:
+ last_arg = spawn_error.args[-1]
+ if isinstance(last_arg, list):
+ resources = last_arg
+ if vdis:
+ for vdi in vdis:
+ resources.append(dict(vdi_type=vdi['vdi_type'],
+ vdi_uuid=vdi['vdi_uuid'],
+ file=None))
+
+ LOG.debug(_("Resources to remove:%s"), resources)
+ kernel_file = None
+ ramdisk_file = None
+
+ for item in resources:
+ vdi_type = item['vdi_type']
+ vdi_to_remove = item['vdi_uuid']
+ if vdi_to_remove:
+ try:
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi_to_remove)
+ LOG.debug(_('Removing VDI %(vdi_ref)s' +
+ '(uuid:%(vdi_to_remove)s)'), locals())
+ VMHelper.destroy_vdi(self._session, vdi_ref)
+ except self.XenAPI.Failure:
+ # Vdi has already been deleted
+ LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
+ if item['file']:
+ # There is also a file to remove.
+ if vdi_type == ImageType.KERNEL_STR:
+ kernel_file = item['file']
+ elif vdi_type == ImageType.RAMDISK_STR:
+ ramdisk_file = item['file']
+
+ if kernel_file or ramdisk_file:
+ LOG.debug(_("Removing kernel/ramdisk files from dom0"))
+ self._destroy_kernel_ramdisk_plugin_call(kernel_file,
+ ramdisk_file)
+
def _get_vm_opaque_ref(self, instance_or_vm):
"""
Refactored out the common code of many methods that receive either
@@ -698,6 +772,16 @@ class VMOps(object):
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
+ def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk):
+ args = {}
+ if kernel:
+ args['kernel-file'] = kernel
+ if ramdisk:
+ args['ramdisk-file'] = ramdisk
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task)
+
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
@@ -727,13 +811,7 @@ class VMOps(object):
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
vm_ref)
- LOG.debug(_("Removing kernel/ramdisk files"))
-
- args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
- task = self._session.async_call_plugin(
- 'glance', 'remove_kernel_ramdisk', args)
- self._session.wait_for_task(task, instance.id)
-
+ self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"))
def _destroy_vm(self, instance, vm_ref):
@@ -932,6 +1010,31 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ args = {"enabled": json.dumps(enabled)}
+ json_resp = self._call_xenhost("set_host_enabled", args)
+ resp = json.loads(json_resp)
+ return resp["status"]
+
+ def _call_xenhost(self, method, arg_dict):
+ """There will be several methods that will need this general
+ handling for interacting with the xenhost plugin, so this abstracts
+ out that behavior.
+ """
+ # Create a task ID as something that won't match any instance ID
+ task_id = random.randint(-80000, -70000)
+ try:
+ task = self._session.async_call_plugin("xenhost", method,
+ args=arg_dict)
+ ret = self._session.wait_for_task(task, task_id)
+ except self.XenAPI.Failure as e:
+ ret = None
+ LOG.error(_("The call to %(method)s returned an error: %(e)s.")
+ % locals())
+ return ret
+
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
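
set_host_enabled round-trips JSON through the xenhost plugin: the boolean is serialized into the plugin's arg dict, and the jsonified plugin response is decoded back into a status string. The data flow, with made-up values:

    import json

    # What VMOps sends to the plugin:
    args = {"enabled": json.dumps(True)}          # {"enabled": "true"}

    # What the plugin hands back (see the xenhost diff below):
    json_resp = json.dumps({"status": "enabled"})

    # What set_host_enabled returns to the compute manager:
    assert json.loads(json_resp)["status"] == "enabled"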
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index cd4dc1b60..ec8c44c1c 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -336,6 +336,10 @@ class XenAPIConnection(driver.ComputeDriver):
True, run the update first."""
return self.HostState.get_host_stats(refresh=refresh)
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._vmops.set_host_enabled(host, enabled)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 46031ebe8..fbe080b22 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -412,8 +412,8 @@ def copy_kernel_vdi(session, args):
def remove_kernel_ramdisk(session, args):
"""Removes kernel and/or ramdisk from dom0's file system"""
- kernel_file = exists(args, 'kernel-file')
- ramdisk_file = exists(args, 'ramdisk-file')
+ kernel_file = optional(args, 'kernel-file')
+ ramdisk_file = optional(args, 'ramdisk-file')
if kernel_file:
os.remove(kernel_file)
if ramdisk_file:
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index a8428e841..292bbce12 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -33,9 +33,10 @@ import tempfile
import time
import XenAPIPlugin
+import pluginlib_nova as pluginlib
-from pluginlib_nova import *
-configure_logging("xenhost")
+
+pluginlib.configure_logging("xenhost")
host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
@@ -65,14 +66,49 @@ def _run_command(cmd):
return proc.stdout.read()
+def _get_host_uuid():
+ cmd = "xe host-list | grep uuid"
+ resp = _run_command(cmd)
+ return resp.split(":")[-1].strip()
+
+
+@pluginlib.jsonify
+def set_host_enabled(self, arg_dict):
+ """Sets this host's ability to accept new instances.
+ It will otherwise continue to operate normally.
+ """
+ enabled = arg_dict.get("enabled")
+ if enabled is None:
+ raise pluginlib.PluginError(
+ _("Missing 'enabled' argument to set_host_enabled"))
+ if enabled == "true":
+ result = _run_command("xe host-enable")
+ elif enabled == "false":
+ result = _run_command("xe host-disable")
+ else:
+ raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
+ # Should be empty string
+ if result:
+ raise pluginlib.PluginError(result)
+ # Return the current enabled status
+ host_uuid = _get_host_uuid()
+ cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid
+ resp = _run_command(cmd)
+ # Response should be in the format: "enabled ( RO): true"
+ host_enabled = resp.strip().split()[-1]
+ if host_enabled == "true":
+ status = "enabled"
+ else:
+ status = "disabled"
+ return {"status": status}
+
+
@pluginlib.jsonify
def host_data(self, arg_dict):
"""Runs the commands on the xenstore host to return the current status
information.
"""
- cmd = "xe host-list | grep uuid"
- resp = _run_command(cmd)
- host_uuid = resp.split(":")[-1].strip()
+ host_uuid = _get_host_uuid()
cmd = "xe host-param-list uuid=%s" % host_uuid
resp = _run_command(cmd)
parsed_data = pluginlib.parse_response(resp)
@@ -180,4 +216,5 @@ def cleanup(dct):
if __name__ == "__main__":
XenAPIPlugin.dispatch(
- {"host_data": host_data})
+ {"host_data": host_data,
+ "set_host_enabled": set_host_enabled})