summary refs log tree commit diff stats
path: root/nova
diff options
context:
space:
mode:
author    Josh Kearney <josh.kearney@rackspace.com>  2011-03-02 23:37:31 +0000
committer Tarmac <>  2011-03-02 23:37:31 +0000
commit    98e665f870c20d81db13bb9e5402a7b187dfe539 (patch)
tree      8734c40dbe2d1b37c9f1bb3c042dec08615dcf6d /nova
parent    ce5d90ebaf0eb57a396001e6947db3cbc31fe36e (diff)
parent    e34e9dd982870915f8c4dbf84a08bece42b0c592 (diff)
download  nova-98e665f870c20d81db13bb9e5402a7b187dfe539.tar.gz
download  nova-98e665f870c20d81db13bb9e5402a7b187dfe539.tar.xz
download  nova-98e665f870c20d81db13bb9e5402a7b187dfe539.zip
Provide the ability to rescue and unrescue a XenServer instance.
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/openstack/__init__.py    2
-rw-r--r--  nova/api/openstack/servers.py    22
-rw-r--r--  nova/compute/manager.py          34
-rw-r--r--  nova/db/sqlalchemy/models.py      7
-rw-r--r--  nova/tests/xenapi/stubs.py        2
-rw-r--r--  nova/virt/libvirt_conn.py         4
-rw-r--r--  nova/virt/xenapi/fake.py          2
-rw-r--r--  nova/virt/xenapi/vm_utils.py     22
-rw-r--r--  nova/virt/xenapi/vmops.py       159
-rw-r--r--  nova/virt/xenapi/volumeops.py     2
-rw-r--r--  nova/virt/xenapi_conn.py         23
11 files changed, 218 insertions, 61 deletions
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index b1b38ed2d..2f226d9b5 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -80,6 +80,8 @@ class APIRouter(wsgi.Router):
server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['rescue'] = 'POST'
+ server_members['unrescue'] = 'POST'
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 69273ad7b..08b95b46a 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -335,6 +335,28 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def rescue(self, req, id):
+ """Permit users to rescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.rescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::rescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def unrescue(self, req, id):
+ """Permit users to unrescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.unrescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::unrescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def get_ajax_console(self, req, id):
""" Returns a url to an instance's ajaxterm console. """
try:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d659712ad..3af97683f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -370,12 +370,19 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
self.network_manager.setup_compute_network(context, instance_id)
- self.driver.rescue(instance_ref)
+ self.driver.rescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -385,11 +392,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- self.driver.unrescue(instance_ref)
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ self.driver.unrescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@staticmethod
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 821fd4a83..9ad4ef577 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -126,11 +126,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ onset_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 9beab86e7..3de73d617 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -27,7 +27,7 @@ def stubout_instance_snapshot(stubs):
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
- def fake_wait_for_task(self, id, task):
+ def fake_wait_for_task(self, task, id):
class FakeEvent:
def send(self, value):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 4e0fd106f..778510812 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -362,7 +362,7 @@ class LibvirtConnection(object):
raise exception.APIError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -392,7 +392,7 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 018d0dcd3..e1ae03e70 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -401,7 +401,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- LOG.debuug(_('Raising NotImplemented'))
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6d9aeb060..977c19359 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -205,19 +205,17 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = dev
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
@@ -269,7 +267,7 @@ class VMHelper(HelperBase):
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
+ template_vm_ref = session.wait_for_task(task, instance_id)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
@@ -302,7 +300,7 @@ class VMHelper(HelperBase):
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
- session.wait_for_task(instance_id, task)
+ session.wait_for_task(task, instance_id)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
@@ -345,7 +343,7 @@ class VMHelper(HelperBase):
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
- vdi_uuid = session.wait_for_task(instance_id, task)
+ vdi_uuid = session.wait_for_task(task, instance_id)
scan_sr(session, instance_id, sr_ref)
@@ -401,7 +399,7 @@ class VMHelper(HelperBase):
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
@@ -493,7 +491,7 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
@@ -513,7 +511,7 @@ class VMHelper(HelperBase):
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -654,7 +652,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
+ session.wait_for_task(task, instance_id)
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 094b41588..9ac83efb0 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -49,6 +49,7 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
@@ -62,20 +63,20 @@ class VMOps(object):
def spawn(self, instance):
"""Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
@@ -116,10 +117,9 @@ class VMOps(object):
self.create_vifs(instance, networks)
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
- instance_name = instance.name
+ self._start(instance, vm_ref)
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
+ % locals())
def _inject_onset_files():
onset_files = instance.onset_files
@@ -143,18 +143,18 @@ class VMOps(object):
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_onset_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -202,6 +202,20 @@ class VMOps(object):
_('Instance not present %s') % instance_name)
return vm
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting"""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot"""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
+
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance
@@ -254,7 +268,7 @@ class VMOps(object):
"""Reboot VM instance"""
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
@@ -294,6 +308,11 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
+ def _start(self, instance, vm):
+ """Start an instance"""
+ task = self._session.call_xenapi("Async.VM.start", vm, False, False)
+ self._session.wait_for_task(task, instance.id)
+
def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
@@ -320,8 +339,8 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def _shutdown(self, instance, vm, hard=True):
+ """Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
@@ -332,8 +351,13 @@ class VMOps(object):
LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
% locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
+ else:
+ task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)
+
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -350,7 +374,7 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -389,7 +413,7 @@ class VMOps(object):
args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
task = self._session.async_call_plugin(
'glance', 'remove_kernel_ramdisk', args)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
LOG.debug(_("kernel/ramdisk files removed"))
@@ -398,7 +422,7 @@ class VMOps(object):
instance_id = instance.id
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
- self._session.wait_for_task(instance_id, task)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -441,7 +465,7 @@ class VMOps(object):
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
@@ -470,6 +494,78 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance
+ - shutdown the instance VM
+ - set 'bootlock' to prevent the instance from starting in rescue
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue)
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+ if rescue_vm:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s" % instance.name))
+
+ vm = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm)
+ self._acquire_bootlock(vm)
+
+ instance._rescue = True
+ self.spawn(instance)
+ rescue_vm = self._get_vm_opaque_ref(instance)
+
+ vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
+ vbd_ref = VMHelper.create_vbd(
+ self._session,
+ rescue_vm,
+ vdi_ref,
+ 1,
+ False)
+
+ self._session.call_xenapi("Async.VBD.plug", vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance
+ - unplug the instance VM's disk from the rescue VM
+ - teardown the rescue VM
+ - release the bootlock to allow the instance VM to start
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+
+ if not rescue_vm:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s" % instance.name))
+
+ original_vm = self._get_vm_opaque_ref(instance)
+ vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+
+ instance._rescue = False
+
+ for vbd_ref in vbds:
+ vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd["userdevice"] == "1":
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+ self._session.wait_for_task(task1, instance.id)
+
+ vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
+ for vdi in vdis:
+ try:
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ self._session.wait_for_task(task, instance.id)
+ except self.XenAPI.Failure:
+ continue
+
+ task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+ self._session.wait_for_task(task2, instance.id)
+
+ self._release_bootlock(original_vm)
+ self._start(instance, original_vm)
+
def get_info(self, instance):
"""Return data about VM instance"""
vm = self._get_vm_opaque_ref(instance)
@@ -568,8 +664,17 @@ class VMOps(object):
NetworkHelper.find_network_with_bridge(self._session, bridge)
if network_ref:
- VMHelper.create_vif(self._session, vm_opaque_ref,
- network_ref, instance.mac_address)
+ try:
+ device = "1" if instance._rescue else "0"
+ except AttributeError:
+ device = "0"
+
+ VMHelper.create_vif(
+ self._session,
+ vm_opaque_ref,
+ network_ref,
+ instance.mac_address,
+ device)
def reset_network(self, instance):
"""
@@ -639,7 +744,7 @@ class VMOps(object):
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 60bfe0a9f..d578c04ec 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -196,6 +196,14 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
@@ -279,7 +287,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -306,10 +314,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -323,7 +332,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())