-rw-r--r--  .mailmap                                                                  1
-rw-r--r--  Authors                                                                   2
-rw-r--r--  nova/api/openstack/__init__.py                                            2
-rw-r--r--  nova/api/openstack/common.py                                              8
-rw-r--r--  nova/api/openstack/servers.py                                            22
-rw-r--r--  nova/compute/manager.py                                                  34
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py   90
-rw-r--r--  nova/db/sqlalchemy/models.py                                             10
-rw-r--r--  nova/network/manager.py                                                   5
-rw-r--r--  nova/tests/api/openstack/test_common.py                                  21
-rw-r--r--  nova/tests/xenapi/stubs.py                                                2
-rw-r--r--  nova/virt/libvirt_conn.py                                                 4
-rw-r--r--  nova/virt/xenapi/fake.py                                                  2
-rw-r--r--  nova/virt/xenapi/vm_utils.py                                             22
-rw-r--r--  nova/virt/xenapi/vmops.py                                               187
-rw-r--r--  nova/virt/xenapi/volumeops.py                                             2
-rw-r--r--  nova/virt/xenapi_conn.py                                                 23
17 files changed, 366 insertions, 71 deletions
diff --git a/.mailmap b/.mailmap
index 9c1e08269..df012e066 100644
--- a/.mailmap
+++ b/.mailmap
@@ -19,6 +19,7 @@
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<josh@jk0.org> <josh.kearney@rackspace.com>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<justin@fathomdb.com> <superstack@superstack.org>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
diff --git a/Authors b/Authors
index 9d1843508..b279d8a53 100644
--- a/Authors
+++ b/Authors
@@ -31,7 +31,7 @@ John Dewey <john@dewey.ws>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
-Josh Kearney <josh.kearney@rackspace.com>
+Josh Kearney <josh@jk0.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 2a8f2d412..274330e3b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -81,6 +81,8 @@ class APIRouter(wsgi.Router):
server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['rescue'] = 'POST'
+ server_members['unrescue'] = 'POST'
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
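
For context, each new member action maps to a plain POST on the server resource.
A minimal client-side sketch (hypothetical host, port, and token; assumes the
usual Routes member-action URL form POST /v1.0/servers/<id>/rescue):

import httplib

# Hypothetical OSAPI endpoint; substitute your own host and auth token.
conn = httplib.HTTPConnection('nova-api.example.com', 8774)
conn.request('POST', '/v1.0/servers/42/rescue',
             headers={'X-Auth-Token': 'auth-token'})
print conn.getresponse().status  # expect 202 Accepted
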
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 1dc3767e2..9f85c5c8a 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import webob.exc
+
from nova import exception
@@ -27,7 +29,8 @@ def limited(items, request, max_limit=1000):
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
- to max_limit.
+ to max_limit. Negative values for either offset or limit
+ will cause exc.HTTPBadRequest() exceptions to be raised.
@kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
@@ -40,6 +43,9 @@ def limited(items, request, max_limit=1000):
except ValueError:
limit = max_limit
+ if offset < 0 or limit < 0:
+ raise webob.exc.HTTPBadRequest()
+
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
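
The new guard is easiest to see next to the existing paging rules. An
illustrative check of limited()'s behavior after this change (not part of
the patch itself):

import webob.exc
from webob import Request

from nova.api.openstack.common import limited

items = range(3000)

req = Request.blank('/?offset=10')  # no 'limit' given -> defaults to max_limit
assert limited(items, req, max_limit=1000) == items[10:1010]

req = Request.blank('/?limit=-1')   # negative values are now rejected outright
try:
    limited(items, req, max_limit=1000)
except webob.exc.HTTPBadRequest:
    print 'negative limit rejected'
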
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 69273ad7b..08b95b46a 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -335,6 +335,28 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def rescue(self, req, id):
+ """Permit users to rescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.rescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::rescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def unrescue(self, req, id):
+ """Permit users to unrescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.unrescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::unrescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def get_ajax_console(self, req, id):
""" Returns a url to an instance's ajaxterm console. """
try:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d659712ad..3af97683f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -370,12 +370,19 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
self.network_manager.setup_compute_network(context, instance_id)
- self.driver.rescue(instance_ref)
+ self.driver.rescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -385,11 +392,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- self.driver.unrescue(instance_ref)
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ self.driver.unrescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@staticmethod
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
new file mode 100644
index 000000000..427934d53
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -0,0 +1,90 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+fixed_ips = Table(
+ "fixed_ips",
+ meta,
+ Column(
+ "id",
+ Integer(),
+ primary_key=True,
+ nullable=False))
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+fixed_ips_addressV6 = Column(
+ "addressV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_netmaskV6 = Column(
+ "netmaskV6",
+ String(
+ length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_gatewayV6 = Column(
+ "gatewayV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ fixed_ips.create_column(fixed_ips_addressV6)
+ fixed_ips.create_column(fixed_ips_netmaskV6)
+ fixed_ips.create_column(fixed_ips_gatewayV6)
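
A matching downgrade() is not part of this commit; for reference, the usual
sqlalchemy-migrate inverse would look roughly like this (hypothetical,
untested sketch):

def downgrade(migrate_engine):
    # Hypothetical inverse of upgrade(): drop the IPv6 columns again.
    meta.bind = migrate_engine
    fixed_ips.drop_column(fixed_ips_addressV6)
    fixed_ips.drop_column(fixed_ips_netmaskV6)
    fixed_ips.drop_column(fixed_ips_gatewayV6)
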
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index daf111fa8..5e7c399ef 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -126,11 +126,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ onset_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
@@ -451,6 +456,9 @@ class FixedIp(BASE, NovaBase):
allocated = Column(Boolean, default=False)
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
+ addressV6 = Column(String(255))
+ netmaskV6 = Column(String(3))
+ gatewayV6 = Column(String(255))
class User(BASE, NovaBase):
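
The -rescue suffix above is what the XenAPI rescue path (further down) relies
on to give the rescue VM a unique name-label. A quick illustration, assuming
the default instance_name_template of 'instance-%s':

from nova.db.sqlalchemy.models import Instance

instance = Instance()
instance.id = 42
print instance.name        # 'instance-42'
instance._rescue = True    # set by VMOps.rescue() before re-spawning
print instance.name        # 'instance-42-rescue'
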
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 500f2a1e8..b36dd59cf 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -531,6 +531,11 @@ class VlanManager(NetworkManager):
' than 4094'))
fixed_net = IPy.IP(cidr)
+ if fixed_net.len() < num_networks * network_size:
+ raise ValueError(_('The network range is not big enough to fit '
+ '%(num_networks)s. Network size is %(network_size)s' %
+ locals()))
+
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 59d850157..92023362c 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -19,6 +19,7 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
+import webob.exc
from webob import Request
@@ -160,3 +161,23 @@ class LimiterTest(test.TestCase):
self.assertEqual(limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ """
+ Test a negative limit.
+ """
+ def _limit_large():
+ limited(self.large, req, max_limit=2000)
+
+ req = Request.blank('/?limit=-3000')
+ self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+
+ def test_limiter_negative_offset(self):
+ """
+ Test a negative offset.
+ """
+ def _limit_large():
+ limited(self.large, req, max_limit=2000)
+
+ req = Request.blank('/?offset=-30')
+ self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 9beab86e7..3de73d617 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -27,7 +27,7 @@ def stubout_instance_snapshot(stubs):
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
- def fake_wait_for_task(self, id, task):
+ def fake_wait_for_task(self, task, id):
class FakeEvent:
def send(self, value):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 112ccd43a..8a83e657f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -363,7 +363,7 @@ class LibvirtConnection(object):
raise exception.APIError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -393,7 +393,7 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 018d0dcd3..e1ae03e70 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -401,7 +401,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- LOG.debuug(_('Raising NotImplemented'))
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 59dd7057d..a6de3c9aa 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -207,19 +207,17 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = dev
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
@@ -271,7 +269,7 @@ class VMHelper(HelperBase):
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
+ template_vm_ref = session.wait_for_task(task, instance_id)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
@@ -304,7 +302,7 @@ class VMHelper(HelperBase):
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
- session.wait_for_task(instance_id, task)
+ session.wait_for_task(task, instance_id)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
@@ -347,7 +345,7 @@ class VMHelper(HelperBase):
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
- vdi_uuid = session.wait_for_task(instance_id, task)
+ vdi_uuid = session.wait_for_task(task, instance_id)
scan_sr(session, instance_id, sr_ref)
@@ -403,7 +401,7 @@ class VMHelper(HelperBase):
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
@@ -495,7 +493,7 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
@@ -515,7 +513,7 @@ class VMHelper(HelperBase):
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -656,7 +654,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
+ session.wait_for_task(task, instance_id)
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
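
The new dev argument lets callers pick the VIF device slot; the rescue path
passes "1" (see create_vifs in vmops.py below). A sketch of that call, with
hypothetical session and refs:

VMHelper.create_vif(session, vm_opaque_ref, network_ref,
                    '02:16:3e:00:00:01',  # instance.mac_address
                    '1')                  # device slot "1" while in rescue
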
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index bc39aa140..9ac83efb0 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -49,6 +49,7 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
@@ -62,20 +63,20 @@ class VMOps(object):
def spawn(self, instance):
"""Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
@@ -116,10 +117,9 @@ class VMOps(object):
self.create_vifs(instance, networks)
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
- instance_name = instance.name
+ self._start(instance, vm_ref)
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
+ % locals())
def _inject_onset_files():
onset_files = instance.onset_files
@@ -143,18 +143,18 @@ class VMOps(object):
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_onset_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -202,6 +202,20 @@ class VMOps(object):
_('Instance not present %s') % instance_name)
return vm
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting"""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot"""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
+
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance
@@ -254,7 +268,7 @@ class VMOps(object):
"""Reboot VM instance"""
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
@@ -294,6 +308,11 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
+ def _start(self, instance, vm):
+ """Start an instance"""
+ task = self._session.call_xenapi("Async.VM.start", vm, False, False)
+ self._session.wait_for_task(task, instance.id)
+
def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
@@ -320,8 +339,8 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def _shutdown(self, instance, vm, hard=True):
+ """Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
@@ -332,8 +351,13 @@ class VMOps(object):
LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
% locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
+ else:
+ task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)
+
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -350,7 +374,7 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -389,7 +413,7 @@ class VMOps(object):
args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
task = self._session.async_call_plugin(
'glance', 'remove_kernel_ramdisk', args)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
LOG.debug(_("kernel/ramdisk files removed"))
@@ -398,7 +422,7 @@ class VMOps(object):
instance_id = instance.id
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
- self._session.wait_for_task(instance_id, task)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -441,7 +465,7 @@ class VMOps(object):
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
@@ -470,6 +494,78 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance
+ - shutdown the instance VM
+ - set 'bootlock' to prevent the instance from starting in rescue
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue)
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+ if rescue_vm:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s" % instance.name))
+
+ vm = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm)
+ self._acquire_bootlock(vm)
+
+ instance._rescue = True
+ self.spawn(instance)
+ rescue_vm = self._get_vm_opaque_ref(instance)
+
+ vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
+ vbd_ref = VMHelper.create_vbd(
+ self._session,
+ rescue_vm,
+ vdi_ref,
+ 1,
+ False)
+
+ self._session.call_xenapi("Async.VBD.plug", vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance
+ - unplug the instance VM's disk from the rescue VM
+ - teardown the rescue VM
+ - release the bootlock to allow the instance VM to start
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+
+ if not rescue_vm:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s" % instance.name))
+
+ original_vm = self._get_vm_opaque_ref(instance)
+ vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+
+ instance._rescue = False
+
+ for vbd_ref in vbds:
+ vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd["userdevice"] == "1":
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+ self._session.wait_for_task(task1, instance.id)
+
+ vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
+ for vdi in vdis:
+ try:
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ self._session.wait_for_task(task, instance.id)
+ except self.XenAPI.Failure:
+ continue
+
+ task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+ self._session.wait_for_task(task2, instance.id)
+
+ self._release_bootlock(original_vm)
+ self._start(instance, original_vm)
+
def get_info(self, instance):
"""Return data about VM instance"""
vm = self._get_vm_opaque_ref(instance)
@@ -514,18 +610,30 @@ class VMOps(object):
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
def ip_dict(ip):
- return {'netmask': network['netmask'],
- 'enabled': '1',
- 'ip': ip.address}
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ return {
+ "ip": ip6.addressV6,
+ "netmask": ip6.netmaskV6,
+ "gateway": ip6.gatewayV6,
+ "enabled": "1"}
mac_id = instance.mac_address.replace(':', '')
location = 'vm-data/networking/%s' % mac_id
- mapping = {'label': network['label'],
- 'gateway': network['gateway'],
- 'mac': instance.mac_address,
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs],
+ 'ip6s': [ip6_dict(ip) for ip in network_IPs]}
+
self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+
try:
self.write_to_xenstore(vm_opaque_ref, location,
mapping['location'])
@@ -556,8 +664,17 @@ class VMOps(object):
NetworkHelper.find_network_with_bridge(self._session, bridge)
if network_ref:
- VMHelper.create_vif(self._session, vm_opaque_ref,
- network_ref, instance.mac_address)
+ try:
+ device = "1" if instance._rescue else "0"
+ except AttributeError:
+ device = "0"
+
+ VMHelper.create_vif(
+ self._session,
+ vm_opaque_ref,
+ network_ref,
+ instance.mac_address,
+ device)
def reset_network(self, instance):
"""
@@ -627,7 +744,7 @@ class VMOps(object):
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
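
With ip6s added, the record injected at vm-data/networking/<mac_id> now
carries both address families. Its shape, with illustrative values (keys
mirror ip_dict/ip6_dict above; netmaskV6 holds a prefix length, per the
String(3) column in models.py):

mapping = {
    'label': 'public',
    'gateway': '10.0.0.1',
    'mac': '02:16:3e:00:00:01',
    'dns': ['10.0.0.2'],
    'ips': [{'ip': '10.0.0.3',
             'netmask': '255.255.255.0',
             'enabled': '1'}],
    'ip6s': [{'ip': 'fe80::0216:3eff:fe00:0001',
              'netmask': '64',
              'gateway': 'fe80::1',
              'enabled': '1'}],
}
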
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 60bfe0a9f..d578c04ec 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -196,6 +196,14 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
@@ -279,7 +287,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -306,10 +314,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -323,7 +332,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())
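
Since wait_for_task() is the reason for most of the mechanical call-site edits
above, here is the convention change in one place (a runnable stand-in, not
the real session class):

def wait_for_task(task, id=None):
    # Stand-in mirroring the new signature: task first, instance id optional.
    # When id is None, the instance_action bookkeeping above is skipped.
    if id:
        print 'recording instance_action for instance %s' % id
    return 'result-of-%s' % task

wait_for_task('OpaqueRef:task')       # id-less call, e.g. Async.VBD.destroy
wait_for_task('OpaqueRef:task', 42)   # normal instance-scoped call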