author    Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-24 11:06:17 +0000
committer Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-24 11:06:17 +0000
commit    8728108512052d24d363d149307fafa993e323a5 (patch)
tree      5cc53e8dfc54b108c8721063e191aa7f69adf528 /nova/virt
parent    90085fdc93a9e466b90048069ef5d446ca7d3ddf (diff)
parent    2434138bbc73a8dbaee44c66cb7bed9f1fa40b2b (diff)
merge trunk
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/connection.py      4
-rw-r--r--  nova/virt/driver.py        234
-rw-r--r--  nova/virt/fake.py           39
-rw-r--r--  nova/virt/hyperv.py         22
-rw-r--r--  nova/virt/libvirt_conn.py   67
-rw-r--r--  nova/virt/xenapi/vmops.py  167
-rw-r--r--  nova/virt/xenapi_conn.py    23
7 files changed, 480 insertions, 76 deletions
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 13181b730..af7001715 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -23,6 +23,8 @@ import sys
from nova import flags
from nova import log as logging
+from nova import utils
+from nova.virt import driver
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
@@ -72,4 +74,4 @@ def get_connection(read_only=False):
if conn is None:
LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
- return conn
+ return utils.check_isinstance(conn, driver.ComputeDriver)
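The check_isinstance guard makes get_connection fail fast when a backend returns an object that is not a driver.ComputeDriver. A minimal sketch of what such a helper might look like (the real nova.utils implementation may differ in its error type and message):

    def check_isinstance(obj, cls):
        """Return obj if it is an instance of cls, otherwise raise."""
        # Returning the object (rather than just asserting) lets callers
        # wrap a return value in a single expression, as get_connection()
        # does above.
        if isinstance(obj, cls):
            return obj
        raise Exception('Expected object of type: %s' % cls)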
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
new file mode 100644
index 000000000..0e3a4aa3b
--- /dev/null
+++ b/nova/virt/driver.py
@@ -0,0 +1,234 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Driver base-classes:
+
+ (Beginning of) the contract that compute drivers must follow, and shared
+ types that support that contract
+"""
+
+from nova.compute import power_state
+
+
+class InstanceInfo(object):
+ def __init__(self, name, state):
+ self.name = name
+ assert state in power_state.valid_states(), "Bad state: %s" % state
+ self.state = state
+
+
+class ComputeDriver(object):
+ """Base class for compute drivers.
+
+    Lots of documentation currently lives in fake.py.
+ """
+
+ def init_host(self, host):
+ """Adopt existing VM's running here"""
+ raise NotImplementedError()
+
+ def get_info(self, instance_name):
+ """Get the current status of an instance, by name (not ID!)
+
+ Returns a dict containing:
+ :state: the running state, one of the power_state codes
+ :max_mem: (int) the maximum memory in KBytes allowed
+ :mem: (int) the memory in KBytes used by the domain
+ :num_cpu: (int) the number of virtual CPUs for the domain
+ :cpu_time: (int) the CPU time used in nanoseconds
+ """
+ raise NotImplementedError()
+
+ def list_instances(self):
+ raise NotImplementedError()
+
+ def list_instances_detail(self):
+ """Return a list of InstanceInfo for all registered VMs"""
+ raise NotImplementedError()
+
+ def spawn(self, instance):
+ """Launch a VM for the specified instance"""
+ raise NotImplementedError()
+
+ def destroy(self, instance, cleanup=True):
+ """Shutdown specified VM"""
+ raise NotImplementedError()
+
+ def reboot(self, instance):
+ """Reboot specified VM"""
+ raise NotImplementedError()
+
+ def snapshot_instance(self, context, instance_id, image_id):
+ raise NotImplementedError()
+
+ def get_console_pool_info(self, console_type):
+ """???
+
+ Returns a dict containing:
+ :address: ???
+ :username: ???
+ :password: ???
+ """
+ raise NotImplementedError()
+
+ def get_console_output(self, instance):
+ raise NotImplementedError()
+
+ def get_ajax_console(self, instance):
+ raise NotImplementedError()
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics"""
+ raise NotImplementedError()
+
+ def get_host_ip_addr(self):
+ raise NotImplementedError()
+
+ def attach_volume(self, context, instance_id, volume_id, mountpoint):
+ raise NotImplementedError()
+
+ def detach_volume(self, context, instance_id, volume_id):
+ raise NotImplementedError()
+
+ def compare_cpu(self, context, cpu_info):
+ raise NotImplementedError()
+
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance copies over the COW disk"""
+ raise NotImplementedError()
+
+ def snapshot(self, instance, image_id):
+ """ Create snapshot from a running VM instance """
+ raise NotImplementedError()
+
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ raise NotImplementedError()
+
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ raise NotImplementedError()
+
+ def pause(self, instance, callback):
+ """Pause VM instance"""
+ raise NotImplementedError()
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance"""
+ raise NotImplementedError()
+
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ raise NotImplementedError()
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ raise NotImplementedError()
+
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ raise NotImplementedError()
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ raise NotImplementedError()
+
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+ This method is called when nova-compute launches, and
+ whenever admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+        :param host: hostname that the compute manager is currently running on
+
+ """
+ raise NotImplementedError()
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ raise NotImplementedError()
+
+ def reset_network(self, instance):
+ """reset networking for specified instance"""
+ raise NotImplementedError()
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filtering rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def set_admin_password(self, context, instance_id, new_pass=None):
+ """Set the root/admin password for an instance on this server."""
+ raise NotImplementedError()
+
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Create a file on the VM instance. The file path and contents
+ should be base64-encoded.
+ """
+ raise NotImplementedError()
+
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ raise NotImplementedError()
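With the contract above in place, each virt backend subclasses ComputeDriver and overrides the operations it supports; anything left unimplemented raises NotImplementedError instead of failing later with an AttributeError. A toy sketch of a conforming driver (NoopDriver is hypothetical, for illustration only):

    from nova.compute import power_state
    from nova.virt import driver

    class NoopDriver(driver.ComputeDriver):
        """Hypothetical driver: tracks instances in a dict, runs nothing."""

        def __init__(self):
            self._instances = {}

        def list_instances(self):
            return self._instances.keys()

        def list_instances_detail(self):
            return [driver.InstanceInfo(name, state)
                    for name, state in self._instances.items()]

        def spawn(self, instance):
            self._instances[instance.name] = power_state.RUNNING

        # Everything else (reboot, destroy, migrate, ...) falls through
        # to the base class and raises NotImplementedError.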
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 3a06284a1..5b0fe1877 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -26,7 +26,9 @@ semantics of real hypervisor connections.
"""
from nova import exception
+from nova import utils
from nova.compute import power_state
+from nova.virt import driver
def get_connection(_):
@@ -34,7 +36,14 @@ def get_connection(_):
return FakeConnection.instance()
-class FakeConnection(object):
+class FakeInstance(object):
+
+ def __init__(self, name, state):
+ self.name = name
+ self.state = state
+
+
+class FakeConnection(driver.ComputeDriver):
"""
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
@@ -90,6 +99,17 @@ class FakeConnection(object):
"""
return self.instances.keys()
+ def _map_to_instance_info(self, instance):
+ instance = utils.check_isinstance(instance, FakeInstance)
+ info = driver.InstanceInfo(instance.name, instance.state)
+ return info
+
+ def list_instances_detail(self):
+ info_list = []
+ for instance in self.instances.values():
+ info_list.append(self._map_to_instance_info(instance))
+ return info_list
+
def spawn(self, instance):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -109,9 +129,10 @@ class FakeConnection(object):
that it was before this call began.
"""
- fake_instance = FakeInstance()
- self.instances[instance.name] = fake_instance
- fake_instance._state = power_state.RUNNING
+ name = instance.name
+ state = power_state.RUNNING
+ fake_instance = FakeInstance(name, state)
+ self.instances[name] = fake_instance
def snapshot(self, instance, name):
"""
@@ -270,7 +291,7 @@ class FakeConnection(object):
raise exception.NotFound(_("Instance %s Not Found")
% instance_name)
i = self.instances[instance_name]
- return {'state': i._state,
+ return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
@@ -428,8 +449,6 @@ class FakeConnection(object):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
-
-class FakeInstance(object):
-
- def __init__(self):
- self._state = power_state.NOSTATE
+ def test_remove_vm(self, instance_name):
+ """ Removes the named VM, as if it crashed. For testing"""
+ self.instances.pop(instance_name)
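FakeInstance now takes its name and state at construction and exposes state as a public attribute, which lets _map_to_instance_info translate it straight into a driver.InstanceInfo. A hedged sketch of how a test might exercise the new fake (FakeInstanceModel is a hypothetical stand-in for a real instance record):

    from nova.compute import power_state
    from nova.virt import fake

    class FakeInstanceModel(object):
        name = 'instance-00000001'

    conn = fake.get_connection(None)
    conn.spawn(FakeInstanceModel())
    assert conn.get_info('instance-00000001')['state'] == power_state.RUNNING

    # Simulate a crash and confirm the VM drops out of the listing.
    conn.test_remove_vm('instance-00000001')
    assert 'instance-00000001' not in conn.list_instances()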
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 29d18dac5..a1ed5ebbf 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -68,6 +68,7 @@ from nova import flags
from nova import log as logging
from nova.auth import manager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt import images
wmi = None
@@ -108,8 +109,9 @@ def get_connection(_):
return HyperVConnection()
-class HyperVConnection(object):
+class HyperVConnection(driver.ComputeDriver):
def __init__(self):
+ super(HyperVConnection, self).__init__()
self._conn = wmi.WMI(moniker='//./root/virtualization')
self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
@@ -124,6 +126,19 @@ class HyperVConnection(object):
for v in self._conn.Msvm_ComputerSystem(['ElementName'])]
return vms
+ def list_instances_detail(self):
+        # TODO(justinsb): This is a terrible implementation (1+N queries)
+ instance_infos = []
+ for instance_name in self.list_instances():
+ info = self.get_info(instance_name)
+
+ state = info['state']
+
+ instance_info = driver.InstanceInfo(instance_name, state)
+ instance_infos.append(instance_info)
+
+ return instance_infos
+
def spawn(self, instance):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
@@ -345,7 +360,7 @@ class HyperVConnection(object):
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
- newinst.Properties_.Item(prop).Value =\
+ newinst.Properties_.Item(prop).Value = \
wmi_obj.Properties_.Item(prop).Value
return newinst
@@ -467,3 +482,6 @@ class HyperVConnection(object):
if vm is None:
raise exception.NotFound('Cannot detach volume from missing %s '
% instance_name)
+
+ def poll_rescued_instances(self, timeout):
+ pass
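The TODO above flags the 1+N shape of this implementation: one call to enumerate VM names, then one get_info round trip per VM. A sketch of the difference (the _enumerate_vms_with_state helper is hypothetical; a real fix would fetch name and state in a single WMI query):

    # Current shape: 1 enumeration + N get_info() round trips.
    def list_instances_detail(self):
        infos = []
        for name in self.list_instances():        # 1 query
            state = self.get_info(name)['state']  # N follow-up queries
            infos.append(driver.InstanceInfo(name, state))
        return infos

    # Batched shape: one query returning (name, state) pairs.
    def list_instances_detail_batched(self):
        return [driver.InstanceInfo(name, state)
                for name, state in self._enumerate_vms_with_state()]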
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index f264cf619..e1a0a6f29 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -42,13 +42,12 @@ import shutil
import sys
import random
import subprocess
-import time
import uuid
from xml.dom import minidom
from eventlet import greenthread
from eventlet import tpool
-from eventlet import semaphore
+
import IPy
from nova import context
@@ -62,6 +61,7 @@ from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
+from nova.virt import driver
from nova.virt import images
libvirt = None
@@ -133,8 +133,8 @@ def get_connection(read_only):
def _late_load_cheetah():
global Template
if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
- -1)
+ t = __import__('Cheetah.Template', globals(), locals(),
+ ['Template'], -1)
Template = t.Template
@@ -153,9 +153,10 @@ def _get_ip_version(cidr):
return int(net.version())
-class LibvirtConnection(object):
+class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
+ super(LibvirtConnection, self).__init__()
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
@@ -235,6 +236,29 @@ class LibvirtConnection(object):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
+ def _map_to_instance_info(self, domain):
+ """Gets info from a virsh domain object into an InstanceInfo"""
+
+ # domain.info() returns a list of:
+        #    state:     one of the state values (virDomainState)
+        #    maxMemory: the maximum memory used by the domain
+        #    memory:    the current amount of memory used by the domain
+        #    nbVirtCPU: the number of virtual CPUs
+        #    cpuTime:   the CPU time used by the domain, in nanoseconds
+
+ (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
+ name = domain.name()
+
+ return driver.InstanceInfo(name, state)
+
+ def list_instances_detail(self):
+ infos = []
+ for domain_id in self._conn.listDomainsID():
+ domain = self._conn.lookupByID(domain_id)
+ info = self._map_to_instance_info(domain)
+ infos.append(info)
+ return infos
+
def destroy(self, instance, cleanup=True):
try:
virt_dom = self._conn.lookupByName(instance['name'])
@@ -417,6 +441,10 @@ class LibvirtConnection(object):
self.reboot(instance)
@exception.wrap_exception
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ @exception.wrap_exception
def spawn(self, instance):
xml = self.to_xml(instance)
db.instance_set_state(context.get_admin_context(),
@@ -556,13 +584,12 @@ class LibvirtConnection(object):
os.mkdir(base_dir)
base = os.path.join(base_dir, fname)
- if fname not in LibvirtConnection._image_sems:
- LibvirtConnection._image_sems[fname] = semaphore.Semaphore()
- with LibvirtConnection._image_sems[fname]:
+ @utils.synchronized(fname)
+ def call_if_not_exists(base, fn, *args, **kwargs):
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
- if not LibvirtConnection._image_sems[fname].locked():
- del LibvirtConnection._image_sems[fname]
+
+ call_if_not_exists(base, fn, *args, **kwargs)
if cow:
utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
@@ -1144,7 +1171,8 @@ class LibvirtConnection(object):
return
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref,
+ time=None):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
@@ -1168,6 +1196,9 @@ class LibvirtConnection(object):
"""
+ if not time:
+ time = greenthread
+
# If any instances never launch at destination host,
# basic-filtering must be set here.
self.firewall_driver.setup_basic_filtering(instance_ref)
@@ -1780,15 +1811,15 @@ class IptablesFirewallDriver(FirewallDriver):
pass
def refresh_security_group_rules(self, security_group):
- # We use the semaphore to make sure noone applies the rule set
- # after we've yanked the existing rules but before we've put in
- # the new ones.
- with self.iptables.semaphore:
- for instance in self.instances.values():
- self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance)
+ self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self, security_group):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ self.add_filters_for_instance(instance)
+
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
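Both libvirt changes in this file replace hand-rolled semaphore bookkeeping with utils.synchronized, which serializes callers on a named lock; with external=True (as in do_refresh_security_group_rules) the lock also holds across processes. A minimal sketch of the decorator pattern, assuming the semantics shown in the diff ('image-cache-fname' is an illustrative lock name):

    import os

    from nova import utils

    @utils.synchronized('image-cache-fname')
    def fetch_once(base, fetch_fn, *args, **kwargs):
        # Only one caller per lock name runs this body at a time, so the
        # exists-check and the fetch cannot race with each other.
        if not os.path.exists(base):
            fetch_fn(target=base, *args, **kwargs)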
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index df6d3747f..c2c173b6f 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -37,6 +37,7 @@ from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
@@ -52,11 +53,14 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+ self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
"""List VM instances"""
+ # TODO(justinsb): Should we just always use the details method?
+        # Seems to be the same number of API calls.
vm_refs = []
for vm_ref in self._session.get_xenapi().VM.get_all():
vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
@@ -64,6 +68,33 @@ class VMOps(object):
vm_refs.append(vm_rec["name_label"])
return vm_refs
+ def list_instances_detail(self):
+ """List VM instances, returning InstanceInfo objects"""
+ instance_infos = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ name = vm_rec["name_label"]
+
+                # TODO(justinsb): This is a roundabout way to map the state
+ openstack_format = VMHelper.compile_info(vm_rec)
+ state = openstack_format['state']
+
+ instance_info = driver.InstanceInfo(name, state)
+ instance_infos.append(instance_info)
+ return instance_infos
+
+ def revert_resize(self, instance):
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ self._start(instance, vm_ref)
+
+ def finish_resize(self, instance, disk_info):
+ vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
+ disk_info['cow'])
+ vm_ref = self._create_vm(instance, vdi_uuid)
+ self.resize_instance(instance, vdi_uuid)
+ self._spawn(instance, vm_ref)
+
def _start(self, instance, vm_ref=None):
"""Power on a VM instance"""
if not vm_ref:
@@ -74,7 +105,7 @@ class VMOps(object):
LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- def create_disk(self, instance):
+ def _create_disk(self, instance):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
@@ -86,8 +117,9 @@ class VMOps(object):
def spawn(self, instance, network_info=None):
try:
vdi_uuid = disk_image_type = None
- (vdi_uuid, disk_image_type) = self.create_disk(instance)
- self._spawn_with_disk(instance, vdi_uuid, network_info)
+ (vdi_uuid, disk_image_type) = self._create_disk(instance)
+ vm_ref = self._create_vm(instance, vdi_uuid, network_info)
+ self._spawn(instance, vm_ref)
except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
LOG.exception(_("instance %s: Failed to spawn"),
@@ -98,7 +130,7 @@ class VMOps(object):
#re-throw the error
raise spawn_error
- def _spawn_with_disk(self, instance, vdi_uuid, network_info=None):
+ def _create_vm(self, instance, vdi_uuid, network_info=None):
"""Create VM instance"""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -164,16 +196,19 @@ class VMOps(object):
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=0, bootable=True)
- # inject_network_info and create vifs
# TODO(tr3buchet) - check to make sure we have network info, otherwise
# create it now. This goes away once nova-multi-nic hits.
if network_info is None:
network_info = self._get_network_info(instance)
self.create_vifs(vm_ref, network_info)
self.inject_network_info(instance, vm_ref, network_info)
+ return vm_ref
+ def _spawn(self, instance, vm_ref):
+ """Spawn a new instance"""
LOG.debug(_('Starting VM %s...'), vm_ref)
self._start(instance, vm_ref)
+ instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
@@ -380,7 +415,7 @@ class VMOps(object):
try:
# transfer the base copy
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
- base_copy_uuid = template_vdi_uuids[1]
+ base_copy_uuid = template_vdi_uuids['image']
vdi_ref, vm_vdi_rec = \
VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
@@ -395,7 +430,7 @@ class VMOps(object):
self._session.wait_for_task(task, instance.id)
# Now power down the instance and transfer the COW VHD
- self._shutdown(instance, vm_ref, method='clean')
+ self._shutdown(instance, vm_ref, hard=False)
params = {'host': dest,
'vdi_uuid': cow_uuid,
@@ -415,7 +450,7 @@ class VMOps(object):
# sensible so we don't need to blindly pass around dictionaries
return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
- def attach_disk(self, instance, base_copy_uuid, cow_uuid):
+ def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin"""
vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4())
@@ -436,9 +471,19 @@ class VMOps(object):
return new_cow_uuid
- def resize(self, instance, flavor):
+ def resize_instance(self, instance, vdi_uuid):
"""Resize a running instance by changing it's RAM and disk size """
- raise NotImplementedError()
+ #TODO(mdietz): this will need to be adjusted for swap later
+ #The new disk size must be in bytes
+
+ new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
+ instance_name = instance.name
+ instance_local_gb = instance.local_gb
+ LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
+ " Expanding to %(instance_local_gb)d GB") % locals())
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
+ LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance):
"""Reboot VM instance"""
@@ -513,8 +558,9 @@ class VMOps(object):
"""Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
- LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
- locals())
+ instance_name = instance.name
+ LOG.warn(_("VM %(instance_name)s already halted,"
+ "skipping shutdown...") % locals())
return
instance_id = instance.id
@@ -532,6 +578,10 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _shutdown_rescue(self, rescue_vm_ref):
+ """Shutdown a rescue instance"""
+ self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref)
+
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM"""
instance_id = instance.id
@@ -549,6 +599,24 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _destroy_rescue_vdis(self, rescue_vm_ref):
+ """Destroys all VDIs associated with a rescued VM"""
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
+ for vdi_ref in vdi_refs:
+ try:
+ self._session.call_xenapi("Async.VDI.destroy", vdi_ref)
+ except self.XenAPI.Failure:
+ continue
+
+ def _destroy_rescue_vbds(self, rescue_vm_ref):
+ """Destroys all VBDs tied to a rescue VM"""
+ vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
+ for vbd_ref in vbd_refs:
+ vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd_rec["userdevice"] == "1": # primary VBD is always 1
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk):
args = {}
if kernel:
@@ -603,6 +671,14 @@ class VMOps(object):
LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+ def _destroy_rescue_instance(self, rescue_vm_ref):
+ """Destroy a rescue instance"""
+ self._destroy_rescue_vbds(rescue_vm_ref)
+ self._shutdown_rescue(rescue_vm_ref)
+ self._destroy_rescue_vdis(rescue_vm_ref)
+
+ self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref)
+
def destroy(self, instance):
"""
Destroy VM instance
@@ -706,40 +782,56 @@ class VMOps(object):
"""
rescue_vm_ref = VMHelper.lookup(self._session,
- instance.name + "-rescue")
+ instance.name + "-rescue")
if not rescue_vm_ref:
raise exception.NotFound(_(
"Instance is not in Rescue Mode: %s" % instance.name))
original_vm_ref = self._get_vm_opaque_ref(instance)
- vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
-
instance._rescue = False
- for vbd_ref in vbd_refs:
- _vbd_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)
- if _vbd_ref["userdevice"] == "1":
- VMHelper.unplug_vbd(self._session, vbd_ref)
- VMHelper.destroy_vbd(self._session, vbd_ref)
+ self._destroy_rescue_instance(rescue_vm_ref)
+ self._release_bootlock(original_vm_ref)
+ self._start(instance, original_vm_ref)
+
+ def poll_rescued_instances(self, timeout):
+ """Look for expirable rescued instances
+ - forcibly exit rescue mode for any instances that have been
+ in rescue mode for >= the provided timeout
+ """
+ last_ran = self.poll_rescue_last_ran
+ if last_ran:
+ if not utils.is_older_than(last_ran, timeout):
+ # Do not run. Let's bail.
+ return
+ else:
+ # Update the time tracker and proceed.
+ self.poll_rescue_last_ran = utils.utcnow()
+ else:
+ # We need a base time to start tracking.
+ self.poll_rescue_last_ran = utils.utcnow()
+ return
- task1 = self._session.call_xenapi("Async.VM.hard_shutdown",
- rescue_vm_ref)
- self._session.wait_for_task(task1, instance.id)
+ rescue_vms = []
+ for instance in self.list_instances():
+ if instance.endswith("-rescue"):
+ rescue_vms.append(dict(name=instance,
+ vm_ref=VMHelper.lookup(self._session,
+ instance)))
- vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
- for vdi_ref in vdi_refs:
- try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
- self._session.wait_for_task(task, instance.id)
- except self.XenAPI.Failure:
- continue
+ for vm in rescue_vms:
+ rescue_name = vm["name"]
+ rescue_vm_ref = vm["vm_ref"]
- task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref)
- self._session.wait_for_task(task2, instance.id)
+ self._destroy_rescue_instance(rescue_vm_ref)
- self._release_bootlock(original_vm_ref)
- self._start(instance, original_vm_ref)
+ original_name = vm["name"].split("-rescue", 1)[0]
+ original_vm_ref = VMHelper.lookup(self._session, original_name)
+
+ self._release_bootlock(original_vm_ref)
+ self._session.call_xenapi("VM.start", original_vm_ref, False,
+ False)
def get_info(self, instance):
"""Return data about VM instance"""
@@ -797,8 +889,9 @@ class VMOps(object):
'mac': instance.mac_address,
'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs],
- 'ip6s': [ip6_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ if network['cidr_v6']:
+ info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
network_info.append((network, info))
return network_info
@@ -989,7 +1082,7 @@ class VMOps(object):
"""
vm_ref = self._get_vm_opaque_ref(instance_or_vm)
data = self._session.call_xenapi_request('VM.get_xenstore_data',
- (vm_ref, ))
+ (vm_ref,))
ret = {}
if keys is None:
keys = data.keys()
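The guard at the top of poll_rescued_instances is a generic run-at-most-once-per-timeout throttle: the first call only records a baseline timestamp, and later calls bail out unless that baseline is older than the timeout. The same pattern in isolation (a sketch; ThrottledTask is hypothetical, and utils.utcnow/utils.is_older_than are assumed to behave as used above):

    from nova import utils

    class ThrottledTask(object):
        """Hypothetical helper: runs work() at most once per timeout secs."""

        def __init__(self):
            self.last_ran = None

        def maybe_run(self, timeout, work):
            if self.last_ran is None:
                self.last_ran = utils.utcnow()  # establish a baseline only
                return
            if not utils.is_older_than(self.last_ran, timeout):
                return                          # ran too recently, bail
            self.last_ran = utils.utcnow()
            work()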
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 58640fba5..c7e94c508 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -69,6 +69,7 @@ from nova import db
from nova import utils
from nova import flags
from nova import log as logging
+from nova.virt import driver
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -141,10 +142,11 @@ def get_connection(_):
return XenAPIConnection(url, username, password)
-class XenAPIConnection(object):
+class XenAPIConnection(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
def __init__(self, url, user, pw):
+ super(XenAPIConnection, self).__init__()
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
@@ -160,24 +162,25 @@ class XenAPIConnection(object):
"""List VM instances"""
return self._vmops.list_instances()
+ def list_instances_detail(self):
+ return self._vmops.list_instances_detail()
+
def spawn(self, instance):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ self._vmops.revert_resize(instance)
+
def finish_resize(self, instance, disk_info):
"""Completes a resize, turning on the migrated instance"""
- vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'],
- disk_info['cow'])
- self._vmops._spawn_with_disk(instance, vdi_uuid)
+ self._vmops.finish_resize(instance, disk_info)
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id)
- def resize(self, instance, flavor):
- """Resize a VM instance"""
- raise NotImplementedError()
-
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -225,6 +228,10 @@ class XenAPIConnection(object):
"""Unrescue the specified instance"""
self._vmops.unrescue(instance, callback)
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ self._vmops.poll_rescued_instances(timeout)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
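After this merge the resize path is fully delegated: XenAPIConnection.finish_resize hands off to VMOps.finish_resize, which links the migrated VHDs, recreates the VM, grows the VDI, and boots it. A compressed sketch of that call flow, using the method names from the diff:

    conn = XenAPIConnection(url, user, pw)   # a driver.ComputeDriver
    conn.finish_resize(instance, disk_info)
    # -> VMOps.finish_resize(instance, disk_info):
    #      vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
    #                                 disk_info['cow'])
    #      vm_ref = self._create_vm(instance, vdi_uuid)
    #      self.resize_instance(instance, vdi_uuid)  # VDI.resize_online
    #      self._spawn(instance, vm_ref)             # VM.start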