Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/cpuinfo.xml.template      9
-rw-r--r--  nova/virt/disk.py                  44
-rw-r--r--  nova/virt/fake.py                  21
-rw-r--r--  nova/virt/images.py                30
-rw-r--r--  nova/virt/libvirt_conn.py         666
-rw-r--r--  nova/virt/xenapi/vm_utils.py      282
-rw-r--r--  nova/virt/xenapi/vmops.py         247
-rw-r--r--  nova/virt/xenapi/volume_utils.py    6
-rw-r--r--  nova/virt/xenapi_conn.py           27
9 files changed, 912 insertions(+), 420 deletions(-)
diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template
new file mode 100644
index 000000000..48842b29d
--- /dev/null
+++ b/nova/virt/cpuinfo.xml.template
@@ -0,0 +1,9 @@
+<cpu>
+ <arch>$arch</arch>
+ <model>$model</model>
+ <vendor>$vendor</vendor>
+ <topology sockets="$topology.sockets" cores="$topology.cores" threads="$topology.threads"/>
+#for $var in $features
+ <features name="$var" />
+#end for
+</cpu>
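
The file above is a Cheetah template; compare_cpu() in libvirt_conn.py (further down) renders it with the dictionary produced by get_cpu_info(). A minimal rendering sketch, assuming the Cheetah package nova already uses and an illustrative cpu_info dictionary (nova itself passes the dict straight in as searchList):

    from Cheetah.Template import Template

    cpu_info = {'arch': 'x86_64', 'model': 'Nehalem', 'vendor': 'Intel',
                'topology': {'sockets': 1, 'cores': 2, 'threads': 2},
                'features': ['tm2', 'vmx']}
    template = open('nova/virt/cpuinfo.xml.template').read()
    xml = str(Template(template, searchList=[cpu_info]))
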
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index 2bded07a4..9abe44cc3 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -49,10 +49,10 @@ def extend(image, size):
file_size = os.path.getsize(image)
if file_size >= size:
return
- utils.execute('truncate -s %s %s' % (size, image))
+ utils.execute('truncate', '-s', size, image)
# NOTE(vish): attempts to resize filesystem
- utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
- utils.execute('resize2fs %s' % image, check_exit_code=False)
+ utils.execute('e2fsck', '-fp', image, check_exit_code=False)
+ utils.execute('resize2fs', image, check_exit_code=False)
def inject_data(image, key=None, net=None, partition=None, nbd=False):
@@ -68,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
try:
if not partition is None:
# create partition
- out, err = utils.execute('sudo kpartx -a %s' % device)
+ out, err = utils.execute('sudo', 'kpartx', '-a', device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@@ -84,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+ out, err = utils.execute('sudo', 'tune2fs',
+ '-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
- 'sudo mount %s %s' % (mapped_device, tmpdir))
+ 'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
@@ -103,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
_inject_net_into_fs(net, tmpdir)
finally:
# unmount device
- utils.execute('sudo umount %s' % mapped_device)
+ utils.execute('sudo', 'umount', mapped_device)
finally:
# remove temporary directory
- utils.execute('rmdir %s' % tmpdir)
+ utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
- utils.execute('sudo kpartx -d %s' % device)
+ utils.execute('sudo', 'kpartx', '-d', device)
finally:
_unlink_device(device, nbd)
@@ -118,7 +119,7 @@ def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
if nbd:
device = _allocate_device()
- utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
+ utils.execute('sudo', 'qemu-nbd', '-c', device, image)
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
for i in xrange(FLAGS.timeout_nbd):
@@ -127,7 +128,7 @@ def _link_device(image, nbd):
time.sleep(1)
raise exception.Error(_('nbd device %s did not show up') % device)
else:
- out, err = utils.execute('sudo losetup --find --show %s' % image)
+ out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
@@ -137,10 +138,10 @@ def _link_device(image, nbd):
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
- utils.execute('sudo qemu-nbd -d %s' % device)
+ utils.execute('sudo', 'qemu-nbd', '-d', device)
_free_device(device)
else:
- utils.execute('sudo losetup --detach %s' % device)
+ utils.execute('sudo', 'losetup', '--detach', device)
_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
@@ -170,11 +171,12 @@ def _inject_key_into_fs(key, fs):
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
- utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
- utils.execute('sudo chown root %s' % sshdir)
- utils.execute('sudo chmod 700 %s' % sshdir)
+ utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root', sshdir)
+ utils.execute('sudo', 'chmod', '700', sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+ utils.execute('sudo', 'tee', '-a', keyfile,
+ process_input='\n' + key.strip() + '\n')
def _inject_net_into_fs(net, fs):
@@ -183,8 +185,8 @@ def _inject_net_into_fs(net, fs):
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
- utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
- utils.execute('sudo chown root:root %s' % netdir)
- utils.execute('sudo chmod 755 %s' % netdir)
+ utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root:root', netdir)
+ utils.execute('sudo', 'chmod', 755, netdir)
netfile = os.path.join(netdir, 'interfaces')
- utils.execute('sudo tee %s' % netfile, net)
+ utils.execute('sudo', 'tee', netfile, process_input=net)
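
The disk.py changes above are part of a tree-wide switch from shell-interpolated command strings to argument vectors (with process_input for stdin data). A self-contained illustration of the difference, using plain subprocess rather than nova.utils.execute:

    import subprocess

    image = 'disk image with spaces.img'

    # Old style: one interpolated string handed to a shell; whitespace or
    # shell metacharacters in `image` break the command (or worse).
    #   subprocess.call('truncate -s 10G %s' % image, shell=True)

    # New style: one list element per argument, no shell involved.
    subprocess.call(['truncate', '-s', '10G', image])
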
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index c744acf91..3a06284a1 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -407,6 +407,27 @@ class FakeConnection(object):
"""
return True
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
class FakeInstance(object):
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 7a6fef330..2e3f2ee4d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -28,29 +28,32 @@ import time
import urllib2
import urlparse
+from nova import context
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
-from nova.objectstore import image
FLAGS = flags.FLAGS
-flags.DEFINE_bool('use_s3', True,
- 'whether to get images from s3 or use local copy')
-
LOG = logging.getLogger('nova.virt.images')
-def fetch(image, path, user, project):
- if FLAGS.use_s3:
- f = _fetch_s3_image
- else:
- f = _fetch_local_image
- return f(image, path, user, project)
+def fetch(image_id, path, _user, _project):
+ # TODO(vish): Improve context handling and add owner and auth data
+ # when it is added to glance. Right now there is no
+ # auth checking in glance, so we assume that access was
+ # checked before we got here.
+ image_service = utils.import_object(FLAGS.image_service)
+ with open(path, "wb") as image_file:
+ elevated = context.get_admin_context()
+ metadata = image_service.get(elevated, image_id, image_file)
+ return metadata
+# NOTE(vish): The methods below should be unnecessary, but I'm leaving
+# them in case the glance client does not work on windows.
def _fetch_image_no_curl(url, path, headers):
request = urllib2.Request(url)
for (k, v) in headers.iteritems():
@@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-H', '\'%s: %s\'' % (k, v)]
cmd += ['-o', path]
- cmd_out = ' '.join(cmd)
- return utils.execute(cmd_out)
+ return utils.execute(*cmd)
def _fetch_local_image(image, path, user, project):
@@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project):
if sys.platform.startswith('win'):
return shutil.copy(source, path)
else:
- return utils.execute('cp %s %s' % (source, path))
+ return utils.execute('cp', source, path)
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
+# TODO(vish): xenapi should use the glance client code directly instead
+# of retrieving the image using this method.
def image_url(image):
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
return "http://%s:%s/images/%s" % (FLAGS.glance_host,
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 9f7315c17..2559c2b81 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,17 +36,19 @@ Supports KVM, QEMU, UML, and XEN.
"""
+import multiprocessing
import os
import shutil
+import sys
import random
import subprocess
+import time
import uuid
from xml.dom import minidom
from eventlet import greenthread
-from eventlet import event
from eventlet import tpool
+from eventlet import semaphore
import IPy
@@ -57,7 +59,6 @@ from nova import flags
from nova import log as logging
#from nova import test
from nova import utils
-#from nova.api import context
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
@@ -71,6 +72,7 @@ Template = None
LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
@@ -101,6 +103,17 @@ flags.DEFINE_string('ajaxterm_portrange',
flags.DEFINE_string('firewall_driver',
'nova.virt.libvirt_conn.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
+flags.DEFINE_string('cpuinfo_xml_template',
+ utils.abspath('virt/cpuinfo.xml.template'),
+ 'CpuInfo XML Template (used only for live migration now)')
+flags.DEFINE_string('live_migration_uri',
+ "qemu+tcp://%s/system",
+ 'Define protocol used by live_migration feature')
+flags.DEFINE_string('live_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
+ 'Define live migration behavior.')
+flags.DEFINE_integer('live_migration_bandwidth', 0,
+ 'Bandwidth limit for live migration, in MiB/s (0 for no limit)')
def get_connection(read_only):
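
live_migration_flag is a comma-separated list of libvirt flag names; _live_migration() (further down) ORs them into the bitmask handed to migrateToURI(). A standalone sketch of that conversion, assuming the libvirt Python bindings are installed:

    import libvirt

    flaglist = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'.split(',')
    flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
    logical_sum = reduce(lambda x, y: x | y, flagvals)
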
@@ -147,6 +160,7 @@ class LibvirtConnection(object):
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
+ self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -348,19 +362,19 @@ class LibvirtConnection(object):
@exception.wrap_exception
def pause(self, instance, callback):
- raise exception.APIError("pause not supported for libvirt.")
+ raise exception.ApiError("pause not supported for libvirt.")
@exception.wrap_exception
def unpause(self, instance, callback):
- raise exception.APIError("unpause not supported for libvirt.")
+ raise exception.ApiError("unpause not supported for libvirt.")
@exception.wrap_exception
def suspend(self, instance, callback):
- raise exception.APIError("suspend not supported for libvirt")
+ raise exception.ApiError("suspend not supported for libvirt")
@exception.wrap_exception
def resume(self, instance, callback):
- raise exception.APIError("resume not supported for libvirt")
+ raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
def rescue(self, instance, callback=None):
@@ -439,8 +453,10 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
LOG.info(_("cool, it's a device"))
- out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
- virsh_output, check_exit_code=False)
+ out, err = utils.execute('sudo', 'dd',
+ "if=%s" % virsh_output,
+ 'iflag=nonblock',
+ check_exit_code=False)
return out
else:
return ''
@@ -462,11 +478,11 @@ class LibvirtConnection(object):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
- utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+ utils.execute('sudo', 'chown', os.getuid(), console_log)
if FLAGS.libvirt_type == 'xen':
# Xen is special
- virsh_output = utils.execute("virsh ttyconsole %s" %
+ virsh_output = utils.execute('virsh', 'ttyconsole',
instance['name'])
data = self._flush_xen_console(virsh_output)
fpath = self._append_to_file(data, console_log)
@@ -483,9 +499,10 @@ class LibvirtConnection(object):
port = random.randint(int(start_port), int(end_port))
# netcat will exit with 0 only if the port is in use,
# so a nonzero return value implies it is unused
- cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
- stdout, stderr = utils.execute(cmd)
- if stdout.strip() == 'free':
+ cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+ try:
+ stdout, stderr = utils.execute(*cmd, process_input='')
+ except exception.ProcessExecutionError:
return port
raise Exception(_('Unable to find an open port'))
@@ -512,7 +529,10 @@ class LibvirtConnection(object):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
- def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
+ _image_sems = {}
+
+ @staticmethod
+ def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
@@ -531,14 +551,21 @@ class LibvirtConnection(object):
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base = os.path.join(base_dir, fname)
- if not os.path.exists(base):
- fn(target=base, *args, **kwargs)
+
+ if fname not in LibvirtConnection._image_sems:
+ LibvirtConnection._image_sems[fname] = semaphore.Semaphore()
+ with LibvirtConnection._image_sems[fname]:
+ if not os.path.exists(base):
+ fn(target=base, *args, **kwargs)
+ if not LibvirtConnection._image_sems[fname].locked():
+ del LibvirtConnection._image_sems[fname]
+
if cow:
- utils.execute('qemu-img create -f qcow2 -o '
- 'cluster_size=2M,backing_file=%s %s'
- % (base, target))
+ utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
+ 'cluster_size=2M,backing_file=%s' % base,
+ target)
else:
- utils.execute('cp %s %s' % (base, target))
+ utils.execute('cp', base, target)
def _fetch_image(self, target, image_id, user, project, size=None):
"""Grab image and optionally attempt to resize it"""
@@ -548,7 +575,7 @@ class LibvirtConnection(object):
def _create_local(self, target, local_gb):
"""Create a blank image of specified size"""
- utils.execute('truncate %s -s %dG' % (target, local_gb))
+ utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
@@ -559,7 +586,7 @@ class LibvirtConnection(object):
fname + suffix)
# ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('mkdir', '-p', basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
@@ -579,21 +606,23 @@ class LibvirtConnection(object):
'ramdisk_id': inst['ramdisk_id']}
if disk_images['kernel_id']:
+ fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
- fname=disk_images['kernel_id'],
+ fname=fname,
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
+ fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
- fname=disk_images['ramdisk_id'],
+ fname=fname,
image_id=disk_images['ramdisk_id'],
user=user,
project=project)
- root_fname = disk_images['image_id']
+ root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
@@ -659,7 +688,7 @@ class LibvirtConnection(object):
' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
- utils.execute('sudo chown root %s' % basepath('disk'))
+ utils.execute('sudo', 'chown', 'root', basepath('disk'))
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
@@ -750,7 +779,7 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError(_("diagnostics are not supported "
+ raise exception.ApiError(_("diagnostics are not supported "
"for libvirt"))
def get_disks(self, instance_name):
@@ -837,6 +866,159 @@ class LibvirtConnection(object):
return interfaces
+ def get_vcpu_total(self):
+ """Get vcpu number of physical computer.
+
+ :returns: the number of cpu core.
+
+ """
+
+ # On certain platforms, this will raise a NotImplementedError.
+ try:
+ return multiprocessing.cpu_count()
+ except NotImplementedError:
+ LOG.warn(_("Cannot get the number of cpu, because this "
+ "function is not implemented for this platform. "
+ "This error can be safely ignored for now."))
+ return 0
+
+ def get_memory_mb_total(self):
+ """Get the total memory size(MB) of physical computer.
+
+ :returns: the total amount of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ meminfo = open('/proc/meminfo').read().split()
+ idx = meminfo.index('MemTotal:')
+ # transforming kb to mb.
+ return int(meminfo[idx + 1]) / 1024
+
+ def get_local_gb_total(self):
+ """Get the total hdd size(GB) of physical computer.
+
+ :returns:
+ The total amount of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024
+
+ def get_vcpu_used(self):
+ """ Get vcpu usage number of physical computer.
+
+ :returns: The total number of vcpu that currently used.
+
+ """
+
+ total = 0
+ for dom_id in self._conn.listDomainsID():
+ dom = self._conn.lookupByID(dom_id)
+ total += len(dom.vcpus()[1])
+ return total
+
+ def get_memory_mb_used(self):
+ """Get the free memory size(MB) of physical computer.
+
+ :returns: the total usage of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ m = open('/proc/meminfo').read().split()
+ idx1 = m.index('MemFree:')
+ idx2 = m.index('Buffers:')
+ idx3 = m.index('Cached:')
+ avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
+ return self.get_memory_mb_total() - avail
+
+ def get_local_gb_used(self):
+ """Get the free hdd size(GB) of physical computer.
+
+ :returns:
+ The total usage of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024
+ return self.get_local_gb_total() - avail
+
+ def get_hypervisor_type(self):
+ """Get hypervisor type.
+
+ :returns: hypervisor type (ex. qemu)
+
+ """
+
+ return self._conn.getType()
+
+ def get_hypervisor_version(self):
+ """Get hypervisor version.
+
+ :returns: hypervisor version (ex. 12003)
+
+ """
+
+ return self._conn.getVersion()
+
+ def get_cpu_info(self):
+ """Get cpuinfo information.
+
+ Obtains cpu feature from virConnect.getCapabilities,
+ and returns as a json string.
+
+ :return: see above description
+
+ """
+
+ xml = self._conn.getCapabilities()
+ xml = libxml2.parseDoc(xml)
+ nodes = xml.xpathEval('//host/cpu')
+ if len(nodes) != 1:
+ raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
+ "but %d\n") % len(nodes)
+ + xml.serialize())
+
+ cpu_info = dict()
+ cpu_info['arch'] = xml.xpathEval('//host/cpu/arch')[0].getContent()
+ cpu_info['model'] = xml.xpathEval('//host/cpu/model')[0].getContent()
+ cpu_info['vendor'] = xml.xpathEval('//host/cpu/vendor')[0].getContent()
+
+ topology_node = xml.xpathEval('//host/cpu/topology')[0]\
+ .get_properties()
+ topology = dict()
+ while topology_node != None:
+ name = topology_node.get_name()
+ topology[name] = topology_node.getContent()
+ topology_node = topology_node.get_next()
+
+ keys = ['cores', 'sockets', 'threads']
+ tkeys = topology.keys()
+ if list(set(tkeys)) != list(set(keys)):
+ ks = ', '.join(keys)
+ raise exception.Invalid(_("Invalid xml: topology(%(topology)s) "
+ "must have %(ks)s") % locals())
+
+ feature_nodes = xml.xpathEval('//host/cpu/feature')
+ features = list()
+ for nodes in feature_nodes:
+ features.append(nodes.get_properties().getContent())
+
+ cpu_info['topology'] = topology
+ cpu_info['features'] = features
+ return utils.dumps(cpu_info)
+
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name, not an Instance, so
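
The resource-reporting helpers above read /proc and libvirt directly. A standalone sketch (Linux only) of the /proc/meminfo arithmetic behind get_memory_mb_total() and get_memory_mb_used():

    meminfo = open('/proc/meminfo').read().split()

    def mb(key):
        # /proc/meminfo reports kB; each value follows its label.
        return int(meminfo[meminfo.index(key) + 1]) / 1024

    total_mb = mb('MemTotal:')
    avail_mb = mb('MemFree:') + mb('Buffers:') + mb('Cached:')
    used_mb = total_mb - avail_mb
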
@@ -867,6 +1049,207 @@ class LibvirtConnection(object):
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+ This method is called when nova-compute launches, and
+ whenever admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+ :param host: hostname that compute manager is currently running
+
+ """
+
+ try:
+ service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+ except exception.NotFound:
+ raise exception.Invalid(_("Cannot update compute manager "
+ "specific info, because no service "
+ "record was found."))
+
+ # Updating host information
+ dic = {'vcpus': self.get_vcpu_total(),
+ 'memory_mb': self.get_memory_mb_total(),
+ 'local_gb': self.get_local_gb_total(),
+ 'vcpus_used': self.get_vcpu_used(),
+ 'memory_mb_used': self.get_memory_mb_used(),
+ 'local_gb_used': self.get_local_gb_used(),
+ 'hypervisor_type': self.get_hypervisor_type(),
+ 'hypervisor_version': self.get_hypervisor_version(),
+ 'cpu_info': self.get_cpu_info()}
+
+ compute_node_ref = service_ref['compute_node']
+ if not compute_node_ref:
+ LOG.info(_('Compute_service record created for %s ') % host)
+ dic['service_id'] = service_ref['id']
+ db.compute_node_create(ctxt, dic)
+ else:
+ LOG.info(_('Compute_service record updated for %s ') % host)
+ db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
+
+ def compare_cpu(self, cpu_info):
+ """Checks the host cpu is compatible to a cpu given by xml.
+
+ "xml" must be a part of libvirt.openReadonly().getCapabilities().
+ return values follows by virCPUCompareResult.
+ if 0 > return value, do live migration.
+ 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+
+ :param cpu_info: json string that shows cpu feature(see get_cpu_info())
+ :returns:
+ None. if given cpu info is not compatible to this server,
+ raise exception.
+
+ """
+
+ LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
+ dic = utils.loads(cpu_info)
+ xml = str(Template(self.cpuinfo_xml, searchList=dic))
+ LOG.info(_('to xml...\n:%s ' % xml))
+
+ u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
+ m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
+ # unknown character exists in xml, then libvirt complains
+ try:
+ ret = self._conn.compareCPU(xml, 0)
+ except libvirt.libvirtError, e:
+ ret = e.message
+ LOG.error(m % locals())
+ raise
+
+ if ret <= 0:
+ raise exception.Invalid(m % locals())
+
+ return
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filterling rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+
+ # If no instance has ever launched on the destination host,
+ # basic filtering must be set up here.
+ self.firewall_driver.setup_basic_filtering(instance_ref)
+ # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref)
+
+ # wait for completion
+ timeout_count = range(FLAGS.live_migration_retry_count)
+ while timeout_count:
+ try:
+ filter_name = 'nova-instance-%s' % instance_ref.name
+ self._conn.nwfilterLookupByName(filter_name)
+ break
+ except libvirt.libvirtError:
+ timeout_count.pop()
+ if len(timeout_count) == 0:
+ ec2_id = instance_ref['hostname']
+ iname = instance_ref.name
+ msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
+ raise exception.Error(msg % locals())
+ time.sleep(1)
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
+ post_method, recover_method)
+
+ def _live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Do live migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ # Do live migration.
+ try:
+ flaglist = FLAGS.live_migration_flag.split(',')
+ flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
+ logical_sum = reduce(lambda x, y: x | y, flagvals)
+
+ if self.read_only:
+ tmpconn = self._connect(self.libvirt_uri, False)
+ dom = tmpconn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+ tmpconn.close()
+ else:
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+
+ except Exception:
+ recover_method(ctxt, instance_ref)
+ raise
+
+ # Waiting for completion of live_migration.
+ timer = utils.LoopingCall(f=None)
+
+ def wait_for_live_migration():
+ """waiting for live migration completion"""
+ try:
+ self.get_info(instance_ref.name)['state']
+ except exception.NotFound:
+ timer.stop()
+ post_method(ctxt, instance_ref, dest)
+
+ timer.f = wait_for_live_migration
+ timer.start(interval=0.5, now=True)
+
+ def unfilter_instance(self, instance_ref):
+ """See comments of same method in firewall_driver."""
+ self.firewall_driver.unfilter_instance(instance_ref)
+
class FirewallDriver(object):
def prepare_instance_filter(self, instance):
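
compare_cpu() above is a thin wrapper around libvirt's CPU compatibility check. A standalone sketch of the underlying call, assuming the libvirt Python bindings and a local hypervisor; the CPU description is illustrative:

    import libvirt

    cpu_xml = '''<cpu>
      <arch>x86_64</arch>
      <model>Nehalem</model>
      <vendor>Intel</vendor>
      <topology sockets="1" cores="2" threads="2"/>
    </cpu>'''

    conn = libvirt.openReadOnly(None)   # local hypervisor, read-only
    if conn.compareCPU(cpu_xml, 0) <= 0:
        raise Exception('instance CPU is not compatible with this host')
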
@@ -1208,10 +1591,16 @@ class NWFilterFirewall(FirewallDriver):
class IptablesFirewallDriver(FirewallDriver):
def __init__(self, execute=None, **kwargs):
- self.execute = execute or utils.execute
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
def setup_basic_filtering(self, instance):
"""Use NWFilter from libvirt for this."""
return self.nwfilter.setup_basic_filtering(instance)
@@ -1220,126 +1609,96 @@ class IptablesFirewallDriver(FirewallDriver):
"""No-op. Everything is done in prepare_instance_filter"""
pass
- def remove_instance(self, instance):
- if instance['id'] in self.instances:
- del self.instances[instance['id']]
+ def unfilter_instance(self, instance):
+ if self.instances.pop(instance['id'], None):
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
else:
LOG.info(_('Attempted to unfilter instance %s which is not '
- 'filtered'), instance['id'])
+ 'filtered'), instance['id'])
- def add_instance(self, instance):
+ def prepare_instance_filter(self, instance):
self.instances[instance['id']] = instance
+ self.add_filters_for_instance(instance)
+ self.iptables.apply()
- def unfilter_instance(self, instance):
- self.remove_instance(instance)
- self.apply_ruleset()
+ def add_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
- def prepare_instance_filter(self, instance):
- self.add_instance(instance)
- self.apply_ruleset()
-
- def apply_ruleset(self):
- current_filter, _ = self.execute('sudo iptables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 4)
- self.execute('sudo iptables-restore',
- process_input='\n'.join(new_filter))
- if(FLAGS.use_ipv6):
- current_filter, _ = self.execute('sudo ip6tables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 6)
- self.execute('sudo ip6tables-restore',
- process_input='\n'.join(new_filter))
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_address = self._ip_for_instance(instance)
+ self.iptables.ipv4['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv4_address, chain_name))
+
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ ipv6_address = self._ip_for_instance_v6(instance)
+ self.iptables.ipv6['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv6_address,
+ chain_name))
- def modify_rules(self, current_lines, ip_version=4):
+ ipv4_rules, ipv6_rules = self.instance_rules(instance)
+
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ def instance_rules(self, instance):
ctxt = context.get_admin_context()
- # Remove any trace of nova rules.
- new_filter = filter(lambda l: 'nova-' not in l, current_lines)
-
- seen_chains = False
- for rules_index in range(len(new_filter)):
- if not seen_chains:
- if new_filter[rules_index].startswith(':'):
- seen_chains = True
- elif seen_chains == 1:
- if not new_filter[rules_index].startswith(':'):
- break
- our_chains = [':nova-fallback - [0:0]']
- our_rules = ['-A nova-fallback -j DROP']
-
- our_chains += [':nova-local - [0:0]']
- our_rules += ['-A FORWARD -j nova-local']
- our_rules += ['-A OUTPUT -j nova-local']
-
- security_groups = {}
- # Add our chains
- # First, we add instance chains and rules
- for instance_id in self.instances:
- instance = self.instances[instance_id]
- chain_name = self._instance_chain_name(instance)
- if(ip_version == 4):
- ip_address = self._ip_for_instance(instance)
- elif(ip_version == 6):
- ip_address = self._ip_for_instance_v6(instance)
-
- our_chains += [':%s - [0:0]' % chain_name]
-
- # Jump to the per-instance chain
- our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
- chain_name)]
-
- # Always drop invalid packets
- our_rules += ['-A %s -m state --state '
- 'INVALID -j DROP' % (chain_name,)]
-
- # Allow established connections
- our_rules += ['-A %s -m state --state '
- 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
-
- # Jump to each security group chain in turn
- for security_group in \
- db.security_group_get_by_instance(ctxt,
- instance['id']):
- security_groups[security_group['id']] = security_group
-
- sg_chain_name = self._security_group_chain_name(
- security_group['id'])
+ ipv4_rules = []
+ ipv6_rules = []
- our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
-
- if(ip_version == 4):
- # Allow DHCP responses
- dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT ' % (chain_name, dhcp_server)]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidr = self._project_cidr_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
- elif(ip_version == 6):
- # Allow RA responses
- ra_server = self._ra_server_for_instance(instance)
- if ra_server:
- our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
- (chain_name, ra_server + "/128")]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidrv6 = self._project_cidrv6_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' %
- (chain_name, cidrv6)]
-
- # If nothing matches, jump to the fallback chain
- our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
+ # Always drop invalid packets
+ ipv4_rules += ['-m state --state INVALID -j DROP']
+ ipv6_rules += ['-m state --state INVALID -j DROP']
- # then, security group chains and rules
- for security_group_id in security_groups:
- chain_name = self._security_group_chain_name(security_group_id)
- our_chains += [':%s - [0:0]' % chain_name]
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+
+ dhcp_server = self._dhcp_server_for_instance(instance)
+ ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,)]
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidr = self._project_cidr_for_instance(instance)
+ ipv4_rules += ['-s %s -j ACCEPT' % (cidr,)]
+
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ ra_server = self._ra_server_for_instance(instance)
+ if ra_server:
+ ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (ra_server,)]
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrv6 = self._project_cidrv6_for_instance(instance)
+ ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)]
- rules = \
- db.security_group_rule_get_by_security_group(ctxt,
- security_group_id)
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
for rule in rules:
logging.info('%r', rule)
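
The rewritten driver delegates chain and rule bookkeeping to linux_net.iptables_manager instead of editing iptables-save output itself. A hedged sketch of that manager API exactly as it is used in this diff (chain name and address are illustrative):

    from nova.network import linux_net

    manager = linux_net.iptables_manager

    # Per-instance chain, reached from the 'local' chain by destination IP.
    manager.ipv4['filter'].add_chain('inst-42')
    manager.ipv4['filter'].add_rule('local', '-d 10.0.0.5 -j $inst-42')
    manager.ipv4['filter'].add_rule('inst-42',
                                    '-m state --state ESTABLISHED,RELATED '
                                    '-j ACCEPT')
    manager.ipv4['filter'].add_rule('inst-42', '-j $sg-fallback')

    # Nothing touches the kernel until apply() regenerates the tables.
    manager.apply()
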
@@ -1350,14 +1709,16 @@ class IptablesFirewallDriver(FirewallDriver):
continue
version = _get_ip_version(rule.cidr)
- if version != ip_version:
- continue
+ if version == 4:
+ rules = ipv4_rules
+ else:
+ rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+ args = ['-p', protocol, '-s', rule.cidr]
if rule.protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
@@ -1378,32 +1739,39 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
- if(ip_version == 4):
+ if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
- elif(ip_version == 6):
+ elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j ACCEPT']
- our_rules += [' '.join(args)]
+ rules += [' '.join(args)]
+
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
- new_filter[rules_index:rules_index] = our_rules
- new_filter[rules_index:rules_index] = our_chains
- logging.info('new_filter: %s', '\n'.join(new_filter))
- return new_filter
+ return ipv4_rules, ipv6_rules
def refresh_security_group_members(self, security_group):
pass
def refresh_security_group_rules(self, security_group):
- self.apply_ruleset()
+ # We use the semaphore to make sure no one applies the rule set
+ # after we've yanked the existing rules but before we've put in
+ # the new ones.
+ with self.iptables.semaphore:
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ self.add_filters_for_instance(instance)
+ self.iptables.apply()
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
def _instance_chain_name(self, instance):
- return 'nova-inst-%s' % (instance['id'],)
+ return 'inst-%s' % (instance['id'],)
def _ip_for_instance(self, instance):
return db.instance_get_fixed_address(context.get_admin_context(),
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ce081a2d6..763c5fe40 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -41,9 +41,11 @@ from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError
-FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
+
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
@@ -80,10 +82,19 @@ class VMHelper(HelperBase):
"""
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
+ def create_vm(cls, session, instance, kernel, ramdisk,
+ use_pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
VM reference.
- the pv_kernel flag indicates whether the guest is HVM or PV
+ the use_pv_kernel flag indicates whether the guest is HVM or PV
+
+ There are 3 scenarios:
+
+ 1. Using paravirtualization, kernel passed in
+
+ 2. Using paravirtualization, kernel within the image
+
+ 3. Using hardware virtualization
"""
instance_type = instance_types.\
@@ -91,52 +102,61 @@ class VMHelper(HelperBase):
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
- 'name_label': instance.name,
- 'name_description': '',
+ 'actions_after_crash': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_shutdown': 'destroy',
+ 'affinity': '',
+ 'blocked_operations': {},
+ 'ha_always_run': False,
+ 'ha_restart_priority': '',
+ 'HVM_boot_params': {},
+ 'HVM_boot_policy': '',
'is_a_template': False,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'actions_after_shutdown': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_crash': 'destroy',
- 'PV_bootloader': '',
- 'PV_kernel': '',
- 'PV_ramdisk': '',
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_target': mem,
+ 'name_description': '',
+ 'name_label': instance.name,
+ 'other_config': {'allowvssprovider': False},
+ 'PCI_bus': '',
+ 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
+ 'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
+ 'PV_bootloader': '',
'PV_bootloader_args': '',
+ 'PV_kernel': '',
'PV_legacy_args': '',
- 'HVM_boot_policy': '',
- 'HVM_boot_params': {},
- 'platform': {},
- 'PCI_bus': '',
+ 'PV_ramdisk': '',
'recommendations': '',
- 'affinity': '',
+ 'tags': [],
'user_version': '0',
- 'other_config': {},
- }
- #Complete VM configuration record according to the image type
- #non-raw/raw with PV kernel/raw in HVM mode
- if instance.kernel_id:
- rec['PV_bootloader'] = ''
- rec['PV_kernel'] = kernel
- rec['PV_ramdisk'] = ramdisk
- rec['PV_args'] = 'root=/dev/xvda1'
- rec['PV_bootloader_args'] = ''
- rec['PV_legacy_args'] = ''
- else:
- if pv_kernel:
- rec['PV_args'] = 'noninteractive'
- rec['PV_bootloader'] = 'pygrub'
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'xenstore_data': {}}
+
+ # Complete VM configuration record according to the image type
+ # non-raw/raw with PV kernel/raw in HVM mode
+ if use_pv_kernel:
+ rec['platform']['nx'] = 'false'
+ if instance.kernel_id:
+ # 1. Kernel explicitly passed in, use that
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
else:
- rec['HVM_boot_policy'] = 'BIOS order'
- rec['HVM_boot_params'] = {'order': 'dc'}
- rec['platform'] = {'acpi': 'true', 'apic': 'true',
- 'pae': 'true', 'viridian': 'true'}
+ # 2. Use kernel within the image
+ rec['PV_args'] = 'clocksource=jiffies'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ # 3. Using hardware virtualization
+ rec['platform']['nx'] = 'true'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['HVM_boot_policy'] = 'BIOS order'
+
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
instance_name = instance.name
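
A condensed, standalone restatement of the three boot scenarios create_vm() now distinguishes; field names follow the XenAPI VM record built above:

    def boot_record_fields(use_pv_kernel, kernel, ramdisk):
        rec = {'platform': {}, 'PV_args': '', 'PV_bootloader': '',
               'PV_kernel': '', 'PV_ramdisk': '',
               'HVM_boot_params': {}, 'HVM_boot_policy': ''}
        if use_pv_kernel:
            rec['platform']['nx'] = 'false'
            if kernel:
                # 1. Paravirtualized, kernel passed in explicitly
                rec['PV_args'] = 'root=/dev/xvda1'
                rec['PV_kernel'] = kernel
                rec['PV_ramdisk'] = ramdisk
            else:
                # 2. Paravirtualized, kernel inside the image (pygrub)
                rec['PV_args'] = 'clocksource=jiffies'
                rec['PV_bootloader'] = 'pygrub'
        else:
            # 3. Hardware virtualization
            rec['platform']['nx'] = 'true'
            rec['HVM_boot_params'] = {'order': 'dc'}
            rec['HVM_boot_policy'] = 'BIOS order'
        return rec
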
@@ -181,13 +201,13 @@ class VMHelper(HelperBase):
@classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
- vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref)
if vbd_rec['userdevice'] == str(number):
- return vbd
+ return vbd_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@@ -319,7 +339,7 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
- def upload_image(cls, session, instance_id, vdi_uuids, image_id):
+ def upload_image(cls, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
@@ -328,15 +348,18 @@ class VMHelper(HelperBase):
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
+ os_type = instance.os_type or FLAGS.default_os_type
+
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
- 'sr_path': cls.get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session),
+ 'os_type': os_type}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
- session.wait_for_task(task, instance_id)
+ session.wait_for_task(task, instance.id)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
@@ -419,29 +442,29 @@ class VMHelper(HelperBase):
vdi_size += MBR_SIZE_BYTES
name_label = get_name_label_for_image(image)
- vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
+ vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
- with_vdi_attached_here(session, vdi, False,
+ with_vdi_attached_here(session, vdi_ref, False,
lambda dev:
_stream_disk(dev, image_type,
virtual_size, image_file))
if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
fn = "copy_kernel_vdi"
args = {}
- args['vdi-ref'] = vdi
+ args['vdi-ref'] = vdi_ref
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
return filename
else:
- return session.get_xenapi().VDI.get_uuid(vdi)
+ return session.get_xenapi().VDI.get_uuid(vdi_ref)
@classmethod
def determine_disk_image_type(cls, instance):
@@ -533,17 +556,33 @@ class VMHelper(HelperBase):
return uuid
@classmethod
- def lookup_image(cls, session, instance_id, vdi_ref):
+ def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
+ os_type):
"""
- Determine if VDI is using a PV kernel
+ Determine whether the VM will use a paravirtualized kernel or if it
+ will use hardware virtualization.
+
+ 1. Objectstore (any image type):
+ We use plugin to figure out whether the VDI uses PV
+
+ 2. Glance (VHD): then we use `os_type`, raise if not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
"""
if FLAGS.xenapi_image_service == 'glance':
- return cls._lookup_image_glance(session, vdi_ref)
+ # 2, 3, 4: Glance
+ return cls._determine_is_pv_glance(
+ session, vdi_ref, disk_image_type, os_type)
else:
- return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+ # 1. Objectstore
+ return cls._determine_is_pv_objectstore(session, instance_id,
+ vdi_ref)
@classmethod
- def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
+ def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
@@ -559,42 +598,72 @@ class VMHelper(HelperBase):
return pv
@classmethod
- def _lookup_image_glance(cls, session, vdi_ref):
+ def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
+ os_type):
+ """
+ For a Glance image, determine if we need paravirtualization.
+
+ The relevant scenarios are:
+ 2. Glance (VHD): then we use `os_type`, raise if not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
+
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+ if disk_image_type == ImageType.DISK_VHD:
+ # 2. VHD
+ if os_type == 'windows':
+ is_pv = False
+ else:
+ is_pv = True
+ elif disk_image_type == ImageType.DISK_RAW:
+ # 3. RAW
+ is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+ elif disk_image_type == ImageType.DISK:
+ # 4. Disk
+ is_pv = True
+ else:
+ raise exception.Error(_("Unknown image format %(disk_image_type)s")
+ % locals())
+
+ return is_pv
@classmethod
- def lookup(cls, session, i):
+ def lookup(cls, session, name_label):
"""Look the instance i up, and returns it if available"""
- vms = session.get_xenapi().VM.get_by_name_label(i)
- n = len(vms)
+ vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
+ n = len(vm_refs)
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') % i)
+ raise exception.Duplicate(_('duplicate name found: %s') %
+ name_label)
else:
- return vms[0]
+ return vm_refs[0]
@classmethod
- def lookup_vm_vdis(cls, session, vm):
+ def lookup_vm_vdis(cls, session, vm_ref):
"""Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
- vbds = session.get_xenapi().VM.get_VBDs(vm)
- vdis = []
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ vdi_refs = []
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vdi = session.get_xenapi().VBD.get_VDI(vbd)
+ vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
# Test valid VDI
- record = session.get_xenapi().VDI.get_record(vdi)
+ record = session.get_xenapi().VDI.get_record(vdi_ref)
LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
else:
- vdis.append(vdi)
- if len(vdis) > 0:
- return vdis
+ vdi_refs.append(vdi_ref)
+ if len(vdi_refs) > 0:
+ return vdi_refs
else:
return None
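
The Glance branch of the PV/HVM decision reduces to a small table. A minimal restatement of _determine_is_pv_glance() above; the ImageType stub below is a stand-in with placeholder values, and vdi_looks_pv stands for the pygrub inspection result:

    class ImageType(object):
        # Placeholder values; the real constants live in this module.
        DISK, DISK_RAW, DISK_VHD = range(3)

    def is_pv_for_glance(disk_image_type, os_type, vdi_looks_pv):
        if disk_image_type == ImageType.DISK_VHD:
            return os_type != 'windows'    # 2. VHD: decided by os_type
        elif disk_image_type == ImageType.DISK_RAW:
            return vdi_looks_pv            # 3. RAW: pygrub inspection result
        elif disk_image_type == ImageType.DISK:
            return True                    # 4. separate kernel: PV assumed
        raise ValueError('Unknown image format %s' % disk_image_type)
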
@@ -770,16 +839,16 @@ def safe_find_sr(session):
def find_sr(session):
"""Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
- srs = session.get_xenapi().SR.get_all()
- for sr in srs:
- sr_rec = session.get_xenapi().SR.get_record(sr)
+ sr_refs = session.get_xenapi().SR.get_all()
+ for sr_ref in sr_refs:
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
- for pbd in sr_rec['PBDs']:
- pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+ for pbd_ref in sr_rec['PBDs']:
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
if pbd_rec['host'] == host:
- return sr
+ return sr_ref
return None
@@ -804,11 +873,11 @@ def remap_vbd_dev(dev):
return remapped_dev
-def with_vdi_attached_here(session, vdi, read_only, f):
+def with_vdi_attached_here(session, vdi_ref, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
vbd_rec['VM'] = this_vm_ref
- vbd_rec['VDI'] = vdi
+ vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = 'autodetect'
vbd_rec['bootable'] = False
vbd_rec['mode'] = read_only and 'RO' or 'RW'
@@ -819,28 +888,28 @@ def with_vdi_attached_here(session, vdi, read_only, f):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
- vbd = session.get_xenapi().VBD.create(vbd_rec)
- LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref)
+ vbd_ref = session.get_xenapi().VBD.create(vbd_rec)
+ LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref)
try:
- LOG.debug(_('Plugging VBD %s ... '), vbd)
- session.get_xenapi().VBD.plug(vbd)
- LOG.debug(_('Plugging VBD %s done.'), vbd)
- orig_dev = session.get_xenapi().VBD.get_device(vbd)
- LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
+ LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
+ session.get_xenapi().VBD.plug(vbd_ref)
+ LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
+ orig_dev = session.get_xenapi().VBD.get_device(vbd_ref)
+ LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
dev = remap_vbd_dev(orig_dev)
if dev != orig_dev:
- LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s') % locals())
return f(dev)
finally:
- LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
- vbd_unplug_with_retry(session, vbd)
- ignore_failure(session.get_xenapi().VBD.destroy, vbd)
- LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
+ vbd_unplug_with_retry(session, vbd_ref)
+ ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref)
+ LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
-def vbd_unplug_with_retry(session, vbd):
+def vbd_unplug_with_retry(session, vbd_ref):
"""Call VBD.unplug on the given VBD, with a retry if we get
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
@@ -848,7 +917,7 @@ def vbd_unplug_with_retry(session, vbd):
# FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
- session.get_xenapi().VBD.unplug(vbd)
+ session.get_xenapi().VBD.unplug(vbd_ref)
LOG.debug(_('VBD.unplug successful first time.'))
return
except VMHelper.XenAPI.Failure, e:
@@ -917,14 +986,13 @@ def _write_partition(virtual_size, dev):
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dest)s...') % locals())
- def execute(cmd, process_input=None, check_exit_code=True):
- return utils.execute(cmd=cmd,
- process_input=process_input,
- check_exit_code=check_exit_code)
+ def execute(*cmd, **kwargs):
+ return utils.execute(*cmd, **kwargs)
- execute('parted --script %s mklabel msdos' % dest)
- execute('parted --script %s mkpart primary %ds %ds' %
- (dest, primary_first, primary_last))
+ execute('parted', '--script', dest, 'mklabel', 'msdos')
+ execute('parted', '--script', dest, 'mkpart', 'primary',
+ '%ds' % primary_first,
+ '%ds' % primary_last)
LOG.debug(_('Writing partition table %s done.'), dest)
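
_write_partition() now also issues parted as argument vectors. A self-contained equivalent using plain subprocess (device and sector numbers are illustrative; running it would really partition that device):

    import subprocess

    dest, primary_first, primary_last = '/dev/xvdb', 2048, 4194303

    subprocess.check_call(['parted', '--script', dest, 'mklabel', 'msdos'])
    subprocess.check_call(['parted', '--script', dest, 'mkpart', 'primary',
                           '%ds' % primary_first, '%ds' % primary_last])
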
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 562ecd4d5..488a61e8e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -19,6 +19,7 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
+import base64
import json
import M2Crypto
import os
@@ -55,12 +56,12 @@ class VMOps(object):
def list_instances(self):
"""List VM instances"""
- vms = []
- for vm in self._session.get_xenapi().VM.get_all():
- rec = self._session.get_xenapi().VM.get_record(vm)
- if not rec["is_a_template"] and not rec["is_control_domain"]:
- vms.append(rec["name_label"])
- return vms
+ vm_refs = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ vm_refs.append(vm_rec["name_label"])
+ return vm_refs
def _start(self, instance, vm_ref=None):
"""Power on a VM instance"""
@@ -87,8 +88,8 @@ class VMOps(object):
def _spawn_with_disk(self, instance, vdi_uuid):
"""Create VM instance"""
instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is not None:
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is not None:
raise exception.Duplicate(_('Attempted to create'
' non-unique name %s') % instance_name)
@@ -104,31 +105,26 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- kernel = ramdisk = pv_kernel = None
-
# Are we building from a pre-existing disk?
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
disk_image_type = VMHelper.determine_disk_image_type(instance)
- if disk_image_type == ImageType.DISK_RAW:
- # Have a look at the VDI and see if it has a PV kernel
- pv_kernel = VMHelper.lookup_image(self._session, instance.id,
- vdi_ref)
- elif disk_image_type == ImageType.DISK_VHD:
- # TODO(sirp): Assuming PV for now; this will need to be
- # configurable as Windows will use HVM.
- pv_kernel = True
+ kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+ ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
- vm_ref = VMHelper.create_vm(self._session,
- instance, kernel, ramdisk, pv_kernel)
+ use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+ vdi_ref, disk_image_type, instance.os_type)
+ vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
+ use_pv_kernel)
+
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=0, bootable=True)
@@ -141,19 +137,20 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
- def _inject_onset_files():
- onset_files = instance.onset_files
- if onset_files:
+ def _inject_files():
+ injected_files = instance.injected_files
+ if injected_files:
# Check if this is a JSON-encoded string and convert if needed.
- if isinstance(onset_files, basestring):
+ if isinstance(injected_files, basestring):
try:
- onset_files = json.loads(onset_files)
+ injected_files = json.loads(injected_files)
except ValueError:
- LOG.exception(_("Invalid value for onset_files: '%s'")
- % onset_files)
- onset_files = []
+ LOG.exception(
+ _("Invalid value for injected_files: '%s'")
+ % injected_files)
+ injected_files = []
# Inject any files, if specified
- for path, contents in instance.onset_files:
+ for path, contents in instance.injected_files:
LOG.debug(_("Injecting file path: '%s'") % path)
self.inject_file(instance, path, contents)
# NOTE(armando): Do we really need to do this in virt?
@@ -169,7 +166,7 @@ class VMOps(object):
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
- _inject_onset_files()
+ _inject_files()
return True
except Exception, exc:
LOG.warn(exc)
@@ -266,7 +263,7 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
# call plugin to ship snapshot off to glance
VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
@@ -371,8 +368,8 @@ class VMOps(object):
def reboot(self, instance):
"""Reboot VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
@@ -413,17 +410,16 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def inject_file(self, instance, b64_path, b64_contents):
+ def inject_file(self, instance, path, contents):
"""Write a file to the VM instance. The path to which it is to be
- written and the contents of the file need to be supplied; both should
+ written and the contents of the file need to be supplied; both will
be base64-encoded to prevent errors with non-ASCII characters being
transmitted. If the agent does not support file injection, or the user
has disabled it, a NotImplementedError will be raised.
"""
- # Files/paths *should* be base64-encoded at this point, but
- # double-check to make sure.
- b64_path = utils.ensure_b64_encoding(b64_path)
- b64_contents = utils.ensure_b64_encoding(b64_contents)
+        # Files/paths must be base64-encoded for transmission to the agent
+ b64_path = base64.b64encode(path)
+ b64_contents = base64.b64encode(contents)
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
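
With this change the caller passes the plain path and contents, and the
base64 encoding happens here rather than being double-checked. A minimal
sketch of the transformation (the sample values are illustrative only):

    import base64
    import uuid

    path, contents = '/etc/motd', 'Hello from nova'
    b64_path = base64.b64encode(path)           # 'L2V0Yy9tb3Rk'
    b64_contents = base64.b64encode(contents)
    transaction_id = str(uuid.uuid4())          # uniquely identifies the request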
@@ -439,7 +435,7 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm, hard=True):
+ def _shutdown(self, instance, vm_ref, hard=True):
"""Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
@@ -453,31 +449,33 @@ class VMOps(object):
try:
task = None
if hard:
- task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
+ task = self._session.call_xenapi("Async.VM.hard_shutdown",
+ vm_ref)
else:
- task = self._session.call_xenapi('Async.VM.clean_shutdown', vm)
+ task = self._session.call_xenapi("Async.VM.clean_shutdown",
+ vm_ref)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_vdis(self, instance, vm):
- """Destroys all VDIs associated with a VM """
+ def _destroy_vdis(self, instance, vm_ref):
+ """Destroys all VDIs associated with a VM"""
instance_id = instance.id
LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
% locals())
- vdis = VMHelper.lookup_vm_vdis(self._session, vm)
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)
- if not vdis:
+ if not vdi_refs:
return
- for vdi in vdis:
+ for vdi_ref in vdi_refs:
try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_kernel_ramdisk(self, instance, vm):
+ def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""
Three situations can occur:
@@ -504,8 +502,8 @@ class VMOps(object):
"both" % locals()))
# 3. We have both kernel and ramdisk
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
+ vm_ref)
LOG.debug(_("Removing kernel/ramdisk files"))
@@ -516,11 +514,11 @@ class VMOps(object):
LOG.debug(_("kernel/ramdisk files removed"))
- def _destroy_vm(self, instance, vm):
- """Destroys a VM record """
+ def _destroy_vm(self, instance, vm_ref):
+ """Destroys a VM record"""
instance_id = instance.id
try:
- task = self._session.call_xenapi('Async.VM.destroy', vm)
+ task = self._session.call_xenapi('Async.VM.destroy', vm_ref)
self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -536,10 +534,10 @@ class VMOps(object):
"""
instance_id = instance.id
LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm_ref, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True,
+ def _destroy(self, instance, vm_ref, shutdown=True,
destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
@@ -549,17 +547,17 @@ class VMOps(object):
3. Destroying kernel and ramdisk files (if necessary)
4. Destroying that actual VM record
"""
- if vm is None:
+ if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
- self._shutdown(instance, vm)
+ self._shutdown(instance, vm_ref)
- self._destroy_vdis(instance, vm)
+ self._destroy_vdis(instance, vm_ref)
if destroy_kernel_ramdisk:
- self._destroy_kernel_ramdisk(instance, vm)
- self._destroy_vm(instance, vm)
+ self._destroy_kernel_ramdisk(instance, vm_ref)
+ self._destroy_vm(instance, vm_ref)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
@@ -571,26 +569,27 @@ class VMOps(object):
def pause(self, instance, callback):
"""Pause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.pause', vm)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.pause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
"""Unpause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.unpause', vm)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.unpause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def suspend(self, instance, callback):
"""suspend the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.suspend', vm)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.suspend', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def resume(self, instance, callback):
"""resume the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.resume', vm_ref, False,
+ True)
self._wait_with_callback(instance.id, task, callback)
def rescue(self, instance, callback):
@@ -600,29 +599,26 @@ class VMOps(object):
- spawn a rescue VM (the vm name-label will be instance-N-rescue)
"""
- rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
- if rescue_vm:
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ instance.name + "-rescue")
+ if rescue_vm_ref:
raise RuntimeError(_(
"Instance is already in Rescue Mode: %s" % instance.name))
- vm = self._get_vm_opaque_ref(instance)
- self._shutdown(instance, vm)
- self._acquire_bootlock(vm)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm_ref)
+ self._acquire_bootlock(vm_ref)
instance._rescue = True
self.spawn(instance)
- rescue_vm = self._get_vm_opaque_ref(instance)
+ rescue_vm_ref = self._get_vm_opaque_ref(instance)
- vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
- vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
- vbd_ref = VMHelper.create_vbd(
- self._session,
- rescue_vm,
- vdi_ref,
- 1,
- False)
+ vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
+ rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
+ vdi_ref, 1, False)
- self._session.call_xenapi("Async.VBD.plug", vbd_ref)
+ self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
def unrescue(self, instance, callback):
"""Unrescue the specified instance
@@ -631,51 +627,53 @@ class VMOps(object):
- release the bootlock to allow the instance VM to start
"""
- rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ instance.name + "-rescue")
- if not rescue_vm:
+ if not rescue_vm_ref:
raise exception.NotFound(_(
"Instance is not in Rescue Mode: %s" % instance.name))
- original_vm = self._get_vm_opaque_ref(instance)
- vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+ original_vm_ref = self._get_vm_opaque_ref(instance)
+ vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
instance._rescue = False
- for vbd_ref in vbds:
- vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
- if vbd["userdevice"] == "1":
+ for vbd_ref in vbd_refs:
+            vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
+            if vbd_rec["userdevice"] == "1":
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
- task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+ task1 = self._session.call_xenapi("Async.VM.hard_shutdown",
+ rescue_vm_ref)
self._session.wait_for_task(task1, instance.id)
- vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
- for vdi in vdis:
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
+ for vdi_ref in vdi_refs:
try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure:
continue
- task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+ task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref)
self._session.wait_for_task(task2, instance.id)
- self._release_bootlock(original_vm)
- self._start(instance, original_vm)
+ self._release_bootlock(original_vm_ref)
+ self._start(instance, original_vm_ref)
def get_info(self, instance):
"""Return data about VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_info(rec)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_diagnostics(self._session, rec)
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_diagnostics(self._session, vm_rec)
def get_console_output(self, instance):
"""Return snapshot of console"""
@@ -698,9 +696,9 @@ class VMOps(object):
# at this stage even though they aren't implemented because these will
# be needed for multi-nic and there was no sense writing it for single
# network/single IP and then having to turn around and re-write it
- vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+ vm_ref = self._get_vm_opaque_ref(instance.id)
logging.debug(_("injecting network info to xenstore for vm: |%s|"),
- vm_opaque_ref)
+ vm_ref)
admin_context = context.get_admin_context()
IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
networks = db.network_get_all_by_instance(admin_context,
@@ -731,11 +729,10 @@ class VMOps(object):
'ips': [ip_dict(ip) for ip in network_IPs],
'ip6s': [ip6_dict(ip) for ip in network_IPs]}
- self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+ self.write_to_param_xenstore(vm_ref, {location: mapping})
try:
- self.write_to_xenstore(vm_opaque_ref, location,
- mapping['location'])
+ self.write_to_xenstore(vm_ref, location, mapping['location'])
except KeyError:
# catch KeyError for domid if instance isn't running
pass
@@ -747,8 +744,8 @@ class VMOps(object):
Creates vifs for an instance
"""
- vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
- logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
+ vm_ref = self._get_vm_opaque_ref(instance.id)
+ logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
if networks is None:
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
@@ -768,12 +765,8 @@ class VMOps(object):
except AttributeError:
device = "0"
- VMHelper.create_vif(
- self._session,
- vm_opaque_ref,
- network_ref,
- instance.mac_address,
- device)
+ VMHelper.create_vif(self._session, vm_ref, network_ref,
+ instance.mac_address, device)
def reset_network(self, instance):
"""
@@ -837,9 +830,9 @@ class VMOps(object):
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
instance_id = vm.id
- vm = self._get_vm_opaque_ref(vm)
- rec = self._session.get_xenapi().VM.get_record(vm)
- args = {'dom_id': rec['domid'], 'path': path}
+ vm_ref = self._get_vm_opaque_ref(vm)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ args = {'dom_id': vm_rec['domid'], 'path': path}
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
@@ -919,9 +912,9 @@ class VMOps(object):
value for 'keys' is passed, the returned dict is filtered to only
return the values for those keys.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
data = self._session.call_xenapi_request('VM.get_xenstore_data',
- (vm, ))
+ (vm_ref, ))
ret = {}
if keys is None:
keys = data.keys()
@@ -939,11 +932,11 @@ class VMOps(object):
"""Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
self.remove_from_param_xenstore(instance_or_vm, key)
jsonval = json.dumps(val)
self._session.call_xenapi_request('VM.add_to_xenstore_data',
- (vm, key, jsonval))
+ (vm_ref, key, jsonval))
def write_to_param_xenstore(self, instance_or_vm, mapping):
"""Takes a dict and writes each key/value pair to the xenstore
@@ -958,14 +951,14 @@ class VMOps(object):
them from the xenstore parameter record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
if isinstance(key_or_keys, basestring):
keys = [key_or_keys]
else:
keys = key_or_keys
for key in keys:
self._session.call_xenapi_request('VM.remove_from_xenstore_data',
- (vm, key))
+ (vm_ref, key))
def clear_param_xenstore(self, instance_or_vm):
"""Removes all data from the xenstore parameter record for this VM."""
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index d5ebd29d5..72284ac02 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -117,16 +117,16 @@ class VolumeHelper(HelperBase):
def introduce_vdi(cls, session, sr_ref):
"""Introduce VDI in the host"""
try:
- vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
- vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to get record'
- ' of VDI %s on') % vdis[0])
+ ' of VDI %s on') % vdi_refs[0])
else:
try:
return session.get_xenapi().VDI.introduce(
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index b63a5f8c3..da42a83b6 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
+
+**Variable Naming Scheme**
+
+- suffix "_ref" for opaque references
+- suffix "_uuid" for UUIDs
+- suffix "_rec" for record objects
"""
import sys
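
The naming scheme documented above can be read directly off the XenAPI call
patterns used throughout this patch. A short, self-contained sketch (the
helper below is illustrative only and not part of this change; the calls
shown are the ones already used elsewhere in the patch):

    def describe_vm(session, vm_uuid):
        # '_uuid': the externally visible identifier we start from
        vm_ref = session.call_xenapi('VM.get_by_uuid', vm_uuid)
        # '_ref': the opaque reference passed to further XenAPI calls
        vm_rec = session.get_xenapi().VM.get_record(vm_ref)
        # '_rec': the record (a dict of fields) read via that reference
        return (vm_rec['name_label'], vm_rec['power_state'])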
@@ -263,6 +269,27 @@ class XenAPIConnection(object):
'username': FLAGS.xenapi_connection_username,
'password': FLAGS.xenapi_connection_password}
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only libvirt."""
+ return
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""