From d3de6cd1b2997e495a000b998b321346e2a75306 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Fri, 8 Apr 2011 14:46:26 -0700 Subject: Fixes euca-attach-volume for iscsi using Xenserver Minor changes required to xenapi functions to get correct format for volume-id, iscsi-host, etc. --- nova/virt/xenapi/volume_utils.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 72284ac02..27964cac0 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -209,9 +209,9 @@ def _get_volume_id(path_or_id): # see compute/manager->setup_compute_volume volume_id = path_or_id[path_or_id.find('/vol-') + 1:] if volume_id == path_or_id: - volume_id = path_or_id[path_or_id.find('-vol-') + 1:] - volume_id = volume_id.replace('--', '-') - return volume_id + volume_id = path_or_id[path_or_id.find('-volume--') + 1:] + volume_id = volume_id.replace('volume--', '') + return int(volume_id) def _get_target_host(iscsi_string): @@ -244,25 +244,21 @@ def _get_target(volume_id): Gets iscsi name and portal from volume name and host. For this method to work the following are needed: 1) volume_ref['host'] must resolve to something rather than loopback - 2) ietd must bind only to the address as resolved above - If any of the two conditions are not met, fall back on Flags. """ - volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_ref = db.volume_get(context.get_admin_context(), volume_id) result = (None, None) try: - (r, _e) = utils.execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % - volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: - targets = r.splitlines() - if len(_e) == 0 and len(targets) == 1: - for target in targets: - if volume_id in target: + volume_name = "volume-%08x" % volume_id + for target in r.splitlines(): + if FLAGS.iscsi_ip_prefix in target and volume_name in target: (location, _sep, iscsi_name) = target.partition(" ") break - iscsi_portal = location.split(",")[0] - result = (iscsi_name, iscsi_portal) + iscsi_portal = location.split(",")[0] + result = (iscsi_name, iscsi_portal) return result -- cgit From 12ec5f5c0d6a88779780b15b6ef38a016d6aae4a Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 12 Apr 2011 08:04:55 -0700 Subject: Add new flag 'max_kernel_ramdisk_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest --- nova/virt/xenapi/vm_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d2045a557..dd1fd9383 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -51,6 +51,8 @@ FLAGS = flags.FLAGS flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') flags.DEFINE_integer('block_device_creation_timeout', 10, 'time to wait for a block device to be created') +flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024, + 'maximum size in bytes of kernel or ramdisk images') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -448,6 +450,12 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK: # Make room for MBR. 
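            # MBR_SIZE_BYTES is a constant available at module level in
            # vm_utils.py; the KERNEL_RAMDISK branch added below rejects
            # oversized images so that large kernels and ramdisks are
            # never copied into dom0's /boot/guest.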
vdi_size += MBR_SIZE_BYTES + elif image_type == ImageType.KERNEL_RAMDISK and \ + vdi_size > FLAGS.max_kernel_ramdisk_size: + max_size = FLAGS.max_kernel_ramdisk_size + raise exception.Error( + _("Kernel/Ramdisk image is too large, %(vdi_size)d bytes " + "(max %(max_size)d bytes)") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) -- cgit From bc953f37560b7353b9b8c86e8d0bdaa5672d3acd Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Tue, 12 Apr 2011 15:20:30 -0700 Subject: Minor fixes --- nova/virt/xenapi/volume_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 27964cac0..819c48be5 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -250,7 +250,7 @@ def _get_target(volume_id): result = (None, None) try: (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume_ref['host']) + '-t', 'sendtargets', '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: -- cgit From c04b0caca4a725be390271be30bf8a034aa5ca9d Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 13 Apr 2011 10:10:40 -0700 Subject: Minor formatting cleanup --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index dd1fd9383..32a617ef4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -454,8 +454,8 @@ class VMHelper(HelperBase): vdi_size > FLAGS.max_kernel_ramdisk_size: max_size = FLAGS.max_kernel_ramdisk_size raise exception.Error( - _("Kernel/Ramdisk image is too large, %(vdi_size)d bytes " - "(max %(max_size)d bytes)") % locals()) + _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " + "max %(max_size)d bytes") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) -- cgit From 4eac8d2c4252eb866e99ef260c0c5d7df1d927d2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 22 Apr 2011 12:47:09 -0400 Subject: Created new libvirt directory, moved libvirt_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities. 
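In rough outline (matching the diffstat below), the only change existing
callers need is the import path, for example:

    # before
    from nova.virt import libvirt_conn

    # after
    from nova.virt.libvirt import connection as libvirt_conn

which keeps the familiar libvirt_conn name at existing call sites, while
the firewall drivers move to nova/virt/libvirt/firewall.py and the
network helpers to nova/virt/libvirt/netutils.py.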
--- nova/virt/connection.py | 2 +- nova/virt/cpuinfo.xml.template | 9 - nova/virt/libvirt.xml.template | 122 -- nova/virt/libvirt/__init__.py | 0 nova/virt/libvirt/connection.py | 1527 ++++++++++++++++++++++ nova/virt/libvirt/cpuinfo.xml.template | 9 + nova/virt/libvirt/firewall.py | 630 ++++++++++ nova/virt/libvirt/libvirt.xml.template | 122 ++ nova/virt/libvirt/netutils.py | 95 ++ nova/virt/libvirt_conn.py | 2168 -------------------------------- 10 files changed, 2384 insertions(+), 2300 deletions(-) delete mode 100644 nova/virt/cpuinfo.xml.template delete mode 100644 nova/virt/libvirt.xml.template create mode 100644 nova/virt/libvirt/__init__.py create mode 100644 nova/virt/libvirt/connection.py create mode 100644 nova/virt/libvirt/cpuinfo.xml.template create mode 100644 nova/virt/libvirt/firewall.py create mode 100644 nova/virt/libvirt/libvirt.xml.template create mode 100644 nova/virt/libvirt/netutils.py delete mode 100644 nova/virt/libvirt_conn.py (limited to 'nova/virt') diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 99a8849f1..aeec17c98 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,9 +27,9 @@ from nova import utils from nova.virt import driver from nova.virt import fake from nova.virt import hyperv -from nova.virt import libvirt_conn from nova.virt import vmwareapi_conn from nova.virt import xenapi_conn +from nova.virt.libvirt import connection as libvirt_conn LOG = logging.getLogger("nova.virt.connection") diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template deleted file mode 100644 index de2497a76..000000000 --- a/nova/virt/libvirt.xml.template +++ /dev/null @@ -1,122 +0,0 @@ - - ${name} - ${memory_kb} - -#if $type == 'lxc' - #set $disk_prefix = '' - #set $disk_bus = '' - exe - /sbin/init -#else if $type == 'uml' - #set $disk_prefix = 'ubd' - #set $disk_bus = 'uml' - uml - /usr/bin/linux - /dev/ubda -#else - #if $type == 'xen' - #set $disk_prefix = 'sd' - #set $disk_bus = 'scsi' - linux - /dev/xvda - #else - #set $disk_prefix = 'vd' - #set $disk_bus = 'virtio' - hvm - #end if - #if $getVar('rescue', False) - ${basepath}/kernel.rescue - ${basepath}/ramdisk.rescue - #else - #if $getVar('kernel', None) - ${kernel} - #if $type == 'xen' - ro - #else - root=/dev/vda console=ttyS0 - #end if - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - #else - - #end if - #end if -#end if - - - - - ${vcpus} - -#if $type == 'lxc' - - - - -#else - #if $getVar('rescue', False) - - - - - - - - - - - #else - - - - - - #if $getVar('local', False) - - - - - - #end if - #end if -#end if - -#for $nic in $nics - - - - - - - -#if $getVar('nic.extra_params', False) - ${nic.extra_params} -#end if -#if $getVar('nic.gateway_v6', False) - -#end if - - -#end for - - - - - - - - - - - - - - - - -#if $getVar('vncserver_host', False) - -#end if - - diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py new file mode 100644 index 000000000..972ac1bb9 --- /dev/null +++ b/nova/virt/libvirt/connection.py @@ -0,0 +1,1527 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# 
Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to a hypervisor through libvirt. + +Supports KVM, LXC, QEMU, UML, and XEN. + +**Related Flags** + +:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen + (default: kvm). +:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). +:libvirt_xml_template: Libvirt XML Template. +:rescue_image_id: Rescue ami image (default: ami-rescue). +:rescue_kernel_id: Rescue aki image (default: aki-rescue). +:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). +:injected_network_template: Template file for injected network +:allow_project_net_traffic: Whether to allow in project network traffic + +""" + +import multiprocessing +import os +import random +import shutil +import subprocess +import sys +import tempfile +import time +import uuid +from xml.dom import minidom +from xml.etree import ElementTree + +from eventlet import greenthread +from eventlet import tpool + +import IPy + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova import vnc +from nova.auth import manager +from nova.compute import instance_types +from nova.compute import power_state +from nova.virt import disk +from nova.virt import driver +from nova.virt import images +from nova.virt.libvirt import netutils + + +libvirt = None +libxml2 = None +Template = None + + +LOG = logging.getLogger('nova.virt.libvirt_conn') + + +FLAGS = flags.FLAGS +flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') +# TODO(vish): These flags should probably go into a shared location +flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') +flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') +flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('virt/libvirt/libvirt.xml.template'), + 'Libvirt XML Template') +flags.DEFINE_string('libvirt_type', + 'kvm', + 'Libvirt domain type (valid options are: ' + 'kvm, lxc, qemu, uml, xen)') +flags.DEFINE_string('libvirt_uri', + '', + 'Override the default libvirt URI (which is dependent' + ' on libvirt_type)') +flags.DEFINE_bool('allow_project_net_traffic', + True, + 'Whether to allow in project network traffic') +flags.DEFINE_bool('use_cow_images', + True, + 'Whether to use cow images') +flags.DEFINE_string('ajaxterm_portrange', + '10000-12000', + 'Range of ports that ajaxterm should randomly try to bind') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt.firewall.IptablesFirewallDriver', + 'Firewall driver (defaults to iptables)') +flags.DEFINE_string('cpuinfo_xml_template', + utils.abspath('virt/libvirt/cpuinfo.xml.template'), + 'CpuInfo XML Template (Used only live migration now)') +flags.DEFINE_string('live_migration_uri', + 
"qemu+tcp://%s/system", + 'Define protocol used by live_migration feature') +flags.DEFINE_string('live_migration_flag', + "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", + 'Define live migration behavior.') +flags.DEFINE_integer('live_migration_bandwidth', 0, + 'Define live migration behavior') +flags.DEFINE_string('qemu_img', 'qemu-img', + 'binary to use for qemu-img commands') +flags.DEFINE_bool('start_guests_on_host_boot', False, + 'Whether to restart guests when the host reboots') + + +def get_connection(read_only): + # These are loaded late so that there's no need to install these + # libraries when not using libvirt. + # Cheetah is separate because the unit tests want to load Cheetah, + # but not libvirt. + global libvirt + global libxml2 + if libvirt is None: + libvirt = __import__('libvirt') + if libxml2 is None: + libxml2 = __import__('libxml2') + _late_load_cheetah() + return LibvirtConnection(read_only) + + +def _late_load_cheetah(): + global Template + if Template is None: + t = __import__('Cheetah.Template', globals(), locals(), + ['Template'], -1) + Template = t.Template + + +class LibvirtConnection(driver.ComputeDriver): + + def __init__(self, read_only): + super(LibvirtConnection, self).__init__() + self.libvirt_uri = self.get_uri() + + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() + self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() + self._wrapped_conn = None + self.read_only = read_only + + fw_class = utils.import_class(FLAGS.firewall_driver) + self.firewall_driver = fw_class(get_connection=self._get_connection) + + def init_host(self, host): + # Adopt existing VM's running here + ctxt = context.get_admin_context() + for instance in db.instance_get_all_by_host(ctxt, host): + try: + LOG.debug(_('Checking state of %s'), instance['name']) + state = self.get_info(instance['name'])['state'] + except exception.NotFound: + state = power_state.SHUTOFF + + LOG.debug(_('Current state of %(name)s was %(state)s.'), + {'name': instance['name'], 'state': state}) + db.instance_set_state(ctxt, instance['id'], state) + + # NOTE(justinsb): We no longer delete SHUTOFF instances, + # the user may want to power them back on + + if state != power_state.RUNNING: + continue + self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance) + + def _get_connection(self): + if not self._wrapped_conn or not self._test_connection(): + LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) + self._wrapped_conn = self._connect(self.libvirt_uri, + self.read_only) + return self._wrapped_conn + _conn = property(_get_connection) + + def _test_connection(self): + try: + self._wrapped_conn.getInfo() + return True + except libvirt.libvirtError as e: + if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ + e.get_error_domain() == libvirt.VIR_FROM_REMOTE: + LOG.debug(_('Connection to libvirt broke')) + return False + raise + + def get_uri(self): + if FLAGS.libvirt_type == 'uml': + uri = FLAGS.libvirt_uri or 'uml:///system' + elif FLAGS.libvirt_type == 'xen': + uri = FLAGS.libvirt_uri or 'xen:///' + elif FLAGS.libvirt_type == 'lxc': + uri = FLAGS.libvirt_uri or 'lxc:///' + else: + uri = FLAGS.libvirt_uri or 'qemu:///system' + return uri + + def _connect(self, uri, read_only): + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + + if read_only: + return libvirt.openReadOnly(uri) + else: + return libvirt.openAuth(uri, auth, 0) + + def list_instances(self): + return 
[self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + + def _map_to_instance_info(self, domain): + """Gets info from a virsh domain object into an InstanceInfo""" + + # domain.info() returns a list of: + # state: one of the state values (virDomainState) + # maxMemory: the maximum memory used by the domain + # memory: the current amount of memory used by the domain + # nbVirtCPU: the number of virtual CPU + # puTime: the time used by the domain in nanoseconds + + (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info() + name = domain.name() + + return driver.InstanceInfo(name, state) + + def list_instances_detail(self): + infos = [] + for domain_id in self._conn.listDomainsID(): + domain = self._conn.lookupByID(domain_id) + info = self._map_to_instance_info(domain) + infos.append(info) + return infos + + def destroy(self, instance, cleanup=True): + instance_name = instance['name'] + + try: + virt_dom = self._lookup_by_name(instance_name) + except exception.NotFound: + virt_dom = None + + # If the instance is already terminated, we're still happy + # Otherwise, destroy it + if virt_dom is not None: + try: + virt_dom.destroy() + except libvirt.libvirtError as e: + is_okay = False + errcode = e.get_error_code() + if errcode == libvirt.VIR_ERR_OPERATION_INVALID: + # If the instance if already shut off, we get this: + # Code=55 Error=Requested operation is not valid: + # domain is not running + (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() + if state == power_state.SHUTOFF: + is_okay = True + + if not is_okay: + LOG.warning(_("Error from libvirt during destroy of " + "%(instance_name)s. Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + try: + # NOTE(justinsb): We remove the domain definition. We probably + # would do better to keep it if cleanup=False (e.g. volumes?) + # (e.g. #2 - not losing machines on failure) + virt_dom.undefine() + except libvirt.libvirtError as e: + errcode = e.get_error_code() + LOG.warning(_("Error from libvirt during undefine of " + "%(instance_name)s. 
Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + def _wait_for_destroy(): + """Called at an interval until the VM is gone.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("Instance %s destroyed successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_destroy) + timer.start(interval=0.5, now=True) + + self.firewall_driver.unfilter_instance(instance) + + if cleanup: + self._cleanup(instance) + + return True + + def _cleanup(self, instance): + target = os.path.join(FLAGS.instances_path, instance['name']) + instance_name = instance['name'] + LOG.info(_('instance %(instance_name)s: deleting instance files' + ' %(target)s') % locals()) + if FLAGS.libvirt_type == 'lxc': + disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images) + if os.path.exists(target): + shutil.rmtree(target) + + @exception.wrap_exception + def attach_volume(self, instance_name, device_path, mountpoint): + virt_dom = self._lookup_by_name(instance_name) + mount_device = mountpoint.rpartition("/")[2] + if device_path.startswith('/dev/'): + xml = """ + + + + """ % (device_path, mount_device) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + xml = """ + + + + """ % (protocol, + name, + mount_device) + else: + raise exception.Invalid(_("Invalid device path %s") % device_path) + + virt_dom.attachDevice(xml) + + def _get_disk_xml(self, xml, device): + """Returns the xml for the disk mounted at device""" + try: + doc = libxml2.parseDoc(xml) + except: + return None + ctx = doc.xpathNewContext() + try: + ret = ctx.xpathEval('/domain/devices/disk') + for node in ret: + for child in node.children: + if child.name == 'target': + if child.prop('dev') == device: + return str(node) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + @exception.wrap_exception + def detach_volume(self, instance_name, mountpoint): + virt_dom = self._lookup_by_name(instance_name) + mount_device = mountpoint.rpartition("/")[2] + xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) + if not xml: + raise exception.NotFound(_("No disk at %s") % mount_device) + virt_dom.detachDevice(xml) + + @exception.wrap_exception + def snapshot(self, instance, image_id): + """Create snapshot from a running VM instance. + + This command only works with qemu 0.14+, the qemu_img flag is + provided so that a locally compiled binary of qemu-img can be used + to support this command. 
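+
+        As a rough sketch, the dom0 command assembled below amounts to:
+
+            qemu-img convert -f qcow2 -O raw -s <snapshot_name> \
+                <disk_path> <out_path>
+
+        where <snapshot_name> is a random hex uuid and the placeholder
+        paths come from the domain XML and a temporary directory.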
+ + """ + image_service = utils.import_object(FLAGS.image_service) + virt_dom = self._lookup_by_name(instance['name']) + elevated = context.get_admin_context() + + base = image_service.show(elevated, instance['image_id']) + + metadata = {'disk_format': base['disk_format'], + 'container_format': base['container_format'], + 'is_public': False, + 'name': '%s.%s' % (base['name'], image_id), + 'properties': {'architecture': base['architecture'], + 'kernel_id': instance['kernel_id'], + 'image_location': 'snapshot', + 'image_state': 'available', + 'owner_id': instance['project_id'], + 'ramdisk_id': instance['ramdisk_id'], + } + } + + # Make the snapshot + snapshot_name = uuid.uuid4().hex + snapshot_xml = """ + + %s + + """ % snapshot_name + snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0) + + # Find the disk + xml_desc = virt_dom.XMLDesc(0) + domain = ElementTree.fromstring(xml_desc) + source = domain.find('devices/disk/source') + disk_path = source.get('file') + + # Export the snapshot to a raw image + temp_dir = tempfile.mkdtemp() + out_path = os.path.join(temp_dir, snapshot_name) + qemu_img_cmd = (FLAGS.qemu_img, + 'convert', + '-f', + 'qcow2', + '-O', + 'raw', + '-s', + snapshot_name, + disk_path, + out_path) + utils.execute(*qemu_img_cmd) + + # Upload that image to the image service + with open(out_path) as image_file: + image_service.update(elevated, + image_id, + metadata, + image_file) + + # Clean up + shutil.rmtree(temp_dir) + + @exception.wrap_exception + def reboot(self, instance): + """Reboot a virtual machine, given an instance reference. + + This method actually destroys and re-creates the domain to ensure the + reboot happens, as the guest OS cannot ignore this action. + + """ + self.destroy(instance, False) + xml = self.to_xml(instance) + self.firewall_driver.setup_basic_filtering(instance) + self.firewall_driver.prepare_instance_filter(instance) + self._create_new_domain(xml) + self.firewall_driver.apply_instance_filter(instance) + + def _wait_for_reboot(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rebooted successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_reboot) + return timer.start(interval=0.5, now=True) + + @exception.wrap_exception + def pause(self, instance, callback): + raise exception.ApiError("pause not supported for libvirt.") + + @exception.wrap_exception + def unpause(self, instance, callback): + raise exception.ApiError("unpause not supported for libvirt.") + + @exception.wrap_exception + def suspend(self, instance, callback): + raise exception.ApiError("suspend not supported for libvirt") + + @exception.wrap_exception + def resume(self, instance, callback): + raise exception.ApiError("resume not supported for libvirt") + + @exception.wrap_exception + def rescue(self, instance): + """Loads a VM using rescue images. + + A rescue is normally performed when something goes wrong with the + primary images and data needs to be corrected/recovered. Rescuing + should not edit or over-ride the original image, only allow for + data recovery. 
+ + """ + self.destroy(instance, False) + + xml = self.to_xml(instance, rescue=True) + rescue_images = {'image_id': FLAGS.rescue_image_id, + 'kernel_id': FLAGS.rescue_kernel_id, + 'ramdisk_id': FLAGS.rescue_ramdisk_id} + self._create_image(instance, xml, '.rescue', rescue_images) + self._create_new_domain(xml) + + def _wait_for_rescue(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rescued successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_rescue) + return timer.start(interval=0.5, now=True) + + @exception.wrap_exception + def unrescue(self, instance): + """Reboot the VM which is being rescued back into primary images. + + Because reboot destroys and re-creates instances, unresue should + simply call reboot. + + """ + self.reboot(instance) + + @exception.wrap_exception + def poll_rescued_instances(self, timeout): + pass + + # NOTE(ilyaalekseyev): Implementation like in multinics + # for xenapi(tr3buchet) + @exception.wrap_exception + def spawn(self, instance, network_info=None): + xml = self.to_xml(instance, False, network_info) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) + self._create_image(instance, xml, network_info) + domain = self._create_new_domain(xml) + LOG.debug(_("instance %s: is running"), instance['name']) + self.firewall_driver.apply_instance_filter(instance) + + if FLAGS.start_guests_on_host_boot: + LOG.debug(_("instance %s: setting autostart ON") % + instance['name']) + domain.setAutostart(1) + + def _wait_for_boot(): + """Called at an interval until the VM is running.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s spawned successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_boot) + return timer.start(interval=0.5, now=True) + + def _flush_xen_console(self, virsh_output): + LOG.info(_('virsh said: %r'), virsh_output) + virsh_output = virsh_output[0].strip() + + if virsh_output.startswith('/dev/'): + LOG.info(_("cool, it's a device")) + out, err = utils.execute('sudo', 'dd', + "if=%s" % virsh_output, + 'iflag=nonblock', + check_exit_code=False) + return out + else: + return '' + + def _append_to_file(self, data, fpath): + LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals()) + fp = open(fpath, 'a+') + fp.write(data) + return fpath + + def _dump_file(self, fpath): + fp = open(fpath, 'r+') + contents = fp.read() + LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) + return contents + + @exception.wrap_exception + def get_console_output(self, instance): + console_log = os.path.join(FLAGS.instances_path, instance['name'], + 'console.log') + + utils.execute('sudo', 'chown', os.getuid(), console_log) + + if FLAGS.libvirt_type == 'xen': + # Xen is special + virsh_output = utils.execute('virsh', 'ttyconsole', + instance['name']) + data = self._flush_xen_console(virsh_output) + fpath = 
self._append_to_file(data, console_log) + elif FLAGS.libvirt_type == 'lxc': + # LXC is also special + LOG.info(_("Unable to read LXC console")) + else: + fpath = console_log + + return self._dump_file(fpath) + + @exception.wrap_exception + def get_ajax_console(self, instance): + def get_open_port(): + start_port, end_port = FLAGS.ajaxterm_portrange.split("-") + for i in xrange(0, 100): # don't loop forever + port = random.randint(int(start_port), int(end_port)) + # netcat will exit with 0 only if the port is in use, + # so a nonzero return value implies it is unused + cmd = 'netcat', '0.0.0.0', port, '-w', '1' + try: + stdout, stderr = utils.execute(*cmd, process_input='') + except exception.ProcessExecutionError: + return port + raise Exception(_('Unable to find an open port')) + + def get_pty_for_instance(instance_name): + virt_dom = self._lookup_by_name(instance_name) + xml = virt_dom.XMLDesc(0) + dom = minidom.parseString(xml) + + for serial in dom.getElementsByTagName('serial'): + if serial.getAttribute('type') == 'pty': + source = serial.getElementsByTagName('source')[0] + return source.getAttribute('path') + + port = get_open_port() + token = str(uuid.uuid4()) + host = instance['host'] + + ajaxterm_cmd = 'sudo socat - %s' \ + % get_pty_for_instance(instance['name']) + + cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ + % (utils.novadir(), ajaxterm_cmd, token, port) + + subprocess.Popen(cmd, shell=True) + return {'token': token, 'host': host, 'port': port} + + @exception.wrap_exception + def get_vnc_console(self, instance): + def get_vnc_port_for_instance(instance_name): + virt_dom = self._lookup_by_name(instance_name) + xml = virt_dom.XMLDesc(0) + # TODO: use etree instead of minidom + dom = minidom.parseString(xml) + + for graphic in dom.getElementsByTagName('graphics'): + if graphic.getAttribute('type') == 'vnc': + return graphic.getAttribute('port') + + port = get_vnc_port_for_instance(instance['name']) + token = str(uuid.uuid4()) + host = instance['host'] + + return {'token': token, 'host': host, 'port': port} + + @staticmethod + def _cache_image(fn, target, fname, cow=False, *args, **kwargs): + """Wrapper for a method that creates an image that caches the image. + + This wrapper will save the image into a common store and create a + copy for use by the hypervisor. + + The underlying method should specify a kwarg of target representing + where the image will be saved. + + fname is used as the filename of the base image. The filename needs + to be unique to a given image. + + If cow is True, it will make a CoW image instead of a copy. 
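+
+        For illustration, with cow=True and a made-up fname of
+        '12345678', the resulting layout is roughly:
+
+            $instances_path/_base/12345678   (shared base image)
+            $instances_path/<name>/disk      (qcow2 overlay whose
+                                              backing_file is the base)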
+ """ + if not os.path.exists(target): + base_dir = os.path.join(FLAGS.instances_path, '_base') + if not os.path.exists(base_dir): + os.mkdir(base_dir) + base = os.path.join(base_dir, fname) + + @utils.synchronized(fname) + def call_if_not_exists(base, fn, *args, **kwargs): + if not os.path.exists(base): + fn(target=base, *args, **kwargs) + + call_if_not_exists(base, fn, *args, **kwargs) + + if cow: + utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', + 'cluster_size=2M,backing_file=%s' % base, + target) + else: + utils.execute('cp', base, target) + + def _fetch_image(self, target, image_id, user, project, size=None): + """Grab image and optionally attempt to resize it""" + images.fetch(image_id, target, user, project) + if size: + disk.extend(target, size) + + def _create_local(self, target, local_gb): + """Create a blank image of specified size""" + utils.execute('truncate', target, '-s', "%dG" % local_gb) + # TODO(vish): should we format disk by default? + + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, + network_info=None): + if not network_info: + network_info = netutils.get_network_info(inst) + + if not suffix: + suffix = '' + + # syntactic nicety + def basepath(fname='', suffix=suffix): + return os.path.join(FLAGS.instances_path, + inst['name'], + fname + suffix) + + # ensure directories exist and are writable + utils.execute('mkdir', '-p', basepath(suffix='')) + + LOG.info(_('instance %s: Creating image'), inst['name']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if FLAGS.libvirt_type == 'lxc': + container_dir = '%s/rootfs' % basepath(suffix='') + utils.execute('mkdir', '-p', container_dir) + + # NOTE(vish): No need add the suffix to console.log + os.close(os.open(basepath('console.log', ''), + os.O_CREAT | os.O_WRONLY, 0660)) + + user = manager.AuthManager().get_user(inst['user_id']) + project = manager.AuthManager().get_project(inst['project_id']) + + if not disk_images: + disk_images = {'image_id': inst['image_id'], + 'kernel_id': inst['kernel_id'], + 'ramdisk_id': inst['ramdisk_id']} + + if disk_images['kernel_id']: + fname = '%08x' % int(disk_images['kernel_id']) + self._cache_image(fn=self._fetch_image, + target=basepath('kernel'), + fname=fname, + image_id=disk_images['kernel_id'], + user=user, + project=project) + if disk_images['ramdisk_id']: + fname = '%08x' % int(disk_images['ramdisk_id']) + self._cache_image(fn=self._fetch_image, + target=basepath('ramdisk'), + fname=fname, + image_id=disk_images['ramdisk_id'], + user=user, + project=project) + + root_fname = '%08x' % int(disk_images['image_id']) + size = FLAGS.minimum_root_size + + inst_type_id = inst['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) + if inst_type['name'] == 'm1.tiny' or suffix == '.rescue': + size = None + root_fname += "_sm" + + self._cache_image(fn=self._fetch_image, + target=basepath('disk'), + fname=root_fname, + cow=FLAGS.use_cow_images, + image_id=disk_images['image_id'], + user=user, + project=project, + size=size) + + if inst_type['local_gb']: + self._cache_image(fn=self._create_local, + target=basepath('disk.local'), + fname="local_%s" % inst_type['local_gb'], + cow=FLAGS.use_cow_images, + local_gb=inst_type['local_gb']) + + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first + # partition + target_partition = None + if not inst['kernel_id']: + target_partition = "1" + + if FLAGS.libvirt_type == 'lxc': + 
target_partition = None + + if inst['key_data']: + key = str(inst['key_data']) + else: + key = None + net = None + + nets = [] + ifc_template = open(FLAGS.injected_network_template).read() + ifc_num = -1 + have_injected_networks = False + admin_context = context.get_admin_context() + for (network_ref, mapping) in network_info: + ifc_num += 1 + + if not network_ref['injected']: + continue + + have_injected_networks = True + address = mapping['ips'][0]['ip'] + address_v6 = None + if FLAGS.use_ipv6: + address_v6 = mapping['ip6s'][0]['ip'] + net_info = {'name': 'eth%d' % ifc_num, + 'address': address, + 'netmask': network_ref['netmask'], + 'gateway': network_ref['gateway'], + 'broadcast': network_ref['broadcast'], + 'dns': network_ref['dns'], + 'address_v6': address_v6, + 'gateway_v6': network_ref['gateway_v6'], + 'netmask_v6': network_ref['netmask_v6']} + nets.append(net_info) + + if have_injected_networks: + net = str(Template(ifc_template, + searchList=[{'interfaces': nets, + 'use_ipv6': FLAGS.use_ipv6}])) + + if key or net: + inst_name = inst['name'] + img_id = inst.image_id + if key: + LOG.info(_('instance %(inst_name)s: injecting key into' + ' image %(img_id)s') % locals()) + if net: + LOG.info(_('instance %(inst_name)s: injecting net into' + ' image %(img_id)s') % locals()) + try: + disk.inject_data(basepath('disk'), key, net, + partition=target_partition, + nbd=FLAGS.use_cow_images) + + if FLAGS.libvirt_type == 'lxc': + disk.setup_container(basepath('disk'), + container_dir=container_dir, + nbd=FLAGS.use_cow_images) + except Exception as e: + # This could be a windows image, or a vmdk format disk + LOG.warn(_('instance %(inst_name)s: ignoring error injecting' + ' data into image %(img_id)s (%(e)s)') % locals()) + + if FLAGS.libvirt_type == 'uml': + utils.execute('sudo', 'chown', 'root', basepath('disk')) + + def _get_nic_for_xml(self, network, mapping): + # Assume that the gateway also acts as the dhcp server. + dhcp_server = network['gateway'] + gateway_v6 = network['gateway_v6'] + mac_id = mapping['mac'].replace(':', '') + + if FLAGS.allow_project_net_traffic: + if FLAGS.use_ipv6: + net, mask = netutils.get_net_and_mask(network['cidr']) + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( + network['cidr_v6']) + extra_params = ("\n" + "\n" + "\n" + "\n") % \ + (net, mask, net_v6, prefixlen_v6) + else: + net, mask = netutils.get_net_and_mask(network['cidr']) + extra_params = ("\n" + "\n") % \ + (net, mask) + else: + extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': dhcp_server, + 'extra_params': extra_params, + } + + if gateway_v6: + result['gateway_v6'] = gateway_v6 + "/128" + + return result + + def to_xml(self, instance, rescue=False, network_info=None): + # TODO(termie): cache? 
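+        # In outline: collect type/nic/disk parameters into xml_info,
+        # then render FLAGS.libvirt_xml_template (a Cheetah template)
+        # with that dictionary.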
+ LOG.debug(_('instance %s: starting toXML method'), instance['name']) + + # TODO(adiantum) remove network_info creation code + # when multinics will be completed + if not network_info: + network_info = netutils.get_network_info(instance) + + nics = [] + for (network, mapping) in network_info: + nics.append(self._get_nic_for_xml(network, + mapping)) + # FIXME(vish): stick this in db + inst_type_id = instance['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) + + if FLAGS.use_cow_images: + driver_type = 'qcow2' + else: + driver_type = 'raw' + + xml_info = {'type': FLAGS.libvirt_type, + 'name': instance['name'], + 'basepath': os.path.join(FLAGS.instances_path, + instance['name']), + 'memory_kb': inst_type['memory_mb'] * 1024, + 'vcpus': inst_type['vcpus'], + 'rescue': rescue, + 'local': inst_type['local_gb'], + 'driver_type': driver_type, + 'nics': nics} + + if FLAGS.vnc_enabled: + if FLAGS.libvirt_type != 'lxc': + xml_info['vncserver_host'] = FLAGS.vncserver_host + if not rescue: + if instance['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if instance['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + xml_info['disk'] = xml_info['basepath'] + "/disk" + + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) + LOG.debug(_('instance %s: finished toXML method'), + instance['name']) + return xml + + def _lookup_by_name(self, instance_name): + """Retrieve libvirt domain object given an instance name. + + All libvirt error handling should be handled in this method and + relevant nova exceptions should be raised in response. + + """ + try: + return self._conn.lookupByName(instance_name) + except libvirt.libvirtError as ex: + error_code = ex.get_error_code() + if error_code == libvirt.VIR_ERR_NO_DOMAIN: + msg = _("Instance %s not found") % instance_name + raise exception.NotFound(msg) + + msg = _("Error from libvirt while looking up %(instance_name)s: " + "[Error Code %(error_code)s] %(ex)s") % locals() + raise exception.Error(msg) + + def get_info(self, instance_name): + """Retrieve information from libvirt for a specific instance name. + + If a libvirt error is encountered during lookup, we might raise a + NotFound exception or Error exception depending on how severe the + libvirt error is. + + """ + virt_dom = self._lookup_by_name(instance_name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time} + + def _create_new_domain(self, xml, persistent=True, launch_flags=0): + # NOTE(justinsb): libvirt has two types of domain: + # * a transient domain disappears when the guest is shutdown + # or the host is rebooted. + # * a permanent domain is not automatically deleted + # NOTE(justinsb): Even for ephemeral instances, transient seems risky + + if persistent: + # To create a persistent domain, first define it, then launch it. + domain = self._conn.defineXML(xml) + + domain.createWithFlags(launch_flags) + else: + # createXML call creates a transient domain + domain = self._conn.createXML(xml, launch_flags) + + return domain + + def get_diagnostics(self, instance_name): + raise exception.ApiError(_("diagnostics are not supported " + "for libvirt")) + + def get_disks(self, instance_name): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + + Returns a list of all block devices for this domain. 
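+        The names are the <target dev='...'/> values from the domain
+        XML, e.g. something like ['vda', 'vdb'] (illustrative).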
+ """ + domain = self._lookup_by_name(instance_name) + # TODO(devcamcar): Replace libxml2 with etree. + xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + disks = [] + + try: + ret = ctx.xpathEval('/domain/devices/disk') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst is None: + continue + + disks.append(devdst) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + return disks + + def get_interfaces(self, instance_name): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + + Returns a list of all network interfaces for this instance. + """ + domain = self._lookup_by_name(instance_name) + # TODO(devcamcar): Replace libxml2 with etree. + xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + interfaces = [] + + try: + ret = ctx.xpathEval('/domain/devices/interface') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst is None: + continue + + interfaces.append(devdst) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + return interfaces + + def get_vcpu_total(self): + """Get vcpu number of physical computer. + + :returns: the number of cpu core. + + """ + + # On certain platforms, this will raise a NotImplementedError. + try: + return multiprocessing.cpu_count() + except NotImplementedError: + LOG.warn(_("Cannot get the number of cpu, because this " + "function is not implemented for this platform. " + "This error can be safely ignored for now.")) + return 0 + + def get_memory_mb_total(self): + """Get the total memory size(MB) of physical computer. + + :returns: the total amount of memory(MB). + + """ + + if sys.platform.upper() != 'LINUX2': + return 0 + + meminfo = open('/proc/meminfo').read().split() + idx = meminfo.index('MemTotal:') + # transforming kb to mb. + return int(meminfo[idx + 1]) / 1024 + + def get_local_gb_total(self): + """Get the total hdd size(GB) of physical computer. + + :returns: + The total amount of HDD(GB). + Note that this value shows a partition where + NOVA-INST-DIR/instances mounts. + + """ + + hddinfo = os.statvfs(FLAGS.instances_path) + return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 + + def get_vcpu_used(self): + """ Get vcpu usage number of physical computer. + + :returns: The total number of vcpu that currently used. + + """ + + total = 0 + for dom_id in self._conn.listDomainsID(): + dom = self._conn.lookupByID(dom_id) + total += len(dom.vcpus()[1]) + return total + + def get_memory_mb_used(self): + """Get the free memory size(MB) of physical computer. + + :returns: the total usage of memory(MB). + + """ + + if sys.platform.upper() != 'LINUX2': + return 0 + + m = open('/proc/meminfo').read().split() + idx1 = m.index('MemFree:') + idx2 = m.index('Buffers:') + idx3 = m.index('Cached:') + avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024 + return self.get_memory_mb_total() - avail + + def get_local_gb_used(self): + """Get the free hdd size(GB) of physical computer. + + :returns: + The total usage of HDD(GB). + Note that this value shows a partition where + NOVA-INST-DIR/instances mounts. 
+ + """ + + hddinfo = os.statvfs(FLAGS.instances_path) + avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024 + return self.get_local_gb_total() - avail + + def get_hypervisor_type(self): + """Get hypervisor type. + + :returns: hypervisor type (ex. qemu) + + """ + + return self._conn.getType() + + def get_hypervisor_version(self): + """Get hypervisor version. + + :returns: hypervisor version (ex. 12003) + + """ + + # NOTE(justinsb): getVersion moved between libvirt versions + # Trying to do be compatible with older versions is a lost cause + # But ... we can at least give the user a nice message + method = getattr(self._conn, 'getVersion', None) + if method is None: + raise exception.Error(_("libvirt version is too old" + " (does not support getVersion)")) + # NOTE(justinsb): If we wanted to get the version, we could: + # method = getattr(libvirt, 'getVersion', None) + # NOTE(justinsb): This would then rely on a proper version check + + return method() + + def get_cpu_info(self): + """Get cpuinfo information. + + Obtains cpu feature from virConnect.getCapabilities, + and returns as a json string. + + :return: see above description + + """ + + xml = self._conn.getCapabilities() + xml = libxml2.parseDoc(xml) + nodes = xml.xpathEval('//host/cpu') + if len(nodes) != 1: + raise exception.Invalid(_("Invalid xml. '' must be 1," + "but %d\n") % len(nodes) + + xml.serialize()) + + cpu_info = dict() + + arch_nodes = xml.xpathEval('//host/cpu/arch') + if arch_nodes: + cpu_info['arch'] = arch_nodes[0].getContent() + + model_nodes = xml.xpathEval('//host/cpu/model') + if model_nodes: + cpu_info['model'] = model_nodes[0].getContent() + + vendor_nodes = xml.xpathEval('//host/cpu/vendor') + if vendor_nodes: + cpu_info['vendor'] = vendor_nodes[0].getContent() + + topology_nodes = xml.xpathEval('//host/cpu/topology') + topology = dict() + if topology_nodes: + topology_node = topology_nodes[0].get_properties() + while topology_node: + name = topology_node.get_name() + topology[name] = topology_node.getContent() + topology_node = topology_node.get_next() + + keys = ['cores', 'sockets', 'threads'] + tkeys = topology.keys() + if set(tkeys) != set(keys): + ks = ', '.join(keys) + raise exception.Invalid(_("Invalid xml: topology" + "(%(topology)s) must have " + "%(ks)s") % locals()) + + feature_nodes = xml.xpathEval('//host/cpu/feature') + features = list() + for nodes in feature_nodes: + features.append(nodes.get_properties().getContent()) + + cpu_info['topology'] = topology + cpu_info['features'] = features + return utils.dumps(cpu_info) + + def block_stats(self, instance_name, disk): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + """ + domain = self._lookup_by_name(instance_name) + return domain.blockStats(disk) + + def interface_stats(self, instance_name, interface): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + """ + domain = self._lookup_by_name(instance_name) + return domain.interfaceStats(interface) + + def get_console_pool_info(self, console_type): + #TODO(mdragon): console proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. 
+ return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) + + def update_available_resource(self, ctxt, host): + """Updates compute manager resource info on ComputeNode table. + + This method is called when nova-coompute launches, and + whenever admin executes "nova-manage service update_resource". + + :param ctxt: security context + :param host: hostname that compute manager is currently running + + """ + + try: + service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] + except exception.NotFound: + raise exception.Invalid(_("Cannot update compute manager " + "specific info, because no service " + "record was found.")) + + # Updating host information + dic = {'vcpus': self.get_vcpu_total(), + 'memory_mb': self.get_memory_mb_total(), + 'local_gb': self.get_local_gb_total(), + 'vcpus_used': self.get_vcpu_used(), + 'memory_mb_used': self.get_memory_mb_used(), + 'local_gb_used': self.get_local_gb_used(), + 'hypervisor_type': self.get_hypervisor_type(), + 'hypervisor_version': self.get_hypervisor_version(), + 'cpu_info': self.get_cpu_info()} + + compute_node_ref = service_ref['compute_node'] + if not compute_node_ref: + LOG.info(_('Compute_service record created for %s ') % host) + dic['service_id'] = service_ref['id'] + db.compute_node_create(ctxt, dic) + else: + LOG.info(_('Compute_service record updated for %s ') % host) + db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic) + + def compare_cpu(self, cpu_info): + """Checks the host cpu is compatible to a cpu given by xml. + + "xml" must be a part of libvirt.openReadonly().getCapabilities(). + return values follows by virCPUCompareResult. + if 0 > return value, do live migration. + 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' + + :param cpu_info: json string that shows cpu feature(see get_cpu_info()) + :returns: + None. if given cpu info is not compatible to this server, + raise exception. + + """ + + LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) + dic = utils.loads(cpu_info) + xml = str(Template(self.cpuinfo_xml, searchList=dic)) + LOG.info(_('to xml...\n:%s ' % xml)) + + u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" + m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") + # unknown character exists in xml, then libvirt complains + try: + ret = self._conn.compareCPU(xml, 0) + except libvirt.libvirtError, e: + ret = e.message + LOG.error(m % locals()) + raise + + if ret <= 0: + raise exception.Invalid(m % locals()) + + return + + def ensure_filtering_rules_for_instance(self, instance_ref, + time=None): + """Setting up filtering rules and waiting for its completion. + + To migrate an instance, filtering rules to hypervisors + and firewalls are inevitable on destination host. + ( Waiting only for filterling rules to hypervisor, + since filtering rules to firewall rules can be set faster). + + Concretely, the below method must be called. + - setup_basic_filtering (for nova-basic, etc.) + - prepare_instance_filter(for nova-instance-instance-xxx, etc.) + + to_xml may have to be called since it defines PROJNET, PROJMASK. + but libvirt migrates those value through migrateToURI(), + so , no need to be called. 
+ + Don't use thread for this method since migration should + not be started when setting-up filtering rules operations + are not completed. + + :params instance_ref: nova.db.sqlalchemy.models.Instance object + + """ + + if not time: + time = greenthread + + # If any instances never launch at destination host, + # basic-filtering must be set here. + self.firewall_driver.setup_basic_filtering(instance_ref) + # setting up n)ova-instance-instance-xx mainly. + self.firewall_driver.prepare_instance_filter(instance_ref) + + # wait for completion + timeout_count = range(FLAGS.live_migration_retry_count) + while timeout_count: + if self.firewall_driver.instance_filter_exists(instance_ref): + break + timeout_count.pop() + if len(timeout_count) == 0: + msg = _('Timeout migrating for %s. nwfilter not found.') + raise exception.Error(msg % instance_ref.name) + time.sleep(1) + + def live_migration(self, ctxt, instance_ref, dest, + post_method, recover_method): + """Spawning live_migration operation for distributing high-load. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params dest: destination host + :params post_method: + post operation method. + expected nova.compute.manager.post_live_migration. + :params recover_method: + recovery method when any exception occurs. + expected nova.compute.manager.recover_live_migration. + + """ + + greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, + post_method, recover_method) + + def _live_migration(self, ctxt, instance_ref, dest, + post_method, recover_method): + """Do live migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params dest: destination host + :params post_method: + post operation method. + expected nova.compute.manager.post_live_migration. + :params recover_method: + recovery method when any exception occurs. + expected nova.compute.manager.recover_live_migration. + + """ + + # Do live migration. + try: + flaglist = FLAGS.live_migration_flag.split(',') + flagvals = [getattr(libvirt, x.strip()) for x in flaglist] + logical_sum = reduce(lambda x, y: x | y, flagvals) + + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) + + except Exception: + recover_method(ctxt, instance_ref, dest=dest) + raise + + # Waiting for completion of live_migration. 
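+        # The polling below checks get_info() every 0.5s; once the
+        # domain is gone from this host (NotFound), the migration is
+        # treated as complete and post_method is invoked.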
+ timer = utils.LoopingCall(f=None) + + def wait_for_live_migration(): + """waiting for live migration completion""" + try: + self.get_info(instance_ref.name)['state'] + except exception.NotFound: + timer.stop() + post_method(ctxt, instance_ref, dest) + + timer.f = wait_for_live_migration + timer.start(interval=0.5, now=True) + + def unfilter_instance(self, instance_ref): + """See comments of same method in firewall_driver.""" + self.firewall_driver.unfilter_instance(instance_ref) diff --git a/nova/virt/libvirt/cpuinfo.xml.template b/nova/virt/libvirt/cpuinfo.xml.template new file mode 100644 index 000000000..48842b29d --- /dev/null +++ b/nova/virt/libvirt/cpuinfo.xml.template @@ -0,0 +1,9 @@ + + $arch + $model + $vendor + +#for $var in $features + +#end for + diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py new file mode 100644 index 000000000..99ba02aaa --- /dev/null +++ b/nova/virt/libvirt/firewall.py @@ -0,0 +1,630 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from eventlet import tpool + +from nova import context +from nova import db +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.libvirt import netutils + + +LOG = logging.getLogger("nova.virt.libvirt.firewall") +FLAGS = flags.FLAGS + + +try: + import libvirt +except ImportError: + LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will " + "not work correctly.")) + + +class FirewallDriver(object): + + def prepare_instance_filter(self, instance, network_info=None): + """Prepare filters for the instance. + + At this point, the instance isn't running yet. + + """ + raise NotImplementedError() + + def unfilter_instance(self, instance): + """Stop filtering instance.""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + + """ + raise NotImplementedError() + + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store. + + Gets called when a rule has been added to or removed from + the security group. + + """ + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store. + + Gets called when an instance gets added to or removed from + the security group. + + """ + raise NotImplementedError() + + def setup_basic_filtering(self, instance, network_info=None): + """Create rules to block spoofing and allow dhcp. + + This gets called when spawning an instance, before + :method:`prepare_instance_filter`. 
+
+
+class NWFilterFirewall(FirewallDriver):
+    """Network filter firewall implementation.
+
+    This class implements a network filtering mechanism versatile
+    enough for EC2 style Security Group filtering by leveraging
+    libvirt's nwfilter.
+
+    First, all instances get a filter ("nova-base-filter") applied.
+    This filter provides some basic security such as protection against
+    MAC spoofing, IP spoofing, and ARP spoofing.
+
+    This filter drops all incoming ipv4 and ipv6 connections.
+    Outgoing connections are never blocked.
+
+    Second, every security group maps to a nwfilter filter(*).
+    NWFilters can be updated at runtime and changes are applied
+    immediately, so changes to security groups can be applied at
+    runtime (as mandated by the spec).
+
+    Security group rules are named "nova-secgroup-<id>" where <id>
+    is the internal id of the security group. They're applied only on
+    hosts that have instances in the security group in question.
+
+    Updates to security groups are done by updating the data model
+    (in response to API calls) followed by a request sent to all
+    the nodes with instances in the security group to refresh the
+    security group.
+
+    Each instance has its own NWFilter, which references the above
+    mentioned security group NWFilters. This was done because
+    interfaces can only reference one filter while filters can
+    reference multiple other filters. This has the added benefit of
+    actually being able to add and remove security groups from an
+    instance at run time. This functionality is not exposed anywhere,
+    though.
+
+    Outstanding questions:
+
+    The name is unique, so would there be any good reason to sync
+    the uuid across the nodes (by assigning it from the datamodel)?
+
+
+    (*) This sentence brought to you by the redundancy department of
+    redundancy.
+
+    """
+
+    def __init__(self, get_connection, **kwargs):
+        self._libvirt_get_connection = get_connection
+        self.static_filters_configured = False
+        self.handle_security_groups = False
+
+    def apply_instance_filter(self, instance):
+        pass
+
+    def _get_connection(self):
+        return self._libvirt_get_connection()
+
+    _conn = property(_get_connection)
+
+    def nova_dhcp_filter(self):
+        """Defines nova DHCP filter.
+
+        The standard allow-dhcp-server filter is an <ip> one, so it uses
+        ebtables to allow traffic through. Without a corresponding rule in
+        iptables, it'll get blocked anyway.
+
+        """
+        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+                    <rule action='accept' direction='out'
+                          priority='100'>
+                      <udp srcipaddr='0.0.0.0'
+                           dstipaddr='255.255.255.255'
+                           srcportstart='68'
+                           dstportend='67'/>
+                    </rule>
+                    <rule action='accept' direction='in'
+                          priority='100'>
+                      <udp srcipaddr='$DHCPSERVER'
+                           srcportstart='67'
+                           dstportend='68'/>
+                    </rule>
+                  </filter>'''
+
+    def nova_ra_filter(self):
+        return '''<filter name='nova-allow-ra-server' chain='root'>
+                    <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
+                    <rule action='accept' direction='inout'
+                          priority='100'>
+                      <icmpv6 srcipaddr='$RASERVER'/>
+                    </rule>
+                  </filter>'''
+
+    def setup_basic_filtering(self, instance, network_info=None):
+        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
+        logging.info('called setup_basic_filtering in nwfilter')
+
+        if not network_info:
+            network_info = netutils.get_network_info(instance)
+
+        if self.handle_security_groups:
+            # No point in setting up a filter set that we'll be overriding
+            # anyway.
+            return
+
+        logging.info('ensuring static filters')
+        self._ensure_static_filters()
+
+        if instance['image_id'] == str(FLAGS.vpn_image_id):
+            base_filter = 'nova-vpn'
+        else:
+            base_filter = 'nova-base'
+
+        for (network, mapping) in network_info:
+            nic_id = mapping['mac'].replace(':', '')
+            instance_filter_name = self._instance_filter_name(instance, nic_id)
+            self._define_filter(self._filter_container(instance_filter_name,
+                                                       [base_filter]))
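What setup_basic_filtering produces for one NIC is a tiny filter tree: the per-NIC instance filter references nova-base, which in turn references the stock libvirt filters. A rough sketch of those relationships and of how they expand; the instance id and MAC suffix are illustrative:

    # Per-NIC instance filter -> base filter -> stock libvirt filters.
    filter_tree = {
        'nova-instance-instance-00000001-02163e4a9cf6': ['nova-base'],
        'nova-base': ['no-mac-spoofing',
                      'no-ip-spoofing',
                      'no-arp-spoofing',
                      'allow-dhcp-server'],
    }


    def effective_filters(tree, name):
        """Depth-first expansion of filterref references."""
        result = []
        for child in tree.get(name, []):
            result.append(child)
            result.extend(effective_filters(tree, child))
        return result


    print(effective_filters(filter_tree,
                            'nova-instance-instance-00000001-02163e4a9cf6'))
    # ['nova-base', 'no-mac-spoofing', 'no-ip-spoofing',
    #  'no-arp-spoofing', 'allow-dhcp-server']

The guest's domain XML then points each interface at the leaf filter with a single filterref element, which is why an interface can carry several security groups even though libvirt lets it reference only one filter.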
+
+    def _ensure_static_filters(self):
+        if self.static_filters_configured:
+            return
+
+        self._define_filter(self._filter_container('nova-base',
+                                                   ['no-mac-spoofing',
+                                                    'no-ip-spoofing',
+                                                    'no-arp-spoofing',
+                                                    'allow-dhcp-server']))
+        self._define_filter(self._filter_container('nova-vpn',
+                                                   ['allow-dhcp-server']))
+        self._define_filter(self.nova_base_ipv4_filter)
+        self._define_filter(self.nova_base_ipv6_filter)
+        self._define_filter(self.nova_dhcp_filter)
+        self._define_filter(self.nova_ra_filter)
+        if FLAGS.allow_project_net_traffic:
+            self._define_filter(self.nova_project_filter)
+            if FLAGS.use_ipv6:
+                self._define_filter(self.nova_project_filter_v6)
+
+        self.static_filters_configured = True
+
+    def _filter_container(self, name, filters):
+        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+            name,
+            ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+        return xml
+
+    def nova_base_ipv4_filter(self):
+        retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
+        for protocol in ['tcp', 'udp', 'icmp']:
+            for direction, action, priority in [('out', 'accept', 399),
+                                                ('in', 'drop', 400)]:
+                retval += """<rule action='%s' direction='%s' priority='%d'>
+                               <%s />
+                             </rule>""" % (action, direction,
+                                           priority, protocol)
+        retval += '</filter>'
+        return retval
+
+    def nova_base_ipv6_filter(self):
+        retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
+        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+            for direction, action, priority in [('out', 'accept', 399),
+                                                ('in', 'drop', 400)]:
+                retval += """<rule action='%s' direction='%s' priority='%d'>
+                               <%s />
+                             </rule>""" % (action, direction,
+                                           priority, protocol)
+        retval += '</filter>'
+        return retval
+
+    def nova_project_filter(self):
+        retval = "<filter name='nova-project' chain='ipv4'>"
+        for protocol in ['tcp', 'udp', 'icmp']:
+            retval += """<rule action='accept' direction='inout'
+                               priority='200'>
+                           <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
+                         </rule>""" % protocol
+        retval += '</filter>'
+        return retval
+
+    def nova_project_filter_v6(self):
+        retval = "<filter name='nova-project-v6' chain='ipv6'>"
+        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+            retval += """<rule action='accept' direction='inout'
+                               priority='200'>
+                           <%s srcipaddr='$PROJNETV6'
+                               srcipmask='$PROJMASKV6' />
+                         </rule>""" % (protocol)
+        retval += '</filter>'
+        return retval
+
+    def _define_filter(self, xml):
+        if callable(xml):
+            xml = xml()
+        # execute in a native thread and block current greenthread until done
+        tpool.execute(self._conn.nwfilterDefineXML, xml)
+
+    def unfilter_instance(self, instance):
+        # Nothing to do
+        pass
+
+    def prepare_instance_filter(self, instance, network_info=None):
+        """Creates an NWFilter for the given instance.
+
+        In the process, it makes sure the filters for the security groups as
+        well as the base filter are all in place.
+ + """ + if not network_info: + network_info = netutils.get_network_info(instance) + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + + ctxt = context.get_admin_context() + + instance_secgroup_filter_name = \ + '%s-secgroup' % (self._instance_filter_name(instance)) + #% (instance_filter_name,) + + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] + + for security_group in \ + db.security_group_get_by_instance(ctxt, instance['id']): + + self.refresh_security_group_rules(security_group['id']) + + instance_secgroup_filter_children += [('nova-secgroup-%s' % + security_group['id'])] + + self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + instance_filter_children = \ + [base_filter, instance_secgroup_filter_name] + + if FLAGS.use_ipv6: + gateway_v6 = network['gateway_v6'] + + if gateway_v6: + instance_secgroup_filter_children += \ + ['nova-allow-ra-server'] + + if FLAGS.allow_project_net_traffic: + instance_filter_children += ['nova-project'] + if FLAGS.use_ipv6: + instance_filter_children += ['nova-project-v6'] + + self._define_filter( + self._filter_container(instance_filter_name, + instance_filter_children)) + + return + + def refresh_security_group_rules(self, security_group_id): + return self._define_filter( + self.security_group_to_nwfilter_xml(security_group_id)) + + def security_group_to_nwfilter_xml(self, security_group_id): + security_group = db.security_group_get(context.get_admin_context(), + security_group_id) + rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} + for rule in security_group.rules: + rule_xml += "" + if rule.cidr: + version = netutils.get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = netutils.get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) + if rule.protocol in ['tcp', 'udp']: + rule_xml += "dstportstart='%s' dstportend='%s' " % \ + (rule.from_port, rule.to_port) + elif rule.protocol == 'icmp': + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) + if rule.from_port != -1: + rule_xml += "type='%s' " % rule.from_port + if rule.to_port != -1: + rule_xml += "code='%s' " % rule.to_port + + rule_xml += '/>\n' + rule_xml += "\n" + xml = " + ${name} + ${memory_kb} + +#if $type == 'lxc' + #set $disk_prefix = '' + #set $disk_bus = '' + exe + /sbin/init +#else if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $type == 'lxc' 
+ + + + +#else + #if $getVar('rescue', False) + + + + + + + + + + + #else + + + + + + #if $getVar('local', False) + + + + + + #end if + #end if +#end if + +#for $nic in $nics + + + + + + + +#if $getVar('nic.extra_params', False) + ${nic.extra_params} +#end if +#if $getVar('nic.gateway_v6', False) + +#end if + + +#end for + + + + + + + + + + + + + + + + +#if $getVar('vncserver_host', False) + +#end if + + diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py new file mode 100644 index 000000000..3ed9a0fdc --- /dev/null +++ b/nova/virt/libvirt/netutils.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Network-releated utilities for supporting libvirt connection code.""" + + +import IPy + +from nova import context +from nova import db +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS + + +def get_net_and_mask(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.netmask()) + + +def get_net_and_prefixlen(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.prefixlen()) + + +def get_ip_version(cidr): + net = IPy.IP(cidr) + return int(net.version()) + + +def get_network_info(instance): + # TODO(adiantum) If we will keep this function + # we should cache network_info + admin_context = context.get_admin_context() + + ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, + instance['id']) + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + flavor = db.instance_type_get_by_id(admin_context, + instance['instance_type_id']) + network_info = [] + + for network in networks: + network_ips = [ip for ip in ip_addresses + if ip['network_id'] == network['id']] + + def ip_dict(ip): + return { + 'ip': ip['address'], + 'netmask': network['netmask'], + 'enabled': '1'} + + def ip6_dict(): + prefix = network['cidr_v6'] + mac = instance['mac_address'] + return { + 'ip': utils.to_global_ipv6(prefix, mac), + 'netmask': network['netmask_v6'], + 'enabled': '1'} + + mapping = { + 'label': network['label'], + 'gateway': network['gateway'], + 'broadcast': network['broadcast'], + 'mac': instance['mac_address'], + 'rxtx_cap': flavor['rxtx_cap'], + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_ips]} + + if FLAGS.use_ipv6: + mapping['ip6s'] = [ip6_dict()] + mapping['gateway6'] = network['gateway_v6'] + + network_info.append((network, mapping)) + return network_info diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py deleted file mode 100644 index e76de47db..000000000 --- a/nova/virt/libvirt_conn.py +++ /dev/null @@ -1,2168 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A connection to a hypervisor through libvirt. - -Supports KVM, LXC, QEMU, UML, and XEN. - -**Related Flags** - -:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen - (default: kvm). -:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template. -:rescue_image_id: Rescue ami image (default: ami-rescue). -:rescue_kernel_id: Rescue aki image (default: aki-rescue). -:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). -:injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic - -""" - -import multiprocessing -import os -import random -import shutil -import subprocess -import sys -import tempfile -import time -import uuid -from xml.dom import minidom -from xml.etree import ElementTree - -from eventlet import greenthread -from eventlet import tpool - -import IPy - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova import vnc -from nova.auth import manager -from nova.compute import instance_types -from nova.compute import power_state -from nova.virt import disk -from nova.virt import driver -from nova.virt import images - -libvirt = None -libxml2 = None -Template = None - -LOG = logging.getLogger('nova.virt.libvirt_conn') - -FLAGS = flags.FLAGS -flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') -# TODO(vish): These flags should probably go into a shared location -flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') -flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') -flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') - -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_string('libvirt_type', - 'kvm', - 'Libvirt domain type (valid options are: ' - 'kvm, lxc, qemu, uml, xen)') -flags.DEFINE_string('libvirt_uri', - '', - 'Override the default libvirt URI (which is dependent' - ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', - True, - 'Whether to allow in project network traffic') -flags.DEFINE_bool('use_cow_images', - True, - 'Whether to use cow images') -flags.DEFINE_string('ajaxterm_portrange', - '10000-12000', - 'Range of ports that ajaxterm should randomly try to bind') -flags.DEFINE_string('firewall_driver', - 'nova.virt.libvirt_conn.IptablesFirewallDriver', - 'Firewall driver (defaults to iptables)') -flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/cpuinfo.xml.template'), - 'CpuInfo XML Template (Used only live migration now)') -flags.DEFINE_string('live_migration_uri', - "qemu+tcp://%s/system", - 'Define protocol used by live_migration feature') -flags.DEFINE_string('live_migration_flag', - "VIR_MIGRATE_UNDEFINE_SOURCE, 
VIR_MIGRATE_PEER2PEER", - 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, - 'Define live migration behavior') -flags.DEFINE_string('qemu_img', 'qemu-img', - 'binary to use for qemu-img commands') -flags.DEFINE_bool('start_guests_on_host_boot', False, - 'Whether to restart guests when the host reboots') - - -def get_connection(read_only): - # These are loaded late so that there's no need to install these - # libraries when not using libvirt. - # Cheetah is separate because the unit tests want to load Cheetah, - # but not libvirt. - global libvirt - global libxml2 - if libvirt is None: - libvirt = __import__('libvirt') - if libxml2 is None: - libxml2 = __import__('libxml2') - _late_load_cheetah() - return LibvirtConnection(read_only) - - -def _late_load_cheetah(): - global Template - if Template is None: - t = __import__('Cheetah.Template', globals(), locals(), - ['Template'], -1) - Template = t.Template - - -def _get_net_and_mask(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.netmask()) - - -def _get_net_and_prefixlen(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.prefixlen()) - - -def _get_ip_version(cidr): - net = IPy.IP(cidr) - return int(net.version()) - - -def _get_network_info(instance): - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, - instance['id']) - networks = db.network_get_all_by_instance(admin_context, - instance['id']) - flavor = db.instance_type_get_by_id(admin_context, - instance['instance_type_id']) - network_info = [] - - for network in networks: - network_ips = [ip for ip in ip_addresses - if ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip['address'], - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = instance['mac_address'] - return { - 'ip': utils.to_global_ipv6(prefix, mac), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'mac': instance['mac_address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info - - -class LibvirtConnection(driver.ComputeDriver): - - def __init__(self, read_only): - super(LibvirtConnection, self).__init__() - self.libvirt_uri = self.get_uri() - - self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() - self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() - self._wrapped_conn = None - self.read_only = read_only - - fw_class = utils.import_class(FLAGS.firewall_driver) - self.firewall_driver = fw_class(get_connection=self._get_connection) - - def init_host(self, host): - # Adopt existing VM's running here - ctxt = context.get_admin_context() - for instance in db.instance_get_all_by_host(ctxt, host): - try: - LOG.debug(_('Checking state of %s'), instance['name']) - state = self.get_info(instance['name'])['state'] - except exception.NotFound: - state = power_state.SHUTOFF - - LOG.debug(_('Current state of %(name)s was %(state)s.'), - {'name': instance['name'], 'state': state}) - db.instance_set_state(ctxt, instance['id'], state) - - # NOTE(justinsb): We no longer delete SHUTOFF instances, - # the user 
may want to power them back on - - if state != power_state.RUNNING: - continue - self.firewall_driver.prepare_instance_filter(instance) - self.firewall_driver.apply_instance_filter(instance) - - def _get_connection(self): - if not self._wrapped_conn or not self._test_connection(): - LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) - self._wrapped_conn = self._connect(self.libvirt_uri, - self.read_only) - return self._wrapped_conn - _conn = property(_get_connection) - - def _test_connection(self): - try: - self._wrapped_conn.getInfo() - return True - except libvirt.libvirtError as e: - if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ - e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - LOG.debug(_('Connection to libvirt broke')) - return False - raise - - def get_uri(self): - if FLAGS.libvirt_type == 'uml': - uri = FLAGS.libvirt_uri or 'uml:///system' - elif FLAGS.libvirt_type == 'xen': - uri = FLAGS.libvirt_uri or 'xen:///' - elif FLAGS.libvirt_type == 'lxc': - uri = FLAGS.libvirt_uri or 'lxc:///' - else: - uri = FLAGS.libvirt_uri or 'qemu:///system' - return uri - - def _connect(self, uri, read_only): - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - - if read_only: - return libvirt.openReadOnly(uri) - else: - return libvirt.openAuth(uri, auth, 0) - - def list_instances(self): - return [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] - - def _map_to_instance_info(self, domain): - """Gets info from a virsh domain object into an InstanceInfo""" - - # domain.info() returns a list of: - # state: one of the state values (virDomainState) - # maxMemory: the maximum memory used by the domain - # memory: the current amount of memory used by the domain - # nbVirtCPU: the number of virtual CPU - # puTime: the time used by the domain in nanoseconds - - (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info() - name = domain.name() - - return driver.InstanceInfo(name, state) - - def list_instances_detail(self): - infos = [] - for domain_id in self._conn.listDomainsID(): - domain = self._conn.lookupByID(domain_id) - info = self._map_to_instance_info(domain) - infos.append(info) - return infos - - def destroy(self, instance, cleanup=True): - instance_name = instance['name'] - - try: - virt_dom = self._lookup_by_name(instance_name) - except exception.NotFound: - virt_dom = None - - # If the instance is already terminated, we're still happy - # Otherwise, destroy it - if virt_dom is not None: - try: - virt_dom.destroy() - except libvirt.libvirtError as e: - is_okay = False - errcode = e.get_error_code() - if errcode == libvirt.VIR_ERR_OPERATION_INVALID: - # If the instance if already shut off, we get this: - # Code=55 Error=Requested operation is not valid: - # domain is not running - (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() - if state == power_state.SHUTOFF: - is_okay = True - - if not is_okay: - LOG.warning(_("Error from libvirt during destroy of " - "%(instance_name)s. Code=%(errcode)s " - "Error=%(e)s") % - locals()) - raise - - try: - # NOTE(justinsb): We remove the domain definition. We probably - # would do better to keep it if cleanup=False (e.g. volumes?) - # (e.g. #2 - not losing machines on failure) - virt_dom.undefine() - except libvirt.libvirtError as e: - errcode = e.get_error_code() - LOG.warning(_("Error from libvirt during undefine of " - "%(instance_name)s. 
Code=%(errcode)s " - "Error=%(e)s") % - locals()) - raise - - def _wait_for_destroy(): - """Called at an interval until the VM is gone.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("Instance %s destroyed successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_destroy) - timer.start(interval=0.5, now=True) - - self.firewall_driver.unfilter_instance(instance) - - if cleanup: - self._cleanup(instance) - - return True - - def _cleanup(self, instance): - target = os.path.join(FLAGS.instances_path, instance['name']) - instance_name = instance['name'] - LOG.info(_('instance %(instance_name)s: deleting instance files' - ' %(target)s') % locals()) - if FLAGS.libvirt_type == 'lxc': - disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images) - if os.path.exists(target): - shutil.rmtree(target) - - @exception.wrap_exception - def attach_volume(self, instance_name, device_path, mountpoint): - virt_dom = self._lookup_by_name(instance_name) - mount_device = mountpoint.rpartition("/")[2] - if device_path.startswith('/dev/'): - xml = """ - - - - """ % (device_path, mount_device) - elif ':' in device_path: - (protocol, name) = device_path.split(':') - xml = """ - - - - """ % (protocol, - name, - mount_device) - else: - raise exception.Invalid(_("Invalid device path %s") % device_path) - - virt_dom.attachDevice(xml) - - def _get_disk_xml(self, xml, device): - """Returns the xml for the disk mounted at device""" - try: - doc = libxml2.parseDoc(xml) - except: - return None - ctx = doc.xpathNewContext() - try: - ret = ctx.xpathEval('/domain/devices/disk') - for node in ret: - for child in node.children: - if child.name == 'target': - if child.prop('dev') == device: - return str(node) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - @exception.wrap_exception - def detach_volume(self, instance_name, mountpoint): - virt_dom = self._lookup_by_name(instance_name) - mount_device = mountpoint.rpartition("/")[2] - xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) - if not xml: - raise exception.NotFound(_("No disk at %s") % mount_device) - virt_dom.detachDevice(xml) - - @exception.wrap_exception - def snapshot(self, instance, image_id): - """Create snapshot from a running VM instance. - - This command only works with qemu 0.14+, the qemu_img flag is - provided so that a locally compiled binary of qemu-img can be used - to support this command. 
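The export step that follows shells out to qemu-img, which (from 0.14 on) can read an internal qcow2 snapshot directly with -s and convert it to a raw image suitable for upload. A sketch of the equivalent command line, with illustrative paths and snapshot name:

    import subprocess

    snapshot_name = 'c7a4e2f09b3d4e41b7a60d9e7f2c1a55'  # illustrative uuid hex
    disk_path = '/var/lib/nova/instances/instance-00000001/disk'
    out_path = '/tmp/%s' % snapshot_name

    # Convert the named internal snapshot of a qcow2 disk to a raw image.
    cmd = ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
           '-s', snapshot_name, disk_path, out_path]
    subprocess.check_call(cmd)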
- - """ - image_service = utils.import_object(FLAGS.image_service) - virt_dom = self._lookup_by_name(instance['name']) - elevated = context.get_admin_context() - - base = image_service.show(elevated, instance['image_id']) - - metadata = {'disk_format': base['disk_format'], - 'container_format': base['container_format'], - 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), - 'properties': {'architecture': base['architecture'], - 'kernel_id': instance['kernel_id'], - 'image_location': 'snapshot', - 'image_state': 'available', - 'owner_id': instance['project_id'], - 'ramdisk_id': instance['ramdisk_id'], - } - } - - # Make the snapshot - snapshot_name = uuid.uuid4().hex - snapshot_xml = """ - - %s - - """ % snapshot_name - snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0) - - # Find the disk - xml_desc = virt_dom.XMLDesc(0) - domain = ElementTree.fromstring(xml_desc) - source = domain.find('devices/disk/source') - disk_path = source.get('file') - - # Export the snapshot to a raw image - temp_dir = tempfile.mkdtemp() - out_path = os.path.join(temp_dir, snapshot_name) - qemu_img_cmd = (FLAGS.qemu_img, - 'convert', - '-f', - 'qcow2', - '-O', - 'raw', - '-s', - snapshot_name, - disk_path, - out_path) - utils.execute(*qemu_img_cmd) - - # Upload that image to the image service - with open(out_path) as image_file: - image_service.update(elevated, - image_id, - metadata, - image_file) - - # Clean up - shutil.rmtree(temp_dir) - - @exception.wrap_exception - def reboot(self, instance): - """Reboot a virtual machine, given an instance reference. - - This method actually destroys and re-creates the domain to ensure the - reboot happens, as the guest OS cannot ignore this action. - - """ - self.destroy(instance, False) - xml = self.to_xml(instance) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) - self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) - - def _wait_for_reboot(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s rebooted successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_reboot) - return timer.start(interval=0.5, now=True) - - @exception.wrap_exception - def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") - - @exception.wrap_exception - def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") - - @exception.wrap_exception - def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") - - @exception.wrap_exception - def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") - - @exception.wrap_exception - def rescue(self, instance): - """Loads a VM using rescue images. - - A rescue is normally performed when something goes wrong with the - primary images and data needs to be corrected/recovered. Rescuing - should not edit or over-ride the original image, only allow for - data recovery. 
- - """ - self.destroy(instance, False) - - xml = self.to_xml(instance, rescue=True) - rescue_images = {'image_id': FLAGS.rescue_image_id, - 'kernel_id': FLAGS.rescue_kernel_id, - 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, '.rescue', rescue_images) - self._create_new_domain(xml) - - def _wait_for_rescue(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s rescued successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_rescue) - return timer.start(interval=0.5, now=True) - - @exception.wrap_exception - def unrescue(self, instance): - """Reboot the VM which is being rescued back into primary images. - - Because reboot destroys and re-creates instances, unresue should - simply call reboot. - - """ - self.reboot(instance) - - @exception.wrap_exception - def poll_rescued_instances(self, timeout): - pass - - # NOTE(ilyaalekseyev): Implementation like in multinics - # for xenapi(tr3buchet) - @exception.wrap_exception - def spawn(self, instance, network_info=None): - xml = self.to_xml(instance, False, network_info) - self.firewall_driver.setup_basic_filtering(instance, network_info) - self.firewall_driver.prepare_instance_filter(instance, network_info) - self._create_image(instance, xml, network_info) - domain = self._create_new_domain(xml) - LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) - - if FLAGS.start_guests_on_host_boot: - LOG.debug(_("instance %s: setting autostart ON") % - instance['name']) - domain.setAutostart(1) - - def _wait_for_boot(): - """Called at an interval until the VM is running.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s spawned successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_boot) - return timer.start(interval=0.5, now=True) - - def _flush_xen_console(self, virsh_output): - LOG.info(_('virsh said: %r'), virsh_output) - virsh_output = virsh_output[0].strip() - - if virsh_output.startswith('/dev/'): - LOG.info(_("cool, it's a device")) - out, err = utils.execute('sudo', 'dd', - "if=%s" % virsh_output, - 'iflag=nonblock', - check_exit_code=False) - return out - else: - return '' - - def _append_to_file(self, data, fpath): - LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals()) - fp = open(fpath, 'a+') - fp.write(data) - return fpath - - def _dump_file(self, fpath): - fp = open(fpath, 'r+') - contents = fp.read() - LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) - return contents - - @exception.wrap_exception - def get_console_output(self, instance): - console_log = os.path.join(FLAGS.instances_path, instance['name'], - 'console.log') - - utils.execute('sudo', 'chown', os.getuid(), console_log) - - if FLAGS.libvirt_type == 'xen': - # Xen is special - virsh_output = utils.execute('virsh', 'ttyconsole', - instance['name']) - data = self._flush_xen_console(virsh_output) - fpath = 
self._append_to_file(data, console_log) - elif FLAGS.libvirt_type == 'lxc': - # LXC is also special - LOG.info(_("Unable to read LXC console")) - else: - fpath = console_log - - return self._dump_file(fpath) - - @exception.wrap_exception - def get_ajax_console(self, instance): - def get_open_port(): - start_port, end_port = FLAGS.ajaxterm_portrange.split("-") - for i in xrange(0, 100): # don't loop forever - port = random.randint(int(start_port), int(end_port)) - # netcat will exit with 0 only if the port is in use, - # so a nonzero return value implies it is unused - cmd = 'netcat', '0.0.0.0', port, '-w', '1' - try: - stdout, stderr = utils.execute(*cmd, process_input='') - except exception.ProcessExecutionError: - return port - raise Exception(_('Unable to find an open port')) - - def get_pty_for_instance(instance_name): - virt_dom = self._lookup_by_name(instance_name) - xml = virt_dom.XMLDesc(0) - dom = minidom.parseString(xml) - - for serial in dom.getElementsByTagName('serial'): - if serial.getAttribute('type') == 'pty': - source = serial.getElementsByTagName('source')[0] - return source.getAttribute('path') - - port = get_open_port() - token = str(uuid.uuid4()) - host = instance['host'] - - ajaxterm_cmd = 'sudo socat - %s' \ - % get_pty_for_instance(instance['name']) - - cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ - % (utils.novadir(), ajaxterm_cmd, token, port) - - subprocess.Popen(cmd, shell=True) - return {'token': token, 'host': host, 'port': port} - - @exception.wrap_exception - def get_vnc_console(self, instance): - def get_vnc_port_for_instance(instance_name): - virt_dom = self._lookup_by_name(instance_name) - xml = virt_dom.XMLDesc(0) - # TODO: use etree instead of minidom - dom = minidom.parseString(xml) - - for graphic in dom.getElementsByTagName('graphics'): - if graphic.getAttribute('type') == 'vnc': - return graphic.getAttribute('port') - - port = get_vnc_port_for_instance(instance['name']) - token = str(uuid.uuid4()) - host = instance['host'] - - return {'token': token, 'host': host, 'port': port} - - @staticmethod - def _cache_image(fn, target, fname, cow=False, *args, **kwargs): - """Wrapper for a method that creates an image that caches the image. - - This wrapper will save the image into a common store and create a - copy for use by the hypervisor. - - The underlying method should specify a kwarg of target representing - where the image will be saved. - - fname is used as the filename of the base image. The filename needs - to be unique to a given image. - - If cow is True, it will make a CoW image instead of a copy. 
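The CoW path rests on qcow2 backing files: the base image under _base stays read-only and shared, while each instance's disk is a thin overlay holding only its own writes. A sketch of the two branches, using the same qemu-img options as the code below and illustrative paths:

    import subprocess

    base = '/var/lib/nova/instances/_base/12345678'
    target = '/var/lib/nova/instances/instance-00000001/disk'

    use_cow = True
    if use_cow:
        # Thin overlay: reads fall through to 'base', writes stay in 'target'.
        subprocess.check_call(['qemu-img', 'create', '-f', 'qcow2',
                               '-o', 'cluster_size=2M,backing_file=%s' % base,
                               target])
    else:
        # Full private copy of the base image.
        subprocess.check_call(['cp', base, target])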
- """ - if not os.path.exists(target): - base_dir = os.path.join(FLAGS.instances_path, '_base') - if not os.path.exists(base_dir): - os.mkdir(base_dir) - base = os.path.join(base_dir, fname) - - @utils.synchronized(fname) - def call_if_not_exists(base, fn, *args, **kwargs): - if not os.path.exists(base): - fn(target=base, *args, **kwargs) - - call_if_not_exists(base, fn, *args, **kwargs) - - if cow: - utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', - 'cluster_size=2M,backing_file=%s' % base, - target) - else: - utils.execute('cp', base, target) - - def _fetch_image(self, target, image_id, user, project, size=None): - """Grab image and optionally attempt to resize it""" - images.fetch(image_id, target, user, project) - if size: - disk.extend(target, size) - - def _create_local(self, target, local_gb): - """Create a blank image of specified size""" - utils.execute('truncate', target, '-s', "%dG" % local_gb) - # TODO(vish): should we format disk by default? - - def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, - network_info=None): - if not network_info: - network_info = _get_network_info(inst) - - if not suffix: - suffix = '' - - # syntactic nicety - def basepath(fname='', suffix=suffix): - return os.path.join(FLAGS.instances_path, - inst['name'], - fname + suffix) - - # ensure directories exist and are writable - utils.execute('mkdir', '-p', basepath(suffix='')) - - LOG.info(_('instance %s: Creating image'), inst['name']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.libvirt_type == 'lxc': - container_dir = '%s/rootfs' % basepath(suffix='') - utils.execute('mkdir', '-p', container_dir) - - # NOTE(vish): No need add the suffix to console.log - os.close(os.open(basepath('console.log', ''), - os.O_CREAT | os.O_WRONLY, 0660)) - - user = manager.AuthManager().get_user(inst['user_id']) - project = manager.AuthManager().get_project(inst['project_id']) - - if not disk_images: - disk_images = {'image_id': inst['image_id'], - 'kernel_id': inst['kernel_id'], - 'ramdisk_id': inst['ramdisk_id']} - - if disk_images['kernel_id']: - fname = '%08x' % int(disk_images['kernel_id']) - self._cache_image(fn=self._fetch_image, - target=basepath('kernel'), - fname=fname, - image_id=disk_images['kernel_id'], - user=user, - project=project) - if disk_images['ramdisk_id']: - fname = '%08x' % int(disk_images['ramdisk_id']) - self._cache_image(fn=self._fetch_image, - target=basepath('ramdisk'), - fname=fname, - image_id=disk_images['ramdisk_id'], - user=user, - project=project) - - root_fname = '%08x' % int(disk_images['image_id']) - size = FLAGS.minimum_root_size - - inst_type_id = inst['instance_type_id'] - inst_type = instance_types.get_instance_type(inst_type_id) - if inst_type['name'] == 'm1.tiny' or suffix == '.rescue': - size = None - root_fname += "_sm" - - self._cache_image(fn=self._fetch_image, - target=basepath('disk'), - fname=root_fname, - cow=FLAGS.use_cow_images, - image_id=disk_images['image_id'], - user=user, - project=project, - size=size) - - if inst_type['local_gb']: - self._cache_image(fn=self._create_local, - target=basepath('disk.local'), - fname="local_%s" % inst_type['local_gb'], - cow=FLAGS.use_cow_images, - local_gb=inst_type['local_gb']) - - # For now, we assume that if we're not using a kernel, we're using a - # partitioned disk image where the target partition is the first - # partition - target_partition = None - if not inst['kernel_id']: - target_partition = "1" - - if FLAGS.libvirt_type == 'lxc': - 
target_partition = None - - if inst['key_data']: - key = str(inst['key_data']) - else: - key = None - net = None - - nets = [] - ifc_template = open(FLAGS.injected_network_template).read() - ifc_num = -1 - have_injected_networks = False - admin_context = context.get_admin_context() - for (network_ref, mapping) in network_info: - ifc_num += 1 - - if not network_ref['injected']: - continue - - have_injected_networks = True - address = mapping['ips'][0]['ip'] - address_v6 = None - if FLAGS.use_ipv6: - address_v6 = mapping['ip6s'][0]['ip'] - net_info = {'name': 'eth%d' % ifc_num, - 'address': address, - 'netmask': network_ref['netmask'], - 'gateway': network_ref['gateway'], - 'broadcast': network_ref['broadcast'], - 'dns': network_ref['dns'], - 'address_v6': address_v6, - 'gateway_v6': network_ref['gateway_v6'], - 'netmask_v6': network_ref['netmask_v6']} - nets.append(net_info) - - if have_injected_networks: - net = str(Template(ifc_template, - searchList=[{'interfaces': nets, - 'use_ipv6': FLAGS.use_ipv6}])) - - if key or net: - inst_name = inst['name'] - img_id = inst.image_id - if key: - LOG.info(_('instance %(inst_name)s: injecting key into' - ' image %(img_id)s') % locals()) - if net: - LOG.info(_('instance %(inst_name)s: injecting net into' - ' image %(img_id)s') % locals()) - try: - disk.inject_data(basepath('disk'), key, net, - partition=target_partition, - nbd=FLAGS.use_cow_images) - - if FLAGS.libvirt_type == 'lxc': - disk.setup_container(basepath('disk'), - container_dir=container_dir, - nbd=FLAGS.use_cow_images) - except Exception as e: - # This could be a windows image, or a vmdk format disk - LOG.warn(_('instance %(inst_name)s: ignoring error injecting' - ' data into image %(img_id)s (%(e)s)') % locals()) - - if FLAGS.libvirt_type == 'uml': - utils.execute('sudo', 'chown', 'root', basepath('disk')) - - def _get_nic_for_xml(self, network, mapping): - # Assume that the gateway also acts as the dhcp server. - dhcp_server = network['gateway'] - gateway_v6 = network['gateway_v6'] - mac_id = mapping['mac'].replace(':', '') - - if FLAGS.allow_project_net_traffic: - if FLAGS.use_ipv6: - net, mask = _get_net_and_mask(network['cidr']) - net_v6, prefixlen_v6 = _get_net_and_prefixlen( - network['cidr_v6']) - extra_params = ("\n" - "\n" - "\n" - "\n") % \ - (net, mask, net_v6, prefixlen_v6) - else: - net, mask = _get_net_and_mask(network['cidr']) - extra_params = ("\n" - "\n") % \ - (net, mask) - else: - extra_params = "\n" - - result = { - 'id': mac_id, - 'bridge_name': network['bridge'], - 'mac_address': mapping['mac'], - 'ip_address': mapping['ips'][0]['ip'], - 'dhcp_server': dhcp_server, - 'extra_params': extra_params, - } - - if gateway_v6: - result['gateway_v6'] = gateway_v6 + "/128" - - return result - - def to_xml(self, instance, rescue=False, network_info=None): - # TODO(termie): cache? 
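Both the interface-file injection above and to_xml below use the same Cheetah pattern: render a template string against a searchList of plain dicts, whose keys become $-variables. A minimal sketch (requires the Cheetah package; the template text is illustrative, not the real injected_network_template):

    from Cheetah.Template import Template

    ifc_template = """\
    #for $ifc in $interfaces
    iface $ifc.name inet static
        address $ifc.address
    #end for
    """

    nets = [{'name': 'eth0', 'address': '10.0.0.3'}]
    print(str(Template(ifc_template, searchList=[{'interfaces': nets}])))
    # iface eth0 inet static
    #     address 10.0.0.3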
- LOG.debug(_('instance %s: starting toXML method'), instance['name']) - - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = _get_network_info(instance) - - nics = [] - for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, - mapping)) - # FIXME(vish): stick this in db - inst_type_id = instance['instance_type_id'] - inst_type = instance_types.get_instance_type(inst_type_id) - - if FLAGS.use_cow_images: - driver_type = 'qcow2' - else: - driver_type = 'raw' - - xml_info = {'type': FLAGS.libvirt_type, - 'name': instance['name'], - 'basepath': os.path.join(FLAGS.instances_path, - instance['name']), - 'memory_kb': inst_type['memory_mb'] * 1024, - 'vcpus': inst_type['vcpus'], - 'rescue': rescue, - 'local': inst_type['local_gb'], - 'driver_type': driver_type, - 'nics': nics} - - if FLAGS.vnc_enabled: - if FLAGS.libvirt_type != 'lxc': - xml_info['vncserver_host'] = FLAGS.vncserver_host - if not rescue: - if instance['kernel_id']: - xml_info['kernel'] = xml_info['basepath'] + "/kernel" - - if instance['ramdisk_id']: - xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" - - xml_info['disk'] = xml_info['basepath'] + "/disk" - - xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - LOG.debug(_('instance %s: finished toXML method'), - instance['name']) - return xml - - def _lookup_by_name(self, instance_name): - """Retrieve libvirt domain object given an instance name. - - All libvirt error handling should be handled in this method and - relevant nova exceptions should be raised in response. - - """ - try: - return self._conn.lookupByName(instance_name) - except libvirt.libvirtError as ex: - error_code = ex.get_error_code() - if error_code == libvirt.VIR_ERR_NO_DOMAIN: - msg = _("Instance %s not found") % instance_name - raise exception.NotFound(msg) - - msg = _("Error from libvirt while looking up %(instance_name)s: " - "[Error Code %(error_code)s] %(ex)s") % locals() - raise exception.Error(msg) - - def get_info(self, instance_name): - """Retrieve information from libvirt for a specific instance name. - - If a libvirt error is encountered during lookup, we might raise a - NotFound exception or Error exception depending on how severe the - libvirt error is. - - """ - virt_dom = self._lookup_by_name(instance_name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time} - - def _create_new_domain(self, xml, persistent=True, launch_flags=0): - # NOTE(justinsb): libvirt has two types of domain: - # * a transient domain disappears when the guest is shutdown - # or the host is rebooted. - # * a permanent domain is not automatically deleted - # NOTE(justinsb): Even for ephemeral instances, transient seems risky - - if persistent: - # To create a persistent domain, first define it, then launch it. - domain = self._conn.defineXML(xml) - - domain.createWithFlags(launch_flags) - else: - # createXML call creates a transient domain - domain = self._conn.createXML(xml, launch_flags) - - return domain - - def get_diagnostics(self, instance_name): - raise exception.ApiError(_("diagnostics are not supported " - "for libvirt")) - - def get_disks(self, instance_name): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - - Returns a list of all block devices for this domain. 
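_create_new_domain above hinges on a libvirt distinction worth spelling out: defineXML() registers a persistent domain that must then be launched explicitly, while createXML() boots a transient domain that disappears once the guest shuts down. A condensed sketch of the two paths, assuming an already-open libvirt connection object conn:

    def create_domain(conn, xml, persistent=True, launch_flags=0):
        if persistent:
            # Persistent: define first, then launch; the definition
            # survives guest shutdown and host reboots.
            domain = conn.defineXML(xml)
            domain.createWithFlags(launch_flags)
        else:
            # Transient: created and started in one call, gone on shutdown.
            domain = conn.createXML(xml, launch_flags)
        return domain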
- """ - domain = self._lookup_by_name(instance_name) - # TODO(devcamcar): Replace libxml2 with etree. - xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - disks = [] - - try: - ret = ctx.xpathEval('/domain/devices/disk') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst is None: - continue - - disks.append(devdst) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - return disks - - def get_interfaces(self, instance_name): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - - Returns a list of all network interfaces for this instance. - """ - domain = self._lookup_by_name(instance_name) - # TODO(devcamcar): Replace libxml2 with etree. - xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - interfaces = [] - - try: - ret = ctx.xpathEval('/domain/devices/interface') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst is None: - continue - - interfaces.append(devdst) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - return interfaces - - def get_vcpu_total(self): - """Get vcpu number of physical computer. - - :returns: the number of cpu core. - - """ - - # On certain platforms, this will raise a NotImplementedError. - try: - return multiprocessing.cpu_count() - except NotImplementedError: - LOG.warn(_("Cannot get the number of cpu, because this " - "function is not implemented for this platform. " - "This error can be safely ignored for now.")) - return 0 - - def get_memory_mb_total(self): - """Get the total memory size(MB) of physical computer. - - :returns: the total amount of memory(MB). - - """ - - if sys.platform.upper() != 'LINUX2': - return 0 - - meminfo = open('/proc/meminfo').read().split() - idx = meminfo.index('MemTotal:') - # transforming kb to mb. - return int(meminfo[idx + 1]) / 1024 - - def get_local_gb_total(self): - """Get the total hdd size(GB) of physical computer. - - :returns: - The total amount of HDD(GB). - Note that this value shows a partition where - NOVA-INST-DIR/instances mounts. - - """ - - hddinfo = os.statvfs(FLAGS.instances_path) - return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 - - def get_vcpu_used(self): - """ Get vcpu usage number of physical computer. - - :returns: The total number of vcpu that currently used. - - """ - - total = 0 - for dom_id in self._conn.listDomainsID(): - dom = self._conn.lookupByID(dom_id) - total += len(dom.vcpus()[1]) - return total - - def get_memory_mb_used(self): - """Get the free memory size(MB) of physical computer. - - :returns: the total usage of memory(MB). - - """ - - if sys.platform.upper() != 'LINUX2': - return 0 - - m = open('/proc/meminfo').read().split() - idx1 = m.index('MemFree:') - idx2 = m.index('Buffers:') - idx3 = m.index('Cached:') - avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024 - return self.get_memory_mb_total() - avail - - def get_local_gb_used(self): - """Get the free hdd size(GB) of physical computer. - - :returns: - The total usage of HDD(GB). - Note that this value shows a partition where - NOVA-INST-DIR/instances mounts. 
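The /proc/meminfo parsing above leans on a small trick: split() flattens the whole file into tokens, so a value always sits immediately after its label. A self-contained illustration with canned input:

    meminfo = """\
    MemTotal:        8056204 kB
    MemFree:         3023012 kB
    Buffers:          402768 kB
    Cached:          2456176 kB
    """.split()

    # After split(), tokens look like ['MemTotal:', '8056204', 'kB', ...],
    # so the value always follows its label.
    idx = meminfo.index('MemTotal:')
    total_mb = int(meminfo[idx + 1]) // 1024  # kB -> MB, integer division
    assert total_mb == 7867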
- - """ - - hddinfo = os.statvfs(FLAGS.instances_path) - avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024 - return self.get_local_gb_total() - avail - - def get_hypervisor_type(self): - """Get hypervisor type. - - :returns: hypervisor type (ex. qemu) - - """ - - return self._conn.getType() - - def get_hypervisor_version(self): - """Get hypervisor version. - - :returns: hypervisor version (ex. 12003) - - """ - - # NOTE(justinsb): getVersion moved between libvirt versions - # Trying to do be compatible with older versions is a lost cause - # But ... we can at least give the user a nice message - method = getattr(self._conn, 'getVersion', None) - if method is None: - raise exception.Error(_("libvirt version is too old" - " (does not support getVersion)")) - # NOTE(justinsb): If we wanted to get the version, we could: - # method = getattr(libvirt, 'getVersion', None) - # NOTE(justinsb): This would then rely on a proper version check - - return method() - - def get_cpu_info(self): - """Get cpuinfo information. - - Obtains cpu feature from virConnect.getCapabilities, - and returns as a json string. - - :return: see above description - - """ - - xml = self._conn.getCapabilities() - xml = libxml2.parseDoc(xml) - nodes = xml.xpathEval('//host/cpu') - if len(nodes) != 1: - raise exception.Invalid(_("Invalid xml. '' must be 1," - "but %d\n") % len(nodes) - + xml.serialize()) - - cpu_info = dict() - - arch_nodes = xml.xpathEval('//host/cpu/arch') - if arch_nodes: - cpu_info['arch'] = arch_nodes[0].getContent() - - model_nodes = xml.xpathEval('//host/cpu/model') - if model_nodes: - cpu_info['model'] = model_nodes[0].getContent() - - vendor_nodes = xml.xpathEval('//host/cpu/vendor') - if vendor_nodes: - cpu_info['vendor'] = vendor_nodes[0].getContent() - - topology_nodes = xml.xpathEval('//host/cpu/topology') - topology = dict() - if topology_nodes: - topology_node = topology_nodes[0].get_properties() - while topology_node: - name = topology_node.get_name() - topology[name] = topology_node.getContent() - topology_node = topology_node.get_next() - - keys = ['cores', 'sockets', 'threads'] - tkeys = topology.keys() - if set(tkeys) != set(keys): - ks = ', '.join(keys) - raise exception.Invalid(_("Invalid xml: topology" - "(%(topology)s) must have " - "%(ks)s") % locals()) - - feature_nodes = xml.xpathEval('//host/cpu/feature') - features = list() - for nodes in feature_nodes: - features.append(nodes.get_properties().getContent()) - - cpu_info['topology'] = topology - cpu_info['features'] = features - return utils.dumps(cpu_info) - - def block_stats(self, instance_name, disk): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - """ - domain = self._lookup_by_name(instance_name) - return domain.blockStats(disk) - - def interface_stats(self, instance_name, interface): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - """ - domain = self._lookup_by_name(instance_name) - return domain.interfaceStats(interface) - - def get_console_pool_info(self, console_type): - #TODO(mdragon): console proxy should be implemented for libvirt, - # in case someone wants to use it with kvm or - # such. For now return fake data. 
- return {'address': '127.0.0.1', - 'username': 'fakeuser', - 'password': 'fakepassword'} - - def refresh_security_group_rules(self, security_group_id): - self.firewall_driver.refresh_security_group_rules(security_group_id) - - def refresh_security_group_members(self, security_group_id): - self.firewall_driver.refresh_security_group_members(security_group_id) - - def update_available_resource(self, ctxt, host): - """Updates compute manager resource info on ComputeNode table. - - This method is called when nova-coompute launches, and - whenever admin executes "nova-manage service update_resource". - - :param ctxt: security context - :param host: hostname that compute manager is currently running - - """ - - try: - service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] - except exception.NotFound: - raise exception.Invalid(_("Cannot update compute manager " - "specific info, because no service " - "record was found.")) - - # Updating host information - dic = {'vcpus': self.get_vcpu_total(), - 'memory_mb': self.get_memory_mb_total(), - 'local_gb': self.get_local_gb_total(), - 'vcpus_used': self.get_vcpu_used(), - 'memory_mb_used': self.get_memory_mb_used(), - 'local_gb_used': self.get_local_gb_used(), - 'hypervisor_type': self.get_hypervisor_type(), - 'hypervisor_version': self.get_hypervisor_version(), - 'cpu_info': self.get_cpu_info()} - - compute_node_ref = service_ref['compute_node'] - if not compute_node_ref: - LOG.info(_('Compute_service record created for %s ') % host) - dic['service_id'] = service_ref['id'] - db.compute_node_create(ctxt, dic) - else: - LOG.info(_('Compute_service record updated for %s ') % host) - db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic) - - def compare_cpu(self, cpu_info): - """Checks the host cpu is compatible to a cpu given by xml. - - "xml" must be a part of libvirt.openReadonly().getCapabilities(). - return values follows by virCPUCompareResult. - if 0 > return value, do live migration. - 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' - - :param cpu_info: json string that shows cpu feature(see get_cpu_info()) - :returns: - None. if given cpu info is not compatible to this server, - raise exception. - - """ - - LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) - dic = utils.loads(cpu_info) - xml = str(Template(self.cpuinfo_xml, searchList=dic)) - LOG.info(_('to xml...\n:%s ' % xml)) - - u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" - m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") - # unknown character exists in xml, then libvirt complains - try: - ret = self._conn.compareCPU(xml, 0) - except libvirt.libvirtError, e: - ret = e.message - LOG.error(m % locals()) - raise - - if ret <= 0: - raise exception.Invalid(m % locals()) - - return - - def ensure_filtering_rules_for_instance(self, instance_ref, - time=None): - """Setting up filtering rules and waiting for its completion. - - To migrate an instance, filtering rules to hypervisors - and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, - since filtering rules to firewall rules can be set faster). - - Concretely, the below method must be called. - - setup_basic_filtering (for nova-basic, etc.) - - prepare_instance_filter(for nova-instance-instance-xxx, etc.) - - to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), - so , no need to be called. 
- - Don't use thread for this method since migration should - not be started when setting-up filtering rules operations - are not completed. - - :params instance_ref: nova.db.sqlalchemy.models.Instance object - - """ - - if not time: - time = greenthread - - # If any instances never launch at destination host, - # basic-filtering must be set here. - self.firewall_driver.setup_basic_filtering(instance_ref) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref) - - # wait for completion - timeout_count = range(FLAGS.live_migration_retry_count) - while timeout_count: - if self.firewall_driver.instance_filter_exists(instance_ref): - break - timeout_count.pop() - if len(timeout_count) == 0: - msg = _('Timeout migrating for %s. nwfilter not found.') - raise exception.Error(msg % instance_ref.name) - time.sleep(1) - - def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): - """Spawning live_migration operation for distributing high-load. - - :params ctxt: security context - :params instance_ref: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :params dest: destination host - :params post_method: - post operation method. - expected nova.compute.manager.post_live_migration. - :params recover_method: - recovery method when any exception occurs. - expected nova.compute.manager.recover_live_migration. - - """ - - greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, - post_method, recover_method) - - def _live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): - """Do live migration. - - :params ctxt: security context - :params instance_ref: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :params dest: destination host - :params post_method: - post operation method. - expected nova.compute.manager.post_live_migration. - :params recover_method: - recovery method when any exception occurs. - expected nova.compute.manager.recover_live_migration. - - """ - - # Do live migration. - try: - flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [getattr(libvirt, x.strip()) for x in flaglist] - logical_sum = reduce(lambda x, y: x | y, flagvals) - - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - - except Exception: - recover_method(ctxt, instance_ref, dest=dest) - raise - - # Waiting for completion of live_migration. - timer = utils.LoopingCall(f=None) - - def wait_for_live_migration(): - """waiting for live migration completion""" - try: - self.get_info(instance_ref.name)['state'] - except exception.NotFound: - timer.stop() - post_method(ctxt, instance_ref, dest) - - timer.f = wait_for_live_migration - timer.start(interval=0.5, now=True) - - def unfilter_instance(self, instance_ref): - """See comments of same method in firewall_driver.""" - self.firewall_driver.unfilter_instance(instance_ref) - - -class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): - """Prepare filters for the instance. 
-
-    def unfilter_instance(self, instance_ref):
-        """See comments of same method in firewall_driver."""
-        self.firewall_driver.unfilter_instance(instance_ref)
-
-
-class FirewallDriver(object):
-    def prepare_instance_filter(self, instance, network_info=None):
-        """Prepare filters for the instance.
-
-        At this point, the instance isn't running yet."""
-        raise NotImplementedError()
-
-    def unfilter_instance(self, instance):
-        """Stop filtering instance"""
-        raise NotImplementedError()
-
-    def apply_instance_filter(self, instance):
-        """Apply instance filter.
-
-        Once this method returns, the instance should be firewalled
-        appropriately. This method should as far as possible be a
-        no-op. It's vastly preferred to get everything set up in
-        prepare_instance_filter.
-        """
-        raise NotImplementedError()
-
-    def refresh_security_group_rules(self, security_group_id):
-        """Refresh security group rules from data store
-
-        Gets called when a rule has been added to or removed from
-        the security group."""
-        raise NotImplementedError()
-
-    def refresh_security_group_members(self, security_group_id):
-        """Refresh security group members from data store
-
-        Gets called when an instance gets added to or removed from
-        the security group."""
-        raise NotImplementedError()
-
-    def setup_basic_filtering(self, instance, network_info=None):
-        """Create rules to block spoofing and allow dhcp.
-
-        This gets called when spawning an instance, before
-        :method:`prepare_instance_filter`.
-
-        """
-        raise NotImplementedError()
-
-    def instance_filter_exists(self, instance):
-        """Check that nova-instance-instance-xxx exists"""
-        raise NotImplementedError()
-
-
-class NWFilterFirewall(FirewallDriver):
-    """
-    This class implements a network filtering mechanism versatile
-    enough for EC2-style Security Group filtering by leveraging
-    libvirt's nwfilter.
-
-    First, all instances get a filter ("nova-base-filter") applied.
-    This filter provides some basic security such as protection against
-    MAC spoofing, IP spoofing, and ARP spoofing.
-
-    This filter drops all incoming ipv4 and ipv6 connections.
-    Outgoing connections are never blocked.
-
-    Second, every security group maps to a nwfilter filter(*).
-    NWFilters can be updated at runtime and changes are applied
-    immediately, so changes to security groups can be applied at
-    runtime (as mandated by the spec).
-
-    Security group rules are named "nova-secgroup-<id>" where <id>
-    is the internal id of the security group. They're applied only on
-    hosts that have instances in the security group in question.
-
-    Updates to security groups are done by updating the data model
-    (in response to API calls) followed by a request sent to all
-    the nodes with instances in the security group to refresh the
-    security group.
-
-    Each instance has its own NWFilter, which references the above
-    mentioned security group NWFilters. This was done because
-    interfaces can only reference one filter while filters can
-    reference multiple other filters. This has the added benefit of
-    actually being able to add and remove security groups from an
-    instance at run time. This functionality is not exposed anywhere,
-    though.
-
-    Outstanding questions:
-    - The name is unique, so would there be any good reason to sync
-      the uuid across the nodes (by assigning it from the datamodel)?
-
-
-    (*) This sentence brought to you by the redundancy department of
-        redundancy.
-
-    """
-
-    def __init__(self, get_connection, **kwargs):
-        self._libvirt_get_connection = get_connection
-        self.static_filters_configured = False
-        self.handle_security_groups = False
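
The per-instance filters described in that class docstring are plain nwfilter
documents that do nothing but reference other filters; a minimal sketch of
defining one by hand, assuming a writable libvirt connection (the filter names
are illustrative, not real deployments):

    import libvirt

    conn = libvirt.open('qemu:///system')
    xml = ("<filter name='nova-instance-demo' chain='root'>"
           "<filterref filter='nova-base'/>"
           "<filterref filter='nova-secgroup-1'/>"
           "</filter>")
    # Redefining an existing filter replaces it in place, which is what
    # makes runtime security group updates possible.
    conn.nwfilterDefineXML(xml)
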
-
-    def apply_instance_filter(self, instance):
-        """No-op.
-
-        Everything is done in prepare_instance_filter"""
-        pass
-
-    def _get_connection(self):
-        return self._libvirt_get_connection()
-    _conn = property(_get_connection)
-
-    def nova_dhcp_filter(self):
-        """The standard allow-dhcp-server filter is an <ip> one, so it uses
-           ebtables to allow traffic through. Without a corresponding rule
-           in iptables, it'll get blocked anyway."""
-
-        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
-                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
-                    <rule action='accept' direction='out'
-                          priority='100'>
-                      <udp srcipaddr='0.0.0.0'
-                           dstipaddr='255.255.255.255'
-                           srcportstart='68'
-                           dstportend='67'/>
-                    </rule>
-                    <rule action='accept' direction='in'
-                          priority='100'>
-                      <udp srcipaddr='$DHCPSERVER'
-                           srcportstart='67'
-                           dstportend='68'/>
-                    </rule>
-                  </filter>'''
-
-    def nova_ra_filter(self):
-        return '''<filter name='nova-allow-ra-server' chain='root'>
-                    <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
-                    <rule action='accept' direction='inout'
-                          priority='100'>
-                      <icmpv6 srcipaddr='$RASERVER'/>
-                    </rule>
-                  </filter>'''
-
-    def setup_basic_filtering(self, instance, network_info=None):
-        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
-        logging.info('called setup_basic_filtering in nwfilter')
-
-        if not network_info:
-            network_info = _get_network_info(instance)
-
-        if self.handle_security_groups:
-            # No point in setting up a filter set that we'll be overriding
-            # anyway.
-            return
-
-        logging.info('ensuring static filters')
-        self._ensure_static_filters()
-
-        if instance['image_id'] == str(FLAGS.vpn_image_id):
-            base_filter = 'nova-vpn'
-        else:
-            base_filter = 'nova-base'
-
-        for (network, mapping) in network_info:
-            nic_id = mapping['mac'].replace(':', '')
-            instance_filter_name = self._instance_filter_name(instance, nic_id)
-            self._define_filter(self._filter_container(instance_filter_name,
-                                                       [base_filter]))
-
-    def _ensure_static_filters(self):
-        if self.static_filters_configured:
-            return
-
-        self._define_filter(self._filter_container('nova-base',
-                                                   ['no-mac-spoofing',
-                                                    'no-ip-spoofing',
-                                                    'no-arp-spoofing',
-                                                    'allow-dhcp-server']))
-        self._define_filter(self._filter_container('nova-vpn',
-                                                   ['allow-dhcp-server']))
-        self._define_filter(self.nova_base_ipv4_filter)
-        self._define_filter(self.nova_base_ipv6_filter)
-        self._define_filter(self.nova_dhcp_filter)
-        self._define_filter(self.nova_ra_filter)
-        if FLAGS.allow_project_net_traffic:
-            self._define_filter(self.nova_project_filter)
-            if FLAGS.use_ipv6:
-                self._define_filter(self.nova_project_filter_v6)
-
-        self.static_filters_configured = True
-
-    def _filter_container(self, name, filters):
-        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
-              name,
-              ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
-        return xml
-
-    def nova_base_ipv4_filter(self):
-        retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
-        for protocol in ['tcp', 'udp', 'icmp']:
-            for direction, action, priority in [('out', 'accept', 399),
-                                                ('in', 'drop', 400)]:
-                retval += """<rule action='%s' direction='%s' priority='%d'>
-                               <%s />
-                             </rule>""" % (action, direction,
-                                           priority, protocol)
-        retval += '</filter>'
-        return retval
-
-    def nova_base_ipv6_filter(self):
-        retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
-        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
-            for direction, action, priority in [('out', 'accept', 399),
-                                                ('in', 'drop', 400)]:
-                retval += """<rule action='%s' direction='%s' priority='%d'>
-                               <%s />
-                             </rule>""" % (action, direction,
-                                           priority, protocol)
-        retval += '</filter>'
-        return retval
-
-    def nova_project_filter(self):
-        retval = "<filter name='nova-project' chain='ipv4'>"
-        for protocol in ['tcp', 'udp', 'icmp']:
-            retval += """<rule action='accept' direction='in' priority='200'>
-                           <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
-                         </rule>""" % protocol
-        retval += '</filter>'
-        return retval
-
-    def nova_project_filter_v6(self):
-        retval = "<filter name='nova-project-v6' chain='ipv6'>"
-        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
-            retval += """<rule action='accept' direction='inout'
-                               priority='200'>
-                           <%s srcipaddr='$PROJNETV6'
-                               srcipmask='$PROJMASKV6' />
-                         </rule>""" % (protocol)
-        retval += '</filter>'
-        return retval
-
-    def _define_filter(self, xml):
-        if callable(xml):
-            xml = xml()
-        # execute in a native thread and block current greenthread until done
-        tpool.execute(self._conn.nwfilterDefineXML, xml)
-
-    def unfilter_instance(self, instance):
-        # Nothing to do
-        pass
-
-    def prepare_instance_filter(self, instance, network_info=None):
-        """
-        Creates an NWFilter for the given instance. In the process,
-        it makes sure the filters for the security groups as well as
-        the base filter are all in place.
-        """
-        if not network_info:
-            network_info = _get_network_info(instance)
-        if instance['image_id'] == str(FLAGS.vpn_image_id):
-            base_filter = 'nova-vpn'
-        else:
-            base_filter = 'nova-base'
-
-        ctxt = context.get_admin_context()
-
-        instance_secgroup_filter_name = \
-            '%s-secgroup' % (self._instance_filter_name(instance))
-            #% (instance_filter_name,)
-
-        instance_secgroup_filter_children = ['nova-base-ipv4',
-                                             'nova-base-ipv6',
-                                             'nova-allow-dhcp-server']
-
-        for security_group in \
-                db.security_group_get_by_instance(ctxt, instance['id']):
-
-            self.refresh_security_group_rules(security_group['id'])
-
-            instance_secgroup_filter_children += [('nova-secgroup-%s' %
-                                                   security_group['id'])]
-
-        self._define_filter(
-            self._filter_container(instance_secgroup_filter_name,
-                                   instance_secgroup_filter_children))
-
-        for (network, mapping) in network_info:
-            nic_id = mapping['mac'].replace(':', '')
-            instance_filter_name = self._instance_filter_name(instance, nic_id)
-            instance_filter_children = \
-                [base_filter, instance_secgroup_filter_name]
-
-            if FLAGS.use_ipv6:
-                gateway_v6 = network['gateway_v6']
-
-                if gateway_v6:
-                    instance_secgroup_filter_children += \
-                        ['nova-allow-ra-server']
-
-            if FLAGS.allow_project_net_traffic:
-                instance_filter_children += ['nova-project']
-                if FLAGS.use_ipv6:
-                    instance_filter_children += ['nova-project-v6']
-
-            self._define_filter(
-                self._filter_container(instance_filter_name,
-                                       instance_filter_children))
-
-        return
-
-    def refresh_security_group_rules(self, security_group_id):
-        return self._define_filter(
-            self.security_group_to_nwfilter_xml(security_group_id))
-
-    def security_group_to_nwfilter_xml(self, security_group_id):
-        security_group = db.security_group_get(context.get_admin_context(),
-                                               security_group_id)
-        rule_xml = ""
-        v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
-        for rule in security_group.rules:
-            rule_xml += "<rule action='accept' direction='in' priority='300'>"
-            if rule.cidr:
-                version = _get_ip_version(rule.cidr)
-                if(FLAGS.use_ipv6 and version == 6):
-                    net, prefixlen = _get_net_and_prefixlen(rule.cidr)
-                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                                (v6protocol[rule.protocol], net, prefixlen)
-                else:
-                    net, mask = _get_net_and_mask(rule.cidr)
-                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                                (rule.protocol, net, mask)
-                if rule.protocol in ['tcp', 'udp']:
-                    rule_xml += "dstportstart='%s' dstportend='%s' " % \
-                                (rule.from_port, rule.to_port)
-                elif rule.protocol == 'icmp':
-                    LOG.info('rule.protocol: %r, rule.from_port: %r, '
-                             'rule.to_port: %r', rule.protocol,
-                             rule.from_port, rule.to_port)
-                    if rule.from_port != -1:
-                        rule_xml += "type='%s' " % rule.from_port
-                    if rule.to_port != -1:
-                        rule_xml += "code='%s' " % rule.to_port
-
-                rule_xml += '/>\n'
-            rule_xml += "</rule>\n"
-        xml = "<filter name='nova-secgroup-%s' " % security_group_id
-        if(FLAGS.use_ipv6):
-            xml += "chain='root'>%s</filter>" % rule_xml
-        else:
-            xml += "chain='ipv4'>%s</filter>" % rule_xml
-        return xml
-- cgit


Date: Tue, 3 May 2011 17:08:04 -0700
Subject: Fix indentation.
--- nova/virt/xenapi/volume_utils.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 819c48be5..55c11a4ad 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -246,19 +246,21 @@ def _get_target(volume_id): 1) volume_ref['host'] must resolve to something rather than loopback """ volume_ref = db.volume_get(context.get_admin_context(), - volume_id) + volume_id) result = (None, None) try: - (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', + '-m', 'discovery', + '-t', 'sendtargets', + '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: volume_name = "volume-%08x" % volume_id for target in r.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break + (location, _sep, iscsi_name) = target.partition(" ") + break iscsi_portal = location.split(",")[0] result = (iscsi_name, iscsi_portal) return result -- cgit From fa9eeb65533d897f6e81067986dc614582fb310a Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 6 May 2011 07:19:57 -0700 Subject: grabbed from dist-sched branch --- nova/virt/hyperv.py | 9 +++++++ nova/virt/libvirt_conn.py | 8 ++++++ nova/virt/xenapi_conn.py | 69 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 9026e737e..573e5130e 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -486,3 +486,12 @@ class HyperVConnection(driver.ComputeDriver): def update_available_resource(self, ctxt, host): """This method is supported only by libvirt.""" return + + def update_host_status(self): + """See xenapi_conn.py implementation.""" + pass + + def get_host_stats(self, refresh=False): + """See xenapi_conn.py implementation.""" + pass + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9780c69a6..555e44ce2 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1582,6 +1582,14 @@ class LibvirtConnection(driver.ComputeDriver): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance_ref) + def update_host_status(self): + """See xenapi_conn.py implementation.""" + pass + + def get_host_stats(self, refresh=False): + """See xenapi_conn.py implementation.""" + pass + class FirewallDriver(object): def prepare_instance_filter(self, instance, network_info=None): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 0cabccf08..63a53af2e 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -168,6 +168,13 @@ class XenAPIConnection(driver.ComputeDriver): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) self._volumeops = VolumeOps(session) + self._host_state = None + + @property + def HostState(self): + if not self._host_state: + self._host_state = HostState(self.session) + return self._host_state def init_host(self, host): #FIXME(armando): implement this @@ -315,6 +322,16 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') + def update_host_status(self): + """Update the status info of the host, and return those values + to the calling 
program.""" + return self.HostState.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first.""" + return self.HostState.get_host_stats(refresh=refresh) + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" @@ -436,6 +453,58 @@ class XenAPISession(object): raise +class HostState(object): + """Manages information about the XenServer host this compute + node is running on. + """ + def __init__(self, session): + super(HostState, self).__init__() + self._session = session + self._stats = {} + self.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first. + """ + if refresh: + self.update_status() + return self._stats + + def update_status(self): + """Since under Xenserver, a compute node runs on a given host, + we can get host status information using xenapi. + """ + LOG.debug(_("Updating host stats")) + # Make it something unlikely to match any actual instance ID + task_id = random.randint(-80000, -70000) + task = self._session.async_call_plugin("xenhost", "host_data", {}) + task_result = self._session.wait_for_task(task, task_id) + if not task_result: + task_result = json.dumps("") + try: + data = json.loads(task_result) + except ValueError as e: + # Invalid JSON object + LOG.error(_("Unable to get updated status: %s") % e) + return + # Get the SR usage + try: + sr_ref = vm_utils.safe_find_sr(self._session) + except exception.NotFound as e: + # No SR configured + LOG.error(_("Unable to get SR for this host: %s") % e) + return + sr_rec = self._session.get_xenapi().SR.get_record(sr_ref) + total = int(sr_rec["virtual_allocation"]) + used = int(sr_rec["physical_utilisation"]) + data["disk"] = dd = {} + dd["total"] = total + dd["used"] = used + dd["available"] = total - used + self._stats = data + + def _parse_xmlrpc_value(val): """Parse the given value as if it were an XML-RPC value. This is sometimes used as the format for the task.result field.""" -- cgit From 595f742763336bb4edeb55f7556ce618bf85481e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 6 May 2011 11:04:00 -0700 Subject: tests pass again --- nova/virt/hyperv.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 573e5130e..1142e97a4 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -494,4 +494,3 @@ class HyperVConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """See xenapi_conn.py implementation.""" pass - -- cgit From 3c0d31a1ae91e30e06f1b33d35915037472b3691 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 9 May 2011 08:23:25 -0700 Subject: basic test working --- nova/virt/xenapi_conn.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 63a53af2e..0e545150f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -57,6 +57,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. 
- suffix "_rec" for record objects """ +import json +import random import sys import urlparse import xmlrpclib @@ -67,10 +69,12 @@ from eventlet import timeout from nova import context from nova import db +from nova import exception from nova import utils from nova import flags from nova import log as logging from nova.virt import driver +from nova.virt.xenapi import vm_utils from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps -- cgit From 1c1a06c3731dd82b331f317ba52edbfe2110a40e Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 9 May 2011 11:47:33 -0400 Subject: clean up unused functions from virt/images.py --- nova/virt/images.py | 61 ----------------------------------------------------- 1 file changed, 61 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..1eb6f4a5f 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -51,67 +51,6 @@ def fetch(image_id, path, _user, _project): metadata = image_service.get(elevated, image_id, image_file) return metadata - -# NOTE(vish): The methods below should be unnecessary, but I'm leaving -# them in case the glance client does not work on windows. -def _fetch_image_no_curl(url, path, headers): - request = urllib2.Request(url) - for (k, v) in headers.iteritems(): - request.add_header(k, v) - - def urlretrieve(urlfile, fpath): - chunk = 1 * 1024 * 1024 - f = open(fpath, "wb") - while 1: - data = urlfile.read(chunk) - if not data: - break - f.write(data) - - urlopened = urllib2.urlopen(request) - urlretrieve(urlopened, path) - LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals()) - - -def _fetch_s3_image(image, path, user, project): - url = image_url(image) - - # This should probably move somewhere else, like e.g. a download_as - # method on User objects and at the same time get rewritten to use - # a web client. - headers = {} - headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - - (_, _, url_path, _, _, _) = urlparse.urlparse(url) - access = manager.AuthManager().get_access_key(user, project) - signature = signer.Signer(user.secret.encode()).s3_authorization(headers, - 'GET', - url_path) - headers['Authorization'] = 'AWS %s:%s' % (access, signature) - - if sys.platform.startswith('win'): - return _fetch_image_no_curl(url, path, headers) - else: - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '\'%s: %s\'' % (k, v)] - - cmd += ['-o', path] - return utils.execute(*cmd) - - -def _fetch_local_image(image, path, user, project): - source = _image_path(os.path.join(image, 'image')) - if sys.platform.startswith('win'): - return shutil.copy(source, path) - else: - return utils.execute('cp', source, path) - - -def _image_path(path): - return os.path.join(FLAGS.images_path, path) - - # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. 
def image_url(image):
-- cgit


From aa73995f9ba7e4aaaee00ddd0db0b92dcd92dd54 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Tue, 10 May 2011 16:11:59 +0000
Subject: Change xenapi's wait_for_task to handle multiple simultaneous
 queries to fix lp:766404

---
 nova/virt/xenapi_conn.py | 86 ++++++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 46 deletions(-)
(limited to 'nova/virt')

diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 0cabccf08..5dc6b034f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -326,7 +326,6 @@ class XenAPISession(object):
                                 "(is the Dom0 disk full?)"))
         with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
             self._session.login_with_password(user, pw)
-        self.loop = None
 
     def get_imported_xenapi(self):
         """Stubout point. This can be replaced with a mock xenapi module."""
@@ -363,57 +362,52 @@ class XenAPISession(object):
 
     def wait_for_task(self, task, id=None):
         """Return the result of the given task. The task is polled
-        until it completes.
Not re-entrant.""" + until it completes.""" done = event.Event() - self.loop = utils.LoopingCall(self._poll_task, id, task, done) - self.loop.start(FLAGS.xenapi_task_poll_interval, now=True) - rv = done.wait() - self.loop.stop() - return rv - - def _stop_loop(self): - """Stop polling for task to finish.""" - #NOTE(sandy-walsh) Had to break this call out to support unit tests. - if self.loop: - self.loop.stop() + loop = utils.LoopingCall(f=None) + + def _poll_task(): + """Poll the given XenAPI task, and return the result if the + action was completed successfully or not. + """ + try: + name = self._session.xenapi.task.get_name_label(task) + status = self._session.xenapi.task.get_status(task) + if id: + action = dict( + instance_id=int(id), + action=name[0:255], # Ensure action is never > 255 + error=None) + if status == "pending": + return + elif status == "success": + result = self._session.xenapi.task.get_result(task) + LOG.info(_("Task [%(name)s] %(task)s status:" + " success %(result)s") % locals()) + done.send(_parse_xmlrpc_value(result)) + else: + error_info = self._session.xenapi.task.get_error_info(task) + action["error"] = str(error_info) + LOG.warn(_("Task [%(name)s] %(task)s status:" + " %(status)s %(error_info)s") % locals()) + done.send_exception(self.XenAPI.Failure(error_info)) + + if id: + db.instance_action_create(context.get_admin_context(), + action) + except self.XenAPI.Failure, exc: + LOG.warn(exc) + done.send_exception(*sys.exc_info()) + loop.stop() + + loop.f = _poll_task + loop.start(FLAGS.xenapi_task_poll_interval, now=True) + return done.wait() def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" return self.XenAPI.Session(url) - def _poll_task(self, id, task, done): - """Poll the given XenAPI task, and fire the given action if we - get a result. 
- """ - try: - name = self._session.xenapi.task.get_name_label(task) - status = self._session.xenapi.task.get_status(task) - if id: - action = dict( - instance_id=int(id), - action=name[0:255], # Ensure action is never > 255 - error=None) - if status == "pending": - return - elif status == "success": - result = self._session.xenapi.task.get_result(task) - LOG.info(_("Task [%(name)s] %(task)s status:" - " success %(result)s") % locals()) - done.send(_parse_xmlrpc_value(result)) - else: - error_info = self._session.xenapi.task.get_error_info(task) - action["error"] = str(error_info) - LOG.warn(_("Task [%(name)s] %(task)s status:" - " %(status)s %(error_info)s") % locals()) - done.send_exception(self.XenAPI.Failure(error_info)) - - if id: - db.instance_action_create(context.get_admin_context(), action) - except self.XenAPI.Failure, exc: - LOG.warn(exc) - done.send_exception(*sys.exc_info()) - self._stop_loop() - def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details""" try: -- cgit From 43fa5afac9e5af74e2e3977a5dafd9640d064cf1 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 11 May 2011 15:12:12 +0000 Subject: Abstract out IPv6 address generation to pluggable backends --- nova/virt/libvirt_conn.py | 3 ++- nova/virt/xenapi/vmops.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9780c69a6..4dce3b41f 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -60,6 +60,7 @@ from nova import flags from nova import log as logging from nova import utils from nova import vnc +from nova import ipv6 from nova.auth import manager from nova.compute import instance_types from nova.compute import power_state @@ -185,7 +186,7 @@ def _get_network_info(instance): prefix = network['cidr_v6'] mac = instance['mac_address'] return { - 'ip': utils.to_global_ipv6(prefix, mac), + 'ip': ipv6.to_global(prefix, mac), 'netmask': network['netmask_v6'], 'enabled': '1'} diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index fe9a74dd6..0b05e702a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -34,6 +34,7 @@ from nova import log as logging from nova import exception from nova import utils from nova import flags +from nova import ipv6 from nova.auth.manager import AuthManager from nova.compute import power_state @@ -808,7 +809,7 @@ class VMOps(object): def ip6_dict(): return { - "ip": utils.to_global_ipv6(network['cidr_v6'], + "ip": ipv6.to_global(network['cidr_v6'], instance['mac_address']), "netmask": network['netmask_v6'], "enabled": "1"} -- cgit From d2b8350a026e0f00eae7cadbacaa15d4b44331af Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 11 May 2011 21:04:40 +0000 Subject: Implement IPv6 address generation that includes account identifier --- nova/virt/libvirt_conn.py | 3 ++- nova/virt/xenapi/vmops.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index cde864b0d..80e1a1f85 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -185,8 +185,9 @@ def _get_network_info(instance): def ip6_dict(): prefix = network['cidr_v6'] mac = instance['mac_address'] + project_id = instance['project_id'] return { - 'ip': ipv6.to_global(prefix, mac), + 'ip': ipv6.to_global(prefix, mac, project_id), 'netmask': network['netmask_v6'], 'enabled': '1'} diff --git a/nova/virt/xenapi/vmops.py 
b/nova/virt/xenapi/vmops.py index 0b05e702a..cc2b54331 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -810,7 +810,8 @@ class VMOps(object): def ip6_dict(): return { "ip": ipv6.to_global(network['cidr_v6'], - instance['mac_address']), + instance['mac_address'], + instance['project_id']), "netmask": network['netmask_v6'], "enabled": "1"} -- cgit From fd8b9eb204b77da583f1aee4022920367730823f Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Wed, 11 May 2011 17:06:56 -0700 Subject: Fix remote volume code --- nova/virt/xenapi/volume_utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 55c11a4ad..7821a4f7e 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -204,13 +204,16 @@ def _get_volume_id(path_or_id): if isinstance(path_or_id, int): return path_or_id # n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes + # :volume- is for remote volumes + # -volume- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + volume_id = path_or_id[path_or_id.find(':volume-') + 1:] if volume_id == path_or_id: volume_id = path_or_id[path_or_id.find('-volume--') + 1:] volume_id = volume_id.replace('volume--', '') + else: + volume_id = volume_id.replace('volume-', '') + volume_id = volume_id[0:volume_id.find('-')] return int(volume_id) -- cgit From ad3f578a37001957361014c7400dbe2e8ddd0baf Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Thu, 12 May 2011 17:44:07 +0400 Subject: Added network_info into refresh_security_group_rules --- nova/virt/libvirt_conn.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..1e0a25a17 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1612,7 +1612,9 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, security_group_id): + def refresh_security_group_rules(self, + security_group_id, + network_info=None): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -1911,7 +1913,9 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, security_group_id): + def refresh_security_group_rules(self, + security_group_id, + network_info=None): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -2169,15 +2173,19 @@ class IptablesFirewallDriver(FirewallDriver): def refresh_security_group_members(self, security_group): pass - def refresh_security_group_rules(self, security_group): - self.do_refresh_security_group_rules(security_group) + def refresh_security_group_rules(self, security_group, network_info=None): + self.do_refresh_security_group_rules(security_group, network_info) self.iptables.apply() @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, security_group): + def do_refresh_security_group_rules(self, + security_group, + network_info=None): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - self.add_filters_for_instance(instance) + if not network_info: + network_info = _get_network_info(instance) + 
self.add_filters_for_instance(instance, network_info) def _security_group_chain_name(self, security_group_id): return 'nova-sg-%s' % (security_group_id,) -- cgit From e7662bfcead8df8cc1fc655af6da15dc47777565 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 12 May 2011 13:42:04 -0700 Subject: fix for lp760921. Previously, if tune2fs failed, as it does on windows hosts, kpartx -d also failed to be called which leaves mapped partitions that retain holds on the nbd device. These holds cause the observed errors. --- nova/virt/disk.py | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/disk.py b/nova/virt/disk.py index ddea1a1f7..f8aea1f34 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): else: mapped_device = device - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) - - tmpdir = tempfile.mkdtemp() try: - # mount loopback to dir - out, err = utils.execute( - 'sudo', 'mount', mapped_device, tmpdir) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) + + tmpdir = tempfile.mkdtemp() try: - inject_data_into_fs(tmpdir, key, net, utils.execute) + # mount loopback to dir + out, err = utils.execute( + 'sudo', 'mount', mapped_device, tmpdir) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + inject_data_into_fs(tmpdir, key, net, utils.execute) + finally: + # unmount device + utils.execute('sudo', 'umount', mapped_device) finally: - # unmount device - utils.execute('sudo', 'umount', mapped_device) + # remove temporary directory + utils.execute('rmdir', tmpdir) finally: - # remove temporary directory - utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions utils.execute('sudo', 'kpartx', '-d', device) -- cgit From 1aad930383fa425b88e59929aa1698e31978eb62 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Thu, 12 May 2011 22:19:52 +0000 Subject: Make sure imports are in alphabetical order --- nova/virt/libvirt_conn.py | 2 +- nova/virt/xenapi/vmops.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 80e1a1f85..6ee23d1df 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -57,10 +57,10 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import log as logging from nova import utils from nova import vnc -from nova import ipv6 from nova.auth import manager from nova.compute import instance_types from nova.compute import 
power_state diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index cc2b54331..13d7d215b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -28,13 +28,13 @@ import subprocess import tempfile import uuid -from nova import db from nova import context -from nova import log as logging +from nova import db from nova import exception -from nova import utils from nova import flags from nova import ipv6 +from nova import log as logging +from nova import utils from nova.auth.manager import AuthManager from nova.compute import power_state -- cgit From f51bd03c9ce5f4248cb6f10e3ed662ae6ba33ebd Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 17 May 2011 15:49:31 +0000 Subject: Instead of using a temp file with openssl, just write directly to stdin --- nova/virt/xenapi/vmops.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13d7d215b..0074444f8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,7 +25,6 @@ import M2Crypto import os import pickle import subprocess -import tempfile import uuid from nova import context @@ -1163,18 +1162,17 @@ class SimpleDH(object): return mpi def _run_ssl(self, text, which): - base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc ' - '-a -pass pass:%(shared)s -nosalt %(dec_flag)s') + base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' + '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' else: dec_flag = '' - fd, tmpfile = tempfile.mkstemp() - os.close(fd) - file(tmpfile, 'w').write(text) shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) + proc.stdin.write(text) + proc.stdin.close() proc.wait() err = proc.stderr.read() if err: -- cgit From 4d025ef1d2b2b97c13d710cb5080b78e246215bc Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 18 May 2011 11:27:39 -0500 Subject: Added missing xenhost plugin. --- nova/virt/xenapi_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index eb572f295..6d828e109 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -169,15 +169,15 @@ class XenAPIConnection(driver.ComputeDriver): def __init__(self, url, user, pw): super(XenAPIConnection, self).__init__() - session = XenAPISession(url, user, pw) - self._vmops = VMOps(session) - self._volumeops = VolumeOps(session) + self._session = XenAPISession(url, user, pw) + self._vmops = VMOps(self._session) + self._volumeops = VolumeOps(self._session) self._host_state = None @property def HostState(self): if not self._host_state: - self._host_state = HostState(self.session) + self._host_state = HostState(self._session) return self._host_state def init_host(self, host): -- cgit From 99bab1b99bf4388a0dba89300c4fb71095681276 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 19 May 2011 16:25:57 -0400 Subject: Moved back templates and fixed pep8 issue. Template move was due to breaking packaging with template moves. That will need to happen in a later merge. 
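
Stepping back to the _run_ssl() stdin change two commits up: the same round
trip can also be written with subprocess.communicate(), which avoids deadlocks
when the payload exceeds the pipe buffer. A rough, hedged equivalent, assuming
only that an openssl binary is on PATH (this is not the code used by the
driver):

    import subprocess

    def run_ssl(text, shared, decrypt=False):
        cmd = ['openssl', 'enc', '-aes-128-cbc', '-a',
               '-pass', 'pass:%s' % shared, '-nosalt']
        if decrypt:
            cmd.append('-d')
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # communicate() writes the text, closes stdin and drains both
        # output pipes, so large inputs cannot wedge the child process.
        out, err = proc.communicate(text)
        if proc.returncode != 0:
            raise RuntimeError('OpenSSL error: %s' % err)
        return out
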
--- nova/virt/cpuinfo.xml.template | 9 +++ nova/virt/libvirt.xml.template | 122 +++++++++++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 5 +- nova/virt/libvirt/cpuinfo.xml.template | 9 --- nova/virt/libvirt/libvirt.xml.template | 122 --------------------------------- nova/virt/libvirt/netutils.py | 4 +- 6 files changed, 135 insertions(+), 136 deletions(-) create mode 100644 nova/virt/cpuinfo.xml.template create mode 100644 nova/virt/libvirt.xml.template delete mode 100644 nova/virt/libvirt/cpuinfo.xml.template delete mode 100644 nova/virt/libvirt/libvirt.xml.template (limited to 'nova/virt') diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template new file mode 100644 index 000000000..48842b29d --- /dev/null +++ b/nova/virt/cpuinfo.xml.template @@ -0,0 +1,9 @@ + + $arch + $model + $vendor + +#for $var in $features + +#end for + diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..de2497a76 --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,122 @@ + + ${name} + ${memory_kb} + +#if $type == 'lxc' + #set $disk_prefix = '' + #set $disk_bus = '' + exe + /sbin/init +#else if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $type == 'lxc' + + + + +#else + #if $getVar('rescue', False) + + + + + + + + + + + #else + + + + + + #if $getVar('local', False) + + + + + + #end if + #end if +#end if + +#for $nic in $nics + + + + + + + +#if $getVar('nic.extra_params', False) + ${nic.extra_params} +#end if +#if $getVar('nic.gateway_v6', False) + +#end if + + +#end for + + + + + + + + + + + + + + + + +#if $getVar('vncserver_host', False) + +#end if + + diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 87ba3fec3..94a703954 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -57,7 +57,6 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import ipv6 from nova import log as logging from nova import utils from nova import vnc @@ -85,7 +84,7 @@ flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt/libvirt.xml.template'), + utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', @@ -108,7 +107,7 @@ flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt.firewall.IptablesFirewallDriver', 'Firewall driver (defaults to iptables)') flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/libvirt/cpuinfo.xml.template'), + utils.abspath('virt/cpuinfo.xml.template'), 'CpuInfo XML Template (Used only live migration now)') flags.DEFINE_string('live_migration_uri', "qemu+tcp://%s/system", diff --git a/nova/virt/libvirt/cpuinfo.xml.template 
b/nova/virt/libvirt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/libvirt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/libvirt/libvirt.xml.template b/nova/virt/libvirt/libvirt.xml.template deleted file mode 100644 index de2497a76..000000000 --- a/nova/virt/libvirt/libvirt.xml.template +++ /dev/null @@ -1,122 +0,0 @@ - - ${name} - ${memory_kb} - -#if $type == 'lxc' - #set $disk_prefix = '' - #set $disk_bus = '' - exe - /sbin/init -#else if $type == 'uml' - #set $disk_prefix = 'ubd' - #set $disk_bus = 'uml' - uml - /usr/bin/linux - /dev/ubda -#else - #if $type == 'xen' - #set $disk_prefix = 'sd' - #set $disk_bus = 'scsi' - linux - /dev/xvda - #else - #set $disk_prefix = 'vd' - #set $disk_bus = 'virtio' - hvm - #end if - #if $getVar('rescue', False) - ${basepath}/kernel.rescue - ${basepath}/ramdisk.rescue - #else - #if $getVar('kernel', None) - ${kernel} - #if $type == 'xen' - ro - #else - root=/dev/vda console=ttyS0 - #end if - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - #else - - #end if - #end if -#end if - - - - - ${vcpus} - -#if $type == 'lxc' - - - - -#else - #if $getVar('rescue', False) - - - - - - - - - - - #else - - - - - - #if $getVar('local', False) - - - - - - #end if - #end if -#end if - -#for $nic in $nics - - - - - - - -#if $getVar('nic.extra_params', False) - ${nic.extra_params} -#end if -#if $getVar('nic.gateway_v6', False) - -#end if - - -#end for - - - - - - - - - - - - - - - - -#if $getVar('vncserver_host', False) - -#end if - - diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index 9225d8929..4d596078a 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -26,6 +26,7 @@ import IPy from nova import context from nova import db from nova import flags +from nova import ipv6 from nova import utils @@ -46,6 +47,7 @@ def get_ip_version(cidr): net = IPy.IP(cidr) return int(net.version()) + def get_network_info(instance): # TODO(adiantum) If we will keep this function # we should cache network_info @@ -93,5 +95,3 @@ def get_network_info(instance): network_info.append((network, mapping)) return network_info - - -- cgit From b2db9895c271825d1a58ade9c6de85ac90f760a7 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 19 May 2011 22:56:23 -0400 Subject: fixed pep8 issue --- nova/virt/images.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 8689c0ed3..02c898fda 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -42,6 +42,7 @@ def fetch(image_id, path, _user, _project): metadata = image_service.get(elevated, image_id, image_file) return metadata + # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. 
def image_url(image): -- cgit From 36a3b9dc172a109e1f17dbc531a574ebf9e37453 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 21:41:44 +0000 Subject: need to strip newline from openssl stdout data --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0074444f8..8c80ce7b1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1177,7 +1177,7 @@ class SimpleDH(object): err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read() + return proc.stdout.read().strip('\n') def encrypt(self, text): return self._run_ssl(text, 'enc') -- cgit From 0acbf6d77f02ca0fa3a11e29a55bbb617c33a816 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 22:13:59 +0000 Subject: DHSimple's decrypt needs to append \n when writing to stdin --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 8c80ce7b1..45b04351d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1171,7 +1171,7 @@ class SimpleDH(object): shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) - proc.stdin.write(text) + proc.stdin.write(text + '\n') proc.stdin.close() proc.wait() err = proc.stderr.read() -- cgit From e0aa1369d8050f023fee1e60b276d44a6298feb9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 21:09:43 -0700 Subject: instead of the API spawning a greenthread to wait for a host to be picked, the instance to boot, etc for setting the admin password... let's push the admin password down to the scheduler so that compute can just take care of setting the password as a part of the build process. --- nova/virt/xenapi/vmops.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 45b04351d..a16c6a0d8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -202,6 +202,13 @@ class VMOps(object): for path, contents in instance.injected_files: LOG.debug(_("Injecting file path: '%s'") % path) self.inject_file(instance, path, contents) + + def _set_admin_password(): + admin_password = instance.admin_password + if admin_password: + LOG.debug(_("Setting admin password")) + self.set_admin_password(instance, admin_password) + # NOTE(armando): Do we really need to do this in virt? 
# NOTE(tr3buchet): not sure but wherever we do it, we need to call
 # reset_network afterwards
@@ -214,6 +221,7 @@ class VMOps(object):
                     LOG.debug(_('Instance %s: booted'), instance_name)
                     timer.stop()
                     _inject_files()
+                    _set_admin_password()
                     return True
             except Exception, exc:
                 LOG.warn(exc)
-- cgit


From 7387af3ab5a310f7c427f0257e531871f62f398d Mon Sep 17 00:00:00 2001
From: Ed Leafe
Date: Wed, 25 May 2011 14:57:52 +0000
Subject: Changed the exception type to not require an instance ID

---
 nova/virt/xenapi/vmops.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'nova/virt')

diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 45b04351d..aaf5585b1 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -253,7 +253,8 @@ class VMOps(object):
             instance_name = instance_or_vm.name
         vm_ref = VMHelper.lookup(self._session, instance_name)
         if vm_ref is None:
-            raise exception.InstanceNotFound(instance_id=instance_obj.id)
+            raise exception.NotFound(_("No opaque_ref could be determined "
+                                       "for '%s'.") % instance_or_vm)
         return vm_ref
 
     def _acquire_bootlock(self, vm):
-- cgit


From ed582a8b86f81140affd88805ba9989b591577cd Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Wed, 25 May 2011 17:01:20 +0000
Subject: change instance_ref.admin_password to instance_ref.admin_pass to
 match the DB

---
 nova/virt/xenapi/vmops.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'nova/virt')

diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index a16c6a0d8..e2d453d21 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -204,7 +204,7 @@ class VMOps(object):
             self.inject_file(instance, path, contents)
 
         def _set_admin_password():
-            admin_password = instance.admin_password
+            admin_password = instance.admin_pass
             if admin_password:
                 LOG.debug(_("Setting admin password"))
                 self.set_admin_password(instance, admin_password)
-- cgit


From b933f90faecaddf7281455f4824577b586e07f0c Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Wed, 25 May 2011 17:55:51 +0000
Subject: updating admin_pass moved down to compute where the password is
 actually reset. only update if it succeeds.

---
 nova/virt/xenapi/vmops.py | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'nova/virt')

diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c9396cffe..be6ef48ea 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -466,6 +466,9 @@ class VMOps(object):
         # Successful return code from password is '0'
         if resp_dict['returncode'] != '0':
             raise RuntimeError(resp_dict['message'])
+        db.instance_update(context.get_admin_context(),
+                           instance['id'],
+                           dict(admin_pass=new_pass))
         return resp_dict['message']
 
     def inject_file(self, instance, path, contents):
-- cgit
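
The admin-password flow across these last commits boils down to one contract:
only touch the database once the in-guest agent has confirmed the change. A
condensed sketch of that contract (resp_dict mirrors the agent reply shape
used above; the function and parameter names are illustrative):

    def apply_agent_password(instance, new_pass, call_agent, db_update):
        resp_dict = call_agent(instance, new_pass)
        # The agent signals success with the string '0'; anything else
        # carries a human-readable error in 'message'.
        if resp_dict['returncode'] != '0':
            raise RuntimeError(resp_dict['message'])
        # Persist only after the guest actually accepted the password.
        db_update(instance['id'], dict(admin_pass=new_pass))
        return resp_dict['message']
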