diff options
| author | Isaku Yamahata <yamahata@valinux.co.jp> | 2011-08-02 11:36:58 +0900 |
|---|---|---|
| committer | Isaku Yamahata <yamahata@valinux.co.jp> | 2011-08-02 11:36:58 +0900 |
| commit | 0ffc3b80490c24ee116f5be070249dcbbed9357b (patch) | |
| tree | 74b5a542119899a9b483288d2fa6a354751d4090 /nova/virt | |
| parent | a52b643b18e1bac18b642ecfd781809eb5612763 (diff) | |
| parent | bdcfaa5b927a096f507fb0f7e2d81989173957f8 (diff) | |
merged with nova trunk
Diffstat (limited to 'nova/virt')
| -rw-r--r-- | nova/virt/driver.py | 19 | ||||
| -rw-r--r-- | nova/virt/fake.py | 10 | ||||
| -rw-r--r-- | nova/virt/hyperv.py | 10 | ||||
| -rw-r--r-- | nova/virt/images.py | 2 | ||||
| -rw-r--r-- | nova/virt/libvirt.xml.template | 24 | ||||
| -rw-r--r-- | nova/virt/libvirt/connection.py | 117 | ||||
| -rw-r--r-- | nova/virt/libvirt/firewall.py | 11 | ||||
| -rw-r--r-- | nova/virt/libvirt/netutils.py | 10 | ||||
| -rw-r--r-- | nova/virt/libvirt/vif.py | 134 | ||||
| -rw-r--r-- | nova/virt/vif.py | 30 | ||||
| -rw-r--r-- | nova/virt/vmwareapi/fake.py | 10 | ||||
| -rw-r--r-- | nova/virt/vmwareapi/network_utils.py | 29 | ||||
| -rw-r--r-- | nova/virt/vmwareapi/vif.py | 95 | ||||
| -rw-r--r-- | nova/virt/vmwareapi/vm_util.py | 33 | ||||
| -rw-r--r-- | nova/virt/vmwareapi/vmops.py | 48 | ||||
| -rw-r--r-- | nova/virt/vmwareapi_conn.py | 16 | ||||
| -rw-r--r-- | nova/virt/xenapi/vif.py | 140 | ||||
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 73 | ||||
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 88 | ||||
| -rw-r--r-- | nova/virt/xenapi_conn.py | 32 |
20 files changed, 705 insertions, 226 deletions
diff --git a/nova/virt/driver.py b/nova/virt/driver.py index b2406e306..561115e3f 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -92,7 +92,7 @@ class ComputeDriver(object): """Launch a VM for the specified instance""" raise NotImplementedError() - def destroy(self, instance, cleanup=True): + def destroy(self, instance, network_info, cleanup=True): """Destroy (shutdown and delete) the specified instance. The given parameter is an instance of nova.compute.service.Instance, @@ -108,7 +108,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot specified VM""" raise NotImplementedError() @@ -149,11 +149,12 @@ class ComputeDriver(object): """Create snapshot from a running VM instance.""" raise NotImplementedError() - def finish_resize(self, instance, disk_info): + def finish_migration(self, instance, disk_info, network_info, + resize_instance): """Completes a resize, turning on the migrated instance""" raise NotImplementedError() - def revert_resize(self, instance): + def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" raise NotImplementedError() @@ -173,11 +174,11 @@ class ComputeDriver(object): """resume the specified instance""" raise NotImplementedError() - def rescue(self, instance, callback): + def rescue(self, instance, callback, network_info): """Rescue the specified instance""" raise NotImplementedError() - def unrescue(self, instance, callback): + def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" raise NotImplementedError() @@ -251,7 +252,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() @@ -280,3 +281,7 @@ class ComputeDriver(object): def set_host_enabled(self, host, enabled): """Sets the specified host's 
ability to accept new instances.""" raise NotImplementedError() + + def plug_vifs(self, instance, network_info): + """Plugs in VIFs to networks.""" + raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 48a03dac8..4957fbaf7 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -167,7 +167,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def reboot(self, instance): + def reboot(self, instance, network_info): """ Reboot the specified instance. @@ -240,13 +240,13 @@ class FakeConnection(driver.ComputeDriver): """ pass - def rescue(self, instance): + def rescue(self, instance, callback, network_info): """ Rescue the specified instance. """ pass - def unrescue(self, instance): + def unrescue(self, instance, callback, network_info): """ Unrescue the specified instance. """ @@ -293,7 +293,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def destroy(self, instance): + def destroy(self, instance, network_info): key = instance.name if key in self.instances: del self.instances[key] @@ -499,7 +499,7 @@ class FakeConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info=None): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index f0ce71392..a97446c1a 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -66,7 +66,6 @@ import time from nova import exception from nova import flags from nova import log as logging -from nova.auth import manager from nova.compute import power_state from nova.virt import driver from nova.virt import images @@ -145,13 +144,12 @@ class HyperVConnection(driver.ComputeDriver): if vm is not None: raise exception.InstanceExists(name=instance.name) - user = manager.AuthManager().get_user(instance['user_id']) - project = 
manager.AuthManager().get_project(instance['project_id']) #Fetch the file, assume it is a VHD file. base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - images.fetch(instance['image_ref'], vhdfile, user, project) + images.fetch(instance['image_ref'], vhdfile, + instance['user_id'], instance['project_id']) try: self._create_vm(instance) @@ -368,14 +366,14 @@ class HyperVConnection(driver.ComputeDriver): wmi_obj.Properties_.Item(prop).Value return newinst - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot the specified instance.""" vm = self._lookup(instance.name) if vm is None: raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy the VM. Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) diff --git a/nova/virt/images.py b/nova/virt/images.py index 40bf6107c..2e9fca3d6 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -33,7 +33,7 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_href, path, _user, _project): +def fetch(image_href, path, _user_id, _project_id): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. 
Right now there is no # auth checking in glance, so we assume that access was diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index f7cf306bc..210e2b0fb 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -95,9 +95,13 @@ </disk> #end if #for $vol in $volumes - <disk type='block'> + <disk type='${vol.type}'> <driver type='raw'/> + #if $vol.type == 'network' + <source protocol='${vol.protocol}' name='${vol.name}'/> + #else <source dev='${vol.device_path}'/> + #end if <target dev='${vol.mount_device}' bus='${disk_bus}'/> </disk> #end for @@ -105,6 +109,22 @@ #end if #for $nic in $nics + #if $vif_type == 'ethernet' + <interface type='ethernet'> + <target dev='${nic.name}' /> + <mac address='${nic.mac_address}' /> + <script path='${nic.script}' /> + </interface> + #else if $vif_type == '802.1Qbh' + <interface type='direct'> + <mac address='${nic.mac_address}'/> + <source dev='${nic.device_name}' mode='private'/> + <virtualport type='802.1Qbh'> + <parameters profileid='${nic.profile_name}'/> + </virtualport> + <model type='virtio'/> + </interface> + #else <interface type='bridge'> <source bridge='${nic.bridge_name}'/> <mac address='${nic.mac_address}'/> @@ -120,6 +140,8 @@ #end if </filterref> </interface> + #end if + #end for <!-- The order is significant here. 
File must be defined first --> <serial type="file"> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index cf013df30..03ab83cc0 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -124,6 +124,11 @@ flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') flags.DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') +flags.DEFINE_string('libvirt_vif_type', 'bridge', + 'Type of VIF to create.') +flags.DEFINE_string('libvirt_vif_driver', + 'nova.virt.libvirt.vif.LibvirtBridgeDriver', + 'The libvirt VIF driver to configure the VIFs.') def get_connection(read_only): @@ -166,6 +171,7 @@ class LibvirtConnection(driver.ComputeDriver): fw_class = utils.import_class(FLAGS.firewall_driver) self.firewall_driver = fw_class(get_connection=self._get_connection) + self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver) def init_host(self, host): # Adopt existing VM's running here @@ -257,7 +263,12 @@ class LibvirtConnection(driver.ComputeDriver): infos.append(info) return infos - def destroy(self, instance, cleanup=True): + def plug_vifs(self, instance, network_info): + """Plugin VIFs into networks.""" + for (network, mapping) in network_info: + self.vif_driver.plug(instance, network, mapping) + + def destroy(self, instance, network_info, cleanup=True): instance_name = instance['name'] try: @@ -301,6 +312,9 @@ class LibvirtConnection(driver.ComputeDriver): locals()) raise + for (network, mapping) in network_info: + self.vif_driver.unplug(instance, network, mapping) + def _wait_for_destroy(): """Called at an interval until the VM is gone.""" instance_name = instance['name'] @@ -315,7 +329,8 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_destroy) timer.start(interval=0.5, now=True) - self.firewall_driver.unfilter_instance(instance) + self.firewall_driver.unfilter_instance(instance, + 
network_info=network_info) if cleanup: self._cleanup(instance) @@ -336,24 +351,20 @@ class LibvirtConnection(driver.ComputeDriver): def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] - if device_path.startswith('/dev/'): + (type, protocol, name) = \ + self._get_volume_device_info(device_path) + if type == 'block': xml = """<disk type='block'> <driver name='qemu' type='raw'/> <source dev='%s'/> <target dev='%s' bus='virtio'/> </disk>""" % (device_path, mount_device) - elif ':' in device_path: - (protocol, name) = device_path.split(':') + elif type == 'network': xml = """<disk type='network'> <driver name='qemu' type='raw'/> <source protocol='%s' name='%s'/> <target dev='%s' bus='virtio'/> - </disk>""" % (protocol, - name, - mount_device) - else: - raise exception.InvalidDevicePath(path=device_path) - + </disk>""" % (protocol, name, mount_device) virt_dom.attachDevice(xml) def _get_disk_xml(self, xml, device): @@ -462,7 +473,7 @@ class LibvirtConnection(driver.ComputeDriver): shutil.rmtree(temp_dir) @exception.wrap_exception() - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot a virtual machine, given an instance reference. This method actually destroys and re-creates the domain to ensure the @@ -477,7 +488,8 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... 
- self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) + self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance) self.firewall_driver.prepare_instance_filter(instance) self._create_new_domain(xml) @@ -527,7 +539,7 @@ class LibvirtConnection(driver.ComputeDriver): dom.create() @exception.wrap_exception() - def rescue(self, instance): + def rescue(self, instance, callback, network_info): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the @@ -536,7 +548,7 @@ class LibvirtConnection(driver.ComputeDriver): data recovery. """ - self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) xml = self.to_xml(instance, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, @@ -565,14 +577,14 @@ class LibvirtConnection(driver.ComputeDriver): return timer.start(interval=0.5, now=True) @exception.wrap_exception() - def unrescue(self, instance): + def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. Because reboot destroys and re-creates instances, unresue should simply call reboot. 
""" - self.reboot(instance) + self.reboot(instance, network_info) @exception.wrap_exception() def poll_rescued_instances(self, timeout): @@ -757,9 +769,9 @@ class LibvirtConnection(driver.ComputeDriver): else: utils.execute('cp', base, target) - def _fetch_image(self, target, image_id, user, project, size=None): + def _fetch_image(self, target, image_id, user_id, project_id, size=None): """Grab image and optionally attempt to resize it""" - images.fetch(image_id, target, user, project) + images.fetch(image_id, target, user_id, project_id) if size: disk.extend(target, size) @@ -806,9 +818,6 @@ class LibvirtConnection(driver.ComputeDriver): os.close(os.open(basepath('console.log', ''), os.O_CREAT | os.O_WRONLY, 0660)) - user = manager.AuthManager().get_user(inst['user_id']) - project = manager.AuthManager().get_project(inst['project_id']) - if not disk_images: disk_images = {'image_id': inst['image_ref'], 'kernel_id': inst['kernel_id'], @@ -820,16 +829,16 @@ class LibvirtConnection(driver.ComputeDriver): target=basepath('kernel'), fname=fname, image_id=disk_images['kernel_id'], - user=user, - project=project) + user_id=inst['user_id'], + project_id=inst['project_id']) if disk_images['ramdisk_id']: fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), fname=fname, image_id=disk_images['ramdisk_id'], - user=user, - project=project) + user_id=inst['user_id'], + project_id=inst['project_id']) root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size @@ -847,8 +856,8 @@ class LibvirtConnection(driver.ComputeDriver): fname=root_fname, cow=FLAGS.use_cow_images, image_id=disk_images['image_id'], - user=user, - project=project, + user_id=inst['user_id'], + project_id=inst['project_id'], size=size) local_gb = inst['local_gb'] @@ -963,39 +972,6 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) 
- def _get_nic_for_xml(self, network, mapping): - # Assume that the gateway also acts as the dhcp server. - gateway6 = mapping.get('gateway6') - mac_id = mapping['mac'].replace(':', '') - - if FLAGS.allow_project_net_traffic: - template = "<parameter name=\"%s\"value=\"%s\" />\n" - net, mask = netutils.get_net_and_mask(network['cidr']) - values = [("PROJNET", net), ("PROJMASK", mask)] - if FLAGS.use_ipv6: - net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( - network['cidr_v6']) - values.extend([("PROJNETV6", net_v6), - ("PROJMASKV6", prefixlen_v6)]) - - extra_params = "".join([template % value for value in values]) - else: - extra_params = "\n" - - result = { - 'id': mac_id, - 'bridge_name': network['bridge'], - 'mac_address': mapping['mac'], - 'ip_address': mapping['ips'][0]['ip'], - 'dhcp_server': mapping['dhcp_server'], - 'extra_params': extra_params, - } - - if gateway6: - result['gateway6'] = gateway6 + "/128" - - return result - if FLAGS.libvirt_type == 'uml': _disk_prefix = 'ubd' elif FLAGS.libvirt_type == 'xen': @@ -1026,6 +1002,15 @@ class LibvirtConnection(driver.ComputeDriver): LOG.debug(_("block_device_list %s"), block_device_list) return block_device.strip_dev(mount_device) in block_device_list + def _get_volume_device_info(self, device_path): + if device_path.startswith('/dev/'): + return ('block', None, None) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + return ('network', protocol, name) + else: + raise exception.InvalidDevicePath(path=device_path) + def _prepare_xml_info(self, instance, rescue=False, network_info=None, block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( @@ -1037,7 +1022,7 @@ class LibvirtConnection(driver.ComputeDriver): nics = [] for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, mapping)) + nics.append(self.vif_driver.plug(instance, network, mapping)) # FIXME(vish): stick this in db inst_type_id = instance['instance_type_id'] 
inst_type = instance_types.get_instance_type(inst_type_id) @@ -1049,6 +1034,8 @@ class LibvirtConnection(driver.ComputeDriver): for vol in block_device_mapping: vol['mount_device'] = block_device.strip_dev(vol['mount_device']) + (vol['type'], vol['protocol'], vol['name']) = \ + self._get_volume_device_info(vol['device_path']) ebs_root = self._volume_in_mapping(self.default_root_device, block_device_info) @@ -1077,6 +1064,7 @@ class LibvirtConnection(driver.ComputeDriver): 'rescue': rescue, 'disk_prefix': self._disk_prefix, 'driver_type': driver_type, + 'vif_type': FLAGS.libvirt_vif_type, 'nics': nics, 'ebs_root': ebs_root, 'local_device': local_device, @@ -1667,9 +1655,10 @@ class LibvirtConnection(driver.ComputeDriver): timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info): """See comments of same method in firewall_driver.""" - self.firewall_driver.unfilter_instance(instance_ref) + self.firewall_driver.unfilter_instance(instance_ref, + network_info=network_info) def update_host_status(self): """See xenapi_conn.py implementation.""" diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 379197398..9ce57b6c9 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -46,7 +46,7 @@ class FirewallDriver(object): At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Stop filtering instance""" raise NotImplementedError() @@ -300,9 +300,10 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Clear out the nwfilter rules.""" - network_info = 
netutils.get_network_info(instance) + if not network_info: + network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -542,11 +543,11 @@ class IptablesFirewallDriver(FirewallDriver): """No-op. Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, network_info) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index 041eacb2d..a8e88fc07 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -25,6 +25,7 @@ import netaddr from nova import context from nova import db +from nova import exception from nova import flags from nova import ipv6 from nova import utils @@ -55,10 +56,12 @@ def get_network_info(instance): # we should cache network_info admin_context = context.get_admin_context() - fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) + try: + fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) + except exception.FixedIpNotFoundForInstance: + fixed_ips = [] + vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) - networks = db.network_get_all_by_instance(admin_context, - instance['id']) flavor = db.instance_type_get(admin_context, instance['instance_type_id']) network_info = [] @@ -89,6 +92,7 @@ def get_network_info(instance): 'label': network['label'], 'gateway': network['gateway'], 'broadcast': network['broadcast'], + 'dhcp_server': network['gateway'], 'mac': vif['address'], 'rxtx_cap': flavor['rxtx_cap'], 'dns': [], diff --git a/nova/virt/libvirt/vif.py 
b/nova/virt/libvirt/vif.py new file mode 100644 index 000000000..eef582fac --- /dev/null +++ b/nova/virt/libvirt/vif.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# Copyright (C) 2011 Nicira, Inc +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for libvirt.""" + +from nova import flags +from nova import log as logging +from nova.network import linux_net +from nova.virt.libvirt import netutils +from nova import utils +from nova.virt.vif import VIFDriver + +LOG = logging.getLogger('nova.virt.libvirt.vif') + +FLAGS = flags.FLAGS + +flags.DEFINE_string('libvirt_ovs_bridge', 'br-int', + 'Name of Integration Bridge used by Open vSwitch') + + +class LibvirtBridgeDriver(VIFDriver): + """VIF driver for Linux bridge.""" + + def _get_configurations(self, network, mapping): + """Get a dictionary of VIF configurations for bridge type.""" + # Assume that the gateway also acts as the dhcp server. 
+ gateway6 = mapping.get('gateway6') + mac_id = mapping['mac'].replace(':', '') + + if FLAGS.allow_project_net_traffic: + template = "<parameter name=\"%s\"value=\"%s\" />\n" + net, mask = netutils.get_net_and_mask(network['cidr']) + values = [("PROJNET", net), ("PROJMASK", mask)] + if FLAGS.use_ipv6: + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( + network['cidr_v6']) + values.extend([("PROJNETV6", net_v6), + ("PROJMASKV6", prefixlen_v6)]) + + extra_params = "".join([template % value for value in values]) + else: + extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': mapping['dhcp_server'], + 'extra_params': extra_params, + } + + if gateway6: + result['gateway6'] = gateway6 + "/128" + + return result + + def plug(self, instance, network, mapping): + """Ensure that the bridge exists, and add VIF to it.""" + if (not network.get('multi_host') and + mapping.get('should_create_bridge')): + if mapping.get('should_create_vlan'): + LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), + {'vlan': network['vlan'], + 'bridge': network['bridge']}) + linux_net.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface']) + else: + LOG.debug(_("Ensuring bridge %s"), network['bridge']) + linux_net.ensure_bridge(network['bridge'], + network['bridge_interface']) + + return self._get_configurations(network, mapping) + + def unplug(self, instance, network, mapping): + """No manual unplugging required.""" + pass + + +class LibvirtOpenVswitchDriver(VIFDriver): + """VIF driver for Open vSwitch.""" + + def plug(self, instance, network, mapping): + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + iface_id = "nova-" + vif_id + if not linux_net._device_exists(dev): + utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') + utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') + 
utils.execute('sudo', 'ovs-vsctl', '--', '--may-exist', 'add-port', + FLAGS.libvirt_ovs_bridge, dev, + '--', 'set', 'Interface', dev, + "external-ids:iface-id=%s" % iface_id, + '--', 'set', 'Interface', dev, + "external-ids:iface-status=active", + '--', 'set', 'Interface', dev, + "external-ids:attached-mac=%s" % mapping['mac']) + + result = { + 'script': '', + 'name': dev, + 'mac_address': mapping['mac']} + return result + + def unplug(self, instance, network, mapping): + """Unplug the VIF from the network by deleting the port from + the bridge.""" + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + try: + utils.execute('sudo', 'ovs-vsctl', 'del-port', + network['bridge'], dev) + utils.execute('sudo', 'ip', 'link', 'delete', dev) + except: + LOG.warning(_("Failed while unplugging vif of instance '%s'"), + instance['name']) + raise diff --git a/nova/virt/vif.py b/nova/virt/vif.py new file mode 100644 index 000000000..b78689957 --- /dev/null +++ b/nova/virt/vif.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""VIF module common to all virt layers.""" + + +class VIFDriver(object): + """Abstract class that defines generic interfaces for all VIF drivers.""" + + def plug(self, instance, network, mapping): + """Plug VIF into network.""" + raise NotImplementedError() + + def unplug(self, instance, network, mapping): + """Unplug VIF from network.""" + raise NotImplementedError() diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 7370684bd..4c62d18bb 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -402,6 +402,16 @@ def _remove_file(file_path): lst_files.remove(file)
+def fake_plug_vifs(*args, **kwargs):
+ """Fakes plugging vifs."""
+ pass
+
+
+def fake_get_network(*args, **kwargs):
+ """Fake get network."""
+ return [{'type': 'fake'}]
+
+
def fake_fetch_image(image, instance, **kwargs):
"""Fakes fetch image call. Just adds a reference to the db for the file."""
ds_name = kwargs.get("datastore_name")
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py index e77842535..ec3b93fe7 100644 --- a/nova/virt/vmwareapi/network_utils.py +++ b/nova/virt/vmwareapi/network_utils.py @@ -45,10 +45,31 @@ def get_network_with_the_name(session, network_name="vmnet0"): networks = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Network", vm_networks, ["summary.name"])
- for network in networks:
- if network.propSet[0].val == network_name:
- return network.obj
- return None
+ network_obj = {}
+ LOG.warn(vm_networks)
+ for network in vm_networks:
+ # Get network properties
+ if network._type == 'DistributedVirtualPortgroup':
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "DistributedVirtualPortgroup", "config")
+ # NOTE(asomya): This only works on ESXi if the port binding is
+ # set to ephemeral
+ if props.name == network_name:
+ network_obj['type'] = 'DistributedVirtualPortgroup'
+ network_obj['dvpg'] = props.key
+ network_obj['dvsw'] = props.distributedVirtualSwitch.value
+ else:
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "Network", "summary.name")
+ if props == network_name:
+ network_obj['type'] = 'Network'
+ network_obj['name'] = network_name
+ if (len(network_obj) > 0):
+ return network_obj
+ else:
+ return None
def get_vswitch_for_vlan_interface(session, vlan_interface):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py new file mode 100644 index 000000000..b3e43b209 --- /dev/null +++ b/nova/virt/vmwareapi/vif.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for VMWare.""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.vif import VIFDriver +from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt.vmwareapi import network_utils + + +LOG = logging.getLogger("nova.virt.vmwareapi.vif") + +FLAGS = flags.FLAGS + + +class VMWareVlanBridgeDriver(VIFDriver): + """VIF Driver to setup bridge/VLAN networking using VMWare API.""" + + def plug(self, instance, network, mapping): + """Create a vlan and bridge unless they already exist.""" + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + + # Open vmwareapi session + host_ip = FLAGS.vmwareapi_host_ip + host_username = FLAGS.vmwareapi_host_username + host_password = FLAGS.vmwareapi_host_password + if not host_ip or host_username is None or host_password is None: + raise Exception(_('Must specify vmwareapi_host_ip, ' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use ' + 'connection_type=vmwareapi')) + session = VMWareAPISession(host_ip, 
host_username, host_password, + FLAGS.vmwareapi_api_retry_count) + vlan_interface = bridge_interface + # Check if the vlan_interface physical network adapter exists on the + # host. + if not network_utils.check_if_vlan_interface_exists(session, + vlan_interface): + raise exception.NetworkAdapterNotFound(adapter=vlan_interface) + + # Get the vSwitch associated with the Physical Adapter + vswitch_associated = network_utils.get_vswitch_for_vlan_interface( + session, vlan_interface) + if vswitch_associated is None: + raise exception.SwicthNotFoundForNetworkAdapter( + adapter=vlan_interface) + # Check whether bridge already exists and retrieve the the ref of the + # network whose name_label is "bridge" + network_ref = network_utils.get_network_with_the_name(session, bridge) + if network_ref is None: + # Create a port group on the vSwitch associated with the + # vlan_interface corresponding physical network adapter on the ESX + # host. + network_utils.create_port_group(session, bridge, + vswitch_associated, vlan_num) + else: + # Get the vlan id and vswitch corresponding to the port group + pg_vlanid, pg_vswitch = \ + network_utils.get_vlanid_and_vswitch_for_portgroup(session, + bridge) + + # Check if the vswitch associated is proper + if pg_vswitch != vswitch_associated: + raise exception.InvalidVLANPortGroup( + bridge=bridge, expected=vswitch_associated, + actual=pg_vswitch) + + # Check if the vlan id is proper for the port group + if pg_vlanid != vlan_num: + raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, + pgroup=pg_vlanid) + + def unplug(self, instance, network, mapping): + pass diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 1638149f1..82b5f7214 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -40,7 +40,7 @@ def split_datastore_path(datastore_path): def get_vm_create_spec(client_factory, instance, data_store_name,
network_name="vmnet0",
- os_type="otherGuest"):
+ os_type="otherGuest", network_ref=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.name
@@ -93,7 +93,8 @@ def create_controller_spec(client_factory, key): return virtual_device_config
-def create_network_spec(client_factory, network_name, mac_address):
+def create_network_spec(client_factory, network_name, mac_address,
+ network_ref=None):
"""
Builds a config spec for the addition of a new network
adapter to the VM.
@@ -105,9 +106,25 @@ def create_network_spec(client_factory, network_name, mac_address): # Get the recommended card type for the VM based on the guest OS of the VM
net_device = client_factory.create('ns0:VirtualPCNet32')
- backing = \
- client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
- backing.deviceName = network_name
+ # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
+ # ephemeral. Invalid configuration if set to static and the NIC does
+ # not come up on boot if set to dynamic.
+ backing = None
+ if (network_ref and
+ network_ref['type'] == "DistributedVirtualPortgroup"):
+ backing_name = \
+ 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ backing = \
+ client_factory.create(backing_name)
+ portgroup = \
+ client_factory.create('ns0:DistributedVirtualSwitchPortConnection')
+ portgroup.switchUuid = network_ref['dvsw']
+ portgroup.portgroupKey = network_ref['dvpg']
+ backing.port = portgroup
+ else:
+ backing = \
+ client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
+ backing.deviceName = network_name
connectable_spec = \
client_factory.create('ns0:VirtualDeviceConnectInfo')
@@ -278,9 +295,11 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name): return config_spec
-def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
+def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask,
+ gateway, broadcast, dns):
"""Builds the machine id change config spec."""
- machine_id_str = "%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway)
+ machine_id_str = "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask,
+ gateway, broadcast, dns)
virtual_machine_config_spec = \
client_factory.create('ns0:VirtualMachineConfigSpec')
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 94d9e6226..1ee8fa1c0 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -31,6 +31,7 @@ from nova import db from nova import exception
from nova import flags
from nova import log as logging
+from nova import utils
from nova.compute import power_state
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -38,6 +39,10 @@ from nova.virt.vmwareapi import vmware_images from nova.virt.vmwareapi import network_utils
FLAGS = flags.FLAGS
+flags.DEFINE_string('vmware_vif_driver',
+ 'nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
+ 'The VMWare VIF driver to configure the VIFs.')
+
LOG = logging.getLogger("nova.virt.vmwareapi.vmops")
VMWARE_POWER_STATES = {
def __init__(self, session):
    """Keep a handle on the VMware API session and instantiate the
    VIF driver named by the vmware_vif_driver flag."""
    self._session = session
    self._vif_driver = utils.import_object(FLAGS.vmware_vif_driver)
def _wait_with_callback(self, instance_id, task, callback):
"""Waits for the task to finish and does a callback after."""
@@ -83,7 +89,7 @@ class VMWareVMOps(object): LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance):
+ def spawn(self, instance, network_info):
"""
Creates a VM instance.
@@ -116,8 +122,10 @@ class VMWareVMOps(object): net_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=net_name)
+ return network_ref
- _check_if_network_bridge_exists()
+ self.plug_vifs(instance, network_info)
+ network_obj = _check_if_network_bridge_exists()
def _get_datastore_ref():
"""Get the datastore list and choose the first local storage."""
@@ -175,8 +183,10 @@ class VMWareVMOps(object): vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
# Get the create vm config spec
- config_spec = vm_util.get_vm_create_spec(client_factory, instance,
- data_store_name, net_name, os_type)
+ config_spec = vm_util.get_vm_create_spec(
+ client_factory, instance,
+ data_store_name, net_name, os_type,
+ network_obj)
def _execute_create_vm():
"""Create VM on ESX host."""
@@ -472,11 +482,14 @@ class VMWareVMOps(object): _clean_temp_data()
- def reboot(self, instance):
+ def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
+
+ self.plug_vifs(instance, network_info)
+
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -514,7 +527,7 @@ class VMWareVMOps(object): self._session._wait_for_task(instance.id, reset_task)
LOG.debug(_("Did hard reboot of VM %s") % instance.name)
- def destroy(self, instance):
+ def destroy(self, instance, network_info):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -560,6 +573,8 @@ class VMWareVMOps(object): LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep))
+ self._unplug_vifs(instance, network_info)
+
# Delete the folder holding the VM related content on
# the datastore.
try:
@@ -718,20 +733,25 @@ class VMWareVMOps(object): net_mask = network["netmask"]
gateway = network["gateway"]
+ broadcast = network["broadcast"]
+ # TODO(vish): add support for dns2
+ dns = network["dns1"]
+
addresses = db.instance_get_fixed_addresses(admin_context,
instance['id'])
ip_addr = addresses[0] if addresses else None
- machine_id_chanfge_spec = \
+ machine_id_change_spec = \
vm_util.get_machine_id_change_spec(client_factory, mac_address,
- ip_addr, net_mask, gateway)
+ ip_addr, net_mask, gateway,
+ broadcast, dns)
LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
({'name': instance.name,
'ip_addr': ip_addr}))
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
- spec=machine_id_chanfge_spec)
+ spec=machine_id_change_spec)
self._session._wait_for_task(instance.id, reconfig_task)
LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
@@ -784,3 +804,13 @@ class VMWareVMOps(object): if vm.propSet[0].val == vm_name:
return vm.obj
return None
+
def plug_vifs(self, instance, network_info):
    """Plug the instance's VIFs into their networks.

    network_info is an iterable of (network, mapping) pairs; each pair
    is handed to the configured VIF driver.
    """
    for net, info in network_info:
        self._vif_driver.plug(instance, net, info)
+
+ def _unplug_vifs(self, instance, network_info):
+ """Unplug VIFs from networks."""
+ for (network, mapping) in network_info:
+ self._vif_driver.unplug(instance, network, mapping)
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index d80e14931..ce57847b2 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -124,21 +124,21 @@ class VMWareESXConnection(driver.ComputeDriver): """List VM instances."""
return self._vmops.list_instances()
def spawn(self, instance, network_info, block_device_mapping=None):
    """Create a VM for the instance by delegating to the ops layer.

    block_device_mapping is part of the ComputeDriver interface and is
    not consumed by the VMware backend.
    """
    self._vmops.spawn(instance, network_info)
def snapshot(self, instance, name):
    """Snapshot the running instance under the given name (delegates to ops)."""
    self._vmops.snapshot(instance, name)
def reboot(self, instance, network_info):
    """Reboot the VM instance (delegates to the ops layer)."""
    self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info):
    """Tear down the VM instance (delegates to the ops layer)."""
    self._vmops.destroy(instance, network_info)
def pause(self, instance, callback):
"""Pause VM instance."""
@@ -194,6 +194,10 @@ class VMWareESXConnection(driver.ComputeDriver): """Sets the specified host's ability to accept new instances."""
pass
def plug_vifs(self, instance, network_info):
    """Attach the instance's VIFs to their networks (delegates to ops)."""
    self._vmops.plug_vifs(instance, network_info)
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py new file mode 100644 index 000000000..527602243 --- /dev/null +++ b/nova/virt/xenapi/vif.py @@ -0,0 +1,140 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# Copyright (C) 2011 Nicira, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for XenAPI.""" + +from nova import flags +from nova import log as logging +from nova.virt.vif import VIFDriver +from nova.virt.xenapi.network_utils import NetworkHelper + +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_ovs_integration_bridge', 'xapi1', + 'Name of Integration Bridge used by Open vSwitch') + +LOG = logging.getLogger("nova.virt.xenapi.vif") + + +class XenAPIBridgeDriver(VIFDriver): + """VIF Driver for XenAPI that uses XenAPI to create Networks.""" + + def plug(self, xenapi_session, vm_ref, instance, device, network, + network_mapping): + if network_mapping.get('should_create_vlan'): + network_ref = self.ensure_vlan_bridge(xenapi_session, network) + else: + network_ref = NetworkHelper.find_network_with_bridge( + xenapi_session, network['bridge']) + rxtx_cap = network_mapping.pop('rxtx_cap') + vif_rec = {} + vif_rec['device'] = str(device) + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = network_mapping['mac'] + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = "ratelimit" if 
rxtx_cap else '' + vif_rec['qos_algorithm_params'] = \ + {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {} + return vif_rec + + def ensure_vlan_bridge(self, xenapi_session, network): + """Ensure that a VLAN bridge exists""" + + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + # Check whether bridge already exists + # Retrieve network whose name_label is "bridge" + network_ref = NetworkHelper.find_network_with_name_label( + xenapi_session, bridge) + if network_ref is None: + # If bridge does not exists + # 1 - create network + description = 'network for nova bridge %s' % bridge + network_rec = {'name_label': bridge, + 'name_description': description, + 'other_config': {}} + network_ref = xenapi_session.call_xenapi('network.create', + network_rec) + # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double + # quotes inside single quotes as xapi filter only support + # tokens in double quotes + expr = 'field "device" = "%s" and \ + field "VLAN" = "-1"' % bridge_interface + pifs = xenapi_session.call_xenapi('PIF.get_all_records_where', + expr) + pif_ref = None + # Multiple PIF are ok: we are dealing with a pool + if len(pifs) == 0: + raise Exception(_('Found no PIF for device %s') % \ + bridge_interface) + for pif_ref in pifs.keys(): + xenapi_session.call_xenapi('VLAN.create', + pif_ref, + str(vlan_num), + network_ref) + else: + # Check VLAN tag is appropriate + network_rec = xenapi_session.call_xenapi('network.get_record', + network_ref) + # Retrieve PIFs from network + for pif_ref in network_rec['PIFs']: + # Retrieve VLAN from PIF + pif_rec = xenapi_session.call_xenapi('PIF.get_record', + pif_ref) + pif_vlan = int(pif_rec['VLAN']) + # Raise an exception if VLAN != vlan_num + if pif_vlan != vlan_num: + raise Exception(_( + "PIF %(pif_rec['uuid'])s for network " + "%(bridge)s has VLAN id %(pif_vlan)d. 
" + "Expected %(vlan_num)d") % locals()) + + return network_ref + + def unplug(self, instance, network, mapping): + pass + + +class XenAPIOpenVswitchDriver(VIFDriver): + """VIF driver for Open vSwitch with XenAPI.""" + + def plug(self, xenapi_session, vm_ref, instance, device, network, + network_mapping): + # with OVS model, always plug into an OVS integration bridge + # that is already created + network_ref = NetworkHelper.find_network_with_bridge(xenapi_session, + FLAGS.xenapi_ovs_integration_bridge) + vif_rec = {} + vif_rec['device'] = str(device) + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = network_mapping['mac'] + vif_rec['MTU'] = '1500' + vif_id = "nova-" + str(instance['id']) + "-" + str(network['id']) + vif_rec['qos_algorithm_type'] = "" + vif_rec['qos_algorithm_params'] = {} + # OVS on the hypervisor monitors this key and uses it to + # set the iface-id attribute + vif_rec['other_config'] = {"nicira-iface-id": vif_id} + return vif_rec + + def unplug(self, instance, network, mapping): + pass diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 71107aff4..c9bcb801c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -37,7 +37,6 @@ import nova.image from nova.image import glance as glance_image_service from nova import log as logging from nova import utils -from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import disk @@ -85,38 +84,22 @@ class ImageType: DISK = 2 DISK_RAW = 3 DISK_VHD = 4 + _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD) KERNEL_STR = "kernel" RAMDISK_STR = "ramdisk" DISK_STR = "os" DISK_RAW_STR = "os_raw" DISK_VHD_STR = "vhd" + _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR) @classmethod def to_string(cls, image_type): - if image_type == ImageType.KERNEL: - return ImageType.KERNEL_STR - elif image_type == ImageType.RAMDISK: - return 
ImageType.RAMDISK_STR - elif image_type == ImageType.DISK: - return ImageType.DISK_STR - elif image_type == ImageType.DISK_RAW: - return ImageType.DISK_RAW_STR - elif image_type == ImageType.DISK_VHD: - return ImageType.VHD_STR + return dict(zip(ImageType._ids, ImageType._strs)).get(image_type) @classmethod def from_string(cls, image_type_str): - if image_type_str == ImageType.KERNEL_STR: - return ImageType.KERNEL - elif image_type == ImageType.RAMDISK_STR: - return ImageType.RAMDISK - elif image_type == ImageType.DISK_STR: - return ImageType.DISK - elif image_type == ImageType.DISK_RAW_STR: - return ImageType.DISK_RAW - elif image_type == ImageType.DISK_VHD_STR: - return ImageType.VHD + return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str) class VMHelper(HelperBase): @@ -283,28 +266,6 @@ class VMHelper(HelperBase): raise StorageError(_('Unable to destroy VDI %s') % vdi_ref) @classmethod - def create_vif(cls, session, vm_ref, network_ref, mac_address, - dev, rxtx_cap=0): - """Create a VIF record. 
Returns a Deferred that gives the new - VIF reference.""" - vif_rec = {} - vif_rec['device'] = str(dev) - vif_rec['network'] = network_ref - vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mac_address - vif_rec['MTU'] = '1500' - vif_rec['other_config'] = {} - vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else '' - vif_rec['qos_algorithm_params'] = \ - {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {} - LOG.debug(_('Creating VIF for VM %(vm_ref)s,' - ' network %(network_ref)s.') % locals()) - vif_ref = session.call_xenapi('VIF.create', vif_rec) - LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' - ' network %(network_ref)s.') % locals()) - return vif_ref - - @classmethod def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): """Create a VDI record and returns its reference.""" vdi_ref = session.get_xenapi().VDI.create( @@ -406,7 +367,7 @@ class VMHelper(HelperBase): session.wait_for_task(task, instance.id) @classmethod - def fetch_image(cls, session, instance_id, image, user, project, + def fetch_image(cls, session, instance_id, image, user_id, project_id, image_type): """ image_type is interpreted as an ImageType instance @@ -418,18 +379,23 @@ class VMHelper(HelperBase): Returns: A single filename if image_type is KERNEL_RAMDISK A list of dictionaries that describe VDIs, otherwise """ - access = AuthManager().get_access_key(user, project) if FLAGS.xenapi_image_service == 'glance': - return cls._fetch_image_glance(session, instance_id, image, - access, image_type) + return cls._fetch_image_glance(session, instance_id, + image, image_type) else: + # TODO(vish): this shouldn't be used anywhere anymore and + # can probably be removed + from nova.auth.manager import AuthManager + manager = AuthManager() + access = manager.get_access_key(user_id, project_id) + secret = manager.get_user(user_id).secret return cls._fetch_image_objectstore(session, instance_id, image, - access, user.secret, + access, secret, image_type) @classmethod - def 
_fetch_image_glance_vhd(cls, session, instance_id, image, access, + def _fetch_image_glance_vhd(cls, session, instance_id, image, image_type): """Tell glance to download an image and put the VHDs into the SR @@ -477,7 +443,7 @@ class VMHelper(HelperBase): return vdis @classmethod - def _fetch_image_glance_disk(cls, session, instance_id, image, access, + def _fetch_image_glance_disk(cls, session, instance_id, image, image_type): """Fetch the image from Glance @@ -611,8 +577,7 @@ class VMHelper(HelperBase): return image_type @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, - image_type): + def _fetch_image_glance(cls, session, instance_id, image, image_type): """Fetch image from glance based on image type. Returns: A single filename if image_type is KERNEL or RAMDISK @@ -620,10 +585,10 @@ class VMHelper(HelperBase): """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd( - session, instance_id, image, access, image_type) + session, instance_id, image, image_type) else: return cls._fetch_image_glance_disk( - session, instance_id, image, access, image_type) + session, instance_id, image, image_type) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7995576a6..7e02e1def 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -38,7 +38,6 @@ from nova import ipv6 from nova import log as logging from nova import utils -from nova.auth.manager import AuthManager from nova.compute import power_state from nova.virt import driver from nova.virt.xenapi.network_utils import NetworkHelper @@ -52,6 +51,9 @@ FLAGS = flags.FLAGS flags.DEFINE_integer('windows_version_timeout', 300, 'number of seconds to wait for windows agent to be ' 'fully operational') +flags.DEFINE_string('xenapi_vif_driver', + 'nova.virt.xenapi.vif.XenAPIBridgeDriver', + 'The XenAPI VIF driver using XenServer Network APIs.') def 
cmp_version(a, b): @@ -78,6 +80,7 @@ class VMOps(object): self._session = session self.poll_rescue_last_ran = None VMHelper.XenAPI = self.XenAPI + self.vif_driver = utils.import_object(FLAGS.xenapi_vif_driver) def list_instances(self): """List VM instances.""" @@ -106,17 +109,19 @@ class VMOps(object): instance_infos.append(instance_info) return instance_infos - def revert_resize(self, instance): + def revert_migration(self, instance): vm_ref = VMHelper.lookup(self._session, instance.name) self._start(instance, vm_ref) - def finish_resize(self, instance, disk_info, network_info): + def finish_migration(self, instance, disk_info, network_info, + resize_instance): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) vm_ref = self._create_vm(instance, [dict(vdi_type='os', vdi_uuid=vdi_uuid)], network_info) - self.resize_instance(instance, vdi_uuid) + if resize_instance: + self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) def _start(self, instance, vm_ref=None): @@ -130,11 +135,10 @@ class VMOps(object): self._session.call_xenapi('VM.start', vm_ref, False, False) def _create_disks(self, instance): - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(self._session, - instance.id, instance.image_ref, user, project, + instance.id, instance.image_ref, + instance.user_id, instance.project_id, disk_image_type) return vdis @@ -172,21 +176,18 @@ class VMOps(object): power_state.SHUTDOWN) return - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) - disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None ramdisk = None try: if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, - ImageType.KERNEL)[0] + instance.kernel_id, instance.user_id, + 
instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, - ImageType.RAMDISK)[0] + instance.kernel_id, instance.user_id, + instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdis[0]['vdi_uuid']) @@ -251,11 +252,11 @@ class VMOps(object): userdevice += 1 # Alter the image before VM start for, e.g. network injection - if FLAGS.xenapi_inject_image: + if FLAGS.flat_injected: VMHelper.preconfigure_instance(self._session, instance, first_vdi_ref, network_info) - self.create_vifs(vm_ref, network_info) + self.create_vifs(vm_ref, instance, network_info) self.inject_network_info(instance, network_info, vm_ref) return vm_ref @@ -564,18 +565,22 @@ class VMOps(object): return new_cow_uuid def resize_instance(self, instance, vdi_uuid): - """Resize a running instance by changing it's RAM and disk size.""" + """Resize a running instance by changing its RAM and disk size.""" #TODO(mdietz): this will need to be adjusted for swap later #The new disk size must be in bytes - new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024) - instance_name = instance.name - instance_local_gb = instance.local_gb - LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s." - " Expanding to %(instance_local_gb)d GB") % locals()) - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size) - LOG.debug(_("Resize instance %s complete") % (instance.name)) + new_disk_size = instance.local_gb * 1024 * 1024 * 1024 + if new_disk_size > 0: + instance_name = instance.name + instance_local_gb = instance.local_gb + LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance" + "%(instance_name)s. 
Expanding to %(instance_local_gb)d" + " GB") % locals()) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + # for an instance with no local storage + self._session.call_xenapi('VDI.resize_online', vdi_ref, + str(new_disk_size)) + LOG.debug(_("Resize instance %s complete") % (instance.name)) def reboot(self, instance): """Reboot VM instance.""" @@ -837,7 +842,7 @@ class VMOps(object): self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref) - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy VM instance. This is the method exposed by xenapi_conn.destroy(). The rest of the @@ -847,9 +852,9 @@ class VMOps(object): instance_id = instance.id LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals()) vm_ref = VMHelper.lookup(self._session, instance.name) - return self._destroy(instance, vm_ref, shutdown=True) + return self._destroy(instance, vm_ref, network_info, shutdown=True) - def _destroy(self, instance, vm_ref, shutdown=True, + def _destroy(self, instance, vm_ref, network_info=None, shutdown=True, destroy_kernel_ramdisk=True): """Destroys VM instance by performing: @@ -871,6 +876,10 @@ class VMOps(object): self._destroy_kernel_ramdisk(instance, vm_ref) self._destroy_vm(instance, vm_ref) + if network_info: + for (network, mapping) in network_info: + self.vif_driver.unplug(instance, network, mapping) + def _wait_with_callback(self, instance_id, task, callback): ret = None try: @@ -1066,7 +1075,7 @@ class VMOps(object): # catch KeyError for domid if instance isn't running pass - def create_vifs(self, vm_ref, network_info): + def create_vifs(self, vm_ref, instance, network_info): """Creates vifs for an instance.""" logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref) @@ -1075,14 +1084,19 @@ class VMOps(object): self._session.get_xenapi().VM.get_record(vm_ref) for device, (network, info) in enumerate(network_info): - mac_address = info['mac'] - bridge = network['bridge'] - rxtx_cap = 
info.pop('rxtx_cap') - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, - bridge) - VMHelper.create_vif(self._session, vm_ref, network_ref, - mac_address, device, rxtx_cap) + vif_rec = self.vif_driver.plug(self._session, + vm_ref, instance, device, network, info) + network_ref = vif_rec['network'] + LOG.debug(_('Creating VIF for VM %(vm_ref)s,' \ + ' network %(network_ref)s.') % locals()) + vif_ref = self._session.call_xenapi('VIF.create', vif_rec) + LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' + ' network %(network_ref)s.') % locals()) + + def plug_vifs(self, instance, network_info): + """Set up VIF networking on the host.""" + for (network, mapping) in network_info: + self.vif_driver.plug(self._session, instance, network, mapping) def reset_network(self, instance, vm_ref=None): """Creates uuid arg to pass to make_agent_call and calls it.""" diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4c6f9fe46..8704d1f86 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -112,22 +112,15 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', 5, 'Max number of times to poll for VHD to coalesce.' ' Used only if connection_type=xenapi.') -flags.DEFINE_bool('xenapi_inject_image', - True, - 'Specifies whether an attempt to inject network/key' - ' data into the disk image should be made.' - ' Used only if connection_type=xenapi.') flags.DEFINE_string('xenapi_agent_path', 'usr/sbin/xe-update-networking', 'Specifies the path in which the xenapi guest agent' ' should be located. If the agent is present,' ' network configuration is not injected into the image' ' Used only if connection_type=xenapi.' 
- ' and xenapi_inject_image=True') - + ' and flat_injected=True') flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount', 'Base path to the storage repository') - flags.DEFINE_string('target_host', None, 'iSCSI Target Host') @@ -198,19 +191,21 @@ class XenAPIConnection(driver.ComputeDriver): """Create VM instance""" self._vmops.spawn(instance, network_info) - def revert_resize(self, instance): + def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" self._vmops.revert_resize(instance) - def finish_resize(self, instance, disk_info, network_info): + def finish_migration(self, instance, disk_info, network_info, + resize_instance=False): """Completes a resize, turning on the migrated instance""" - self._vmops.finish_resize(instance, disk_info, network_info) + self._vmops.finish_migration(instance, disk_info, network_info, + resize_instance) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ self._vmops.snapshot(instance, image_id) - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot VM instance""" self._vmops.reboot(instance) @@ -224,9 +219,9 @@ class XenAPIConnection(driver.ComputeDriver): """ self._vmops.inject_file(instance, b64_path, b64_contents) - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy VM instance""" - self._vmops.destroy(instance) + self._vmops.destroy(instance, network_info) def pause(self, instance, callback): """Pause VM instance""" @@ -249,11 +244,11 @@ class XenAPIConnection(driver.ComputeDriver): """resume the specified instance""" self._vmops.resume(instance, callback) - def rescue(self, instance, callback): + def rescue(self, instance, callback, network_info): """Rescue the specified instance""" self._vmops.rescue(instance, callback) - def unrescue(self, instance, callback): + def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" 
self._vmops.unrescue(instance, callback) @@ -269,6 +264,9 @@ class XenAPIConnection(driver.ComputeDriver): """inject network info for specified instance""" self._vmops.inject_network_info(instance, network_info) + def plug_vifs(self, instance_ref, network_info): + self._vmops.plug_vifs(instance_ref, network_info) + def get_info(self, instance_id): """Return data about VM instance""" return self._vmops.get_info(instance_id) @@ -322,7 +320,7 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') |
