summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorTodd Willey <todd@ansolabs.com>2011-05-26 11:30:07 -0400
committerTodd Willey <todd@ansolabs.com>2011-05-26 11:30:07 -0400
commiteb90339f094da74eafcc6555907e247e25a8902b (patch)
treee5ac40e24ead1273497bca3dba6006f51f5a8d73 /nova/virt
parent2278f2886d369af285f914a7b5883d3a7a5727cc (diff)
parentdb18a792414240cbdb1221d0e79e8a63313f103e (diff)
Merge trunk.
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/connection.py2
-rw-r--r--nova/virt/disk.py50
-rw-r--r--nova/virt/fake.py5
-rw-r--r--nova/virt/hyperv.py21
-rw-r--r--nova/virt/images.py69
-rw-r--r--nova/virt/libvirt/__init__.py0
-rw-r--r--nova/virt/libvirt/connection.py (renamed from nova/virt/libvirt_conn.py)1227
-rw-r--r--nova/virt/libvirt/firewall.py798
-rw-r--r--nova/virt/libvirt/netutils.py97
-rw-r--r--nova/virt/vmwareapi/fake.py9
-rw-r--r--nova/virt/vmwareapi/vim.py1
-rw-r--r--nova/virt/vmwareapi/vmops.py43
-rw-r--r--nova/virt/vmwareapi_conn.py4
-rw-r--r--nova/virt/xenapi/fake.py2
-rw-r--r--nova/virt/xenapi/vm_utils.py53
-rw-r--r--nova/virt/xenapi/vmops.py123
-rw-r--r--nova/virt/xenapi/volume_utils.py43
-rw-r--r--nova/virt/xenapi/volumeops.py6
-rw-r--r--nova/virt/xenapi_conn.py181
19 files changed, 1495 insertions, 1239 deletions
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 99a8849f1..aeec17c98 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -27,9 +27,9 @@ from nova import utils
from nova.virt import driver
from nova.virt import fake
from nova.virt import hyperv
-from nova.virt import libvirt_conn
from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
+from nova.virt.libvirt import connection as libvirt_conn
LOG = logging.getLogger("nova.virt.connection")
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index ddea1a1f7..f8aea1f34 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
else:
mapped_device = device
- # We can only loopback mount raw images. If the device isn't there,
- # it's normally because it's a .vmdk or a .vdi etc
- if not os.path.exists(mapped_device):
- raise exception.Error('Mapped device was not found (we can'
- ' only inject raw disk images): %s' %
- mapped_device)
-
- # Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('sudo', 'tune2fs',
- '-c', 0, '-i', 0, mapped_device)
-
- tmpdir = tempfile.mkdtemp()
try:
- # mount loopback to dir
- out, err = utils.execute(
- 'sudo', 'mount', mapped_device, tmpdir)
- if err:
- raise exception.Error(_('Failed to mount filesystem: %s')
- % err)
-
+ # We can only loopback mount raw images. If the device isn't there,
+ # it's normally because it's a .vmdk or a .vdi etc
+ if not os.path.exists(mapped_device):
+ raise exception.Error('Mapped device was not found (we can'
+ ' only inject raw disk images): %s' %
+ mapped_device)
+
+ # Configure ext2fs so that it doesn't auto-check every N boots
+ out, err = utils.execute('sudo', 'tune2fs',
+ '-c', 0, '-i', 0, mapped_device)
+
+ tmpdir = tempfile.mkdtemp()
try:
- inject_data_into_fs(tmpdir, key, net, utils.execute)
+ # mount loopback to dir
+ out, err = utils.execute(
+ 'sudo', 'mount', mapped_device, tmpdir)
+ if err:
+ raise exception.Error(_('Failed to mount filesystem: %s')
+ % err)
+
+ try:
+ inject_data_into_fs(tmpdir, key, net, utils.execute)
+ finally:
+ # unmount device
+ utils.execute('sudo', 'umount', mapped_device)
finally:
- # unmount device
- utils.execute('sudo', 'umount', mapped_device)
+ # remove temporary directory
+ utils.execute('rmdir', tmpdir)
finally:
- # remove temporary directory
- utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
utils.execute('sudo', 'kpartx', '-d', device)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 3f030833e..970434c6c 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -288,8 +288,7 @@ class FakeConnection(driver.ComputeDriver):
knowledge of the instance
"""
if instance_name not in self.instances:
- raise exception.NotFound(_("Instance %s Not Found")
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
i = self.instances[instance_name]
return {'state': i.state,
'max_mem': 0,
@@ -368,7 +367,7 @@ class FakeConnection(driver.ComputeDriver):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
- return 'FAKE CONSOLE OUTPUT'
+ return 'FAKE CONSOLE\xffOUTPUT'
def get_ajax_console(self, instance):
return {'token': 'FAKETOKEN',
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 13f403a66..1142e97a4 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -143,8 +143,7 @@ class HyperVConnection(driver.ComputeDriver):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
- raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ raise exception.InstanceExists(name=instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -368,7 +367,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Reboot the specified instance."""
vm = self._lookup(instance.name)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
self._set_vm_state(instance.name, 'Reboot')
def destroy(self, instance):
@@ -412,7 +411,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Get information about the VM"""
vm = self._lookup(instance_id)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
@@ -474,14 +473,12 @@ class HyperVConnection(driver.ComputeDriver):
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm'
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s '
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def poll_rescued_instances(self, timeout):
pass
@@ -489,3 +486,11 @@ class HyperVConnection(driver.ComputeDriver):
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
+
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ pass
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 2e3f2ee4d..02c898fda 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,19 +21,10 @@
Handling of VM disk images.
"""
-import os.path
-import shutil
-import sys
-import time
-import urllib2
-import urlparse
-
from nova import context
from nova import flags
from nova import log as logging
from nova import utils
-from nova.auth import manager
-from nova.auth import signer
FLAGS = flags.FLAGS
@@ -52,66 +43,6 @@ def fetch(image_id, path, _user, _project):
return metadata
-# NOTE(vish): The methods below should be unnecessary, but I'm leaving
-# them in case the glance client does not work on windows.
-def _fetch_image_no_curl(url, path, headers):
- request = urllib2.Request(url)
- for (k, v) in headers.iteritems():
- request.add_header(k, v)
-
- def urlretrieve(urlfile, fpath):
- chunk = 1 * 1024 * 1024
- f = open(fpath, "wb")
- while 1:
- data = urlfile.read(chunk)
- if not data:
- break
- f.write(data)
-
- urlopened = urllib2.urlopen(request)
- urlretrieve(urlopened, path)
- LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())
-
-
-def _fetch_s3_image(image, path, user, project):
- url = image_url(image)
-
- # This should probably move somewhere else, like e.g. a download_as
- # method on User objects and at the same time get rewritten to use
- # a web client.
- headers = {}
- headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
-
- (_, _, url_path, _, _, _) = urlparse.urlparse(url)
- access = manager.AuthManager().get_access_key(user, project)
- signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
- 'GET',
- url_path)
- headers['Authorization'] = 'AWS %s:%s' % (access, signature)
-
- if sys.platform.startswith('win'):
- return _fetch_image_no_curl(url, path, headers)
- else:
- cmd = ['/usr/bin/curl', '--fail', '--silent', url]
- for (k, v) in headers.iteritems():
- cmd += ['-H', '\'%s: %s\'' % (k, v)]
-
- cmd += ['-o', path]
- return utils.execute(*cmd)
-
-
-def _fetch_local_image(image, path, user, project):
- source = _image_path(os.path.join(image, 'image'))
- if sys.platform.startswith('win'):
- return shutil.copy(source, path)
- else:
- return utils.execute('cp', source, path)
-
-
-def _image_path(path):
- return os.path.join(FLAGS.images_path, path)
-
-
# TODO(vish): xenapi should use the glance client code directly instead
# of retrieving the image using this method.
def image_url(image):
diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/virt/libvirt/__init__.py
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt/connection.py
index ae9e84120..dc2dd1219 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt/connection.py
@@ -58,7 +58,6 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-#from nova import test
from nova import utils
from nova import vnc
from nova.auth import manager
@@ -67,20 +66,23 @@ from nova.compute import power_state
from nova.virt import disk
from nova.virt import driver
from nova.virt import images
+from nova.virt.libvirt import netutils
+
libvirt = None
libxml2 = None
Template = None
+
LOG = logging.getLogger('nova.virt.libvirt_conn')
+
FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
-
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -102,7 +104,7 @@ flags.DEFINE_string('ajaxterm_portrange',
'10000-12000',
'Range of ports that ajaxterm should randomly try to bind')
flags.DEFINE_string('firewall_driver',
- 'nova.virt.libvirt_conn.IptablesFirewallDriver',
+ 'nova.virt.libvirt.firewall.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
flags.DEFINE_string('cpuinfo_xml_template',
utils.abspath('virt/cpuinfo.xml.template'),
@@ -117,6 +119,8 @@ flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
flags.DEFINE_string('qemu_img', 'qemu-img',
'binary to use for qemu-img commands')
+flags.DEFINE_bool('start_guests_on_host_boot', False,
+ 'Whether to restart guests when the host reboots')
def get_connection(read_only):
@@ -142,66 +146,6 @@ def _late_load_cheetah():
Template = t.Template
-def _get_net_and_mask(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.netmask())
-
-
-def _get_net_and_prefixlen(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.prefixlen())
-
-
-def _get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
-
-
-def _get_network_info(instance):
- # TODO(adiantum) If we will keep this function
- # we should cache network_info
- admin_context = context.get_admin_context()
-
- ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
- instance['id'])
-
- networks = db.network_get_all_by_instance(admin_context,
- instance['id'])
- network_info = []
-
- def ip_dict(ip):
- return {
- "ip": ip.address,
- "netmask": network["netmask"],
- "enabled": "1"}
-
- def ip6_dict(ip6):
- prefix = ip6.network.cidr_v6
- mac = instance.mac_address
- return {
- "ip": utils.to_global_ipv6(prefix, mac),
- "netmask": ip6.network.netmask_v6,
- "gateway": ip6.network.gateway_v6,
- "enabled": "1"}
-
- for network in networks:
- network_ips = [ip for ip in ip_addresses
- if ip.network_id == network.id]
-
- mapping = {
- 'label': network['label'],
- 'gateway': network['gateway'],
- 'mac': instance.mac_address,
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_ips]}
-
- if FLAGS.use_ipv6:
- mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips]
-
- network_info.append((network, mapping))
- return network_info
-
-
class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
@@ -209,7 +153,6 @@ class LibvirtConnection(driver.ComputeDriver):
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
- self.interfaces_xml = open(FLAGS.injected_network_template).read()
self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -231,12 +174,8 @@ class LibvirtConnection(driver.ComputeDriver):
{'name': instance['name'], 'state': state})
db.instance_set_state(ctxt, instance['id'], state)
- if state == power_state.SHUTOFF:
- # TODO(soren): This is what the compute manager does when you
- # terminate # an instance. At some point I figure we'll have a
- # "terminated" state and some sort of cleanup job that runs
- # occasionally, cleaning them out.
- db.instance_destroy(ctxt, instance['id'])
+ # NOTE(justinsb): We no longer delete SHUTOFF instances,
+ # the user may want to power them back on
if state != power_state.RUNNING:
continue
@@ -311,29 +250,62 @@ class LibvirtConnection(driver.ComputeDriver):
return infos
def destroy(self, instance, cleanup=True):
+ instance_name = instance['name']
+
try:
- virt_dom = self._conn.lookupByName(instance['name'])
- virt_dom.destroy()
- except Exception as _err:
- pass
- # If the instance is already terminated, we're still happy
-
- # We'll save this for when we do shutdown,
- # instead of destroy - but destroy returns immediately
- timer = utils.LoopingCall(f=None)
+ virt_dom = self._lookup_by_name(instance_name)
+ except exception.NotFound:
+ virt_dom = None
- while True:
+ # If the instance is already terminated, we're still happy
+ # Otherwise, destroy it
+ if virt_dom is not None:
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.SHUTDOWN:
- break
- except Exception:
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- break
+ virt_dom.destroy()
+ except libvirt.libvirtError as e:
+ is_okay = False
+ errcode = e.get_error_code()
+ if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
+                # If the instance is already shut off, we get this:
+ # Code=55 Error=Requested operation is not valid:
+ # domain is not running
+ (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
+ if state == power_state.SHUTOFF:
+ is_okay = True
+
+ if not is_okay:
+ LOG.warning(_("Error from libvirt during destroy of "
+ "%(instance_name)s. Code=%(errcode)s "
+ "Error=%(e)s") %
+ locals())
+ raise
+
+ try:
+ # NOTE(justinsb): We remove the domain definition. We probably
+ # would do better to keep it if cleanup=False (e.g. volumes?)
+ # (e.g. #2 - not losing machines on failure)
+ virt_dom.undefine()
+ except libvirt.libvirtError as e:
+ errcode = e.get_error_code()
+ LOG.warning(_("Error from libvirt during undefine of "
+ "%(instance_name)s. Code=%(errcode)s "
+ "Error=%(e)s") %
+ locals())
+ raise
+
+ def _wait_for_destroy():
+ """Called at an interval until the VM is gone."""
+ instance_name = instance['name']
+
+ try:
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("Instance %s destroyed successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_destroy)
+ timer.start(interval=0.5, now=True)
self.firewall_driver.unfilter_instance(instance)
@@ -354,7 +326,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
if device_path.startswith('/dev/'):
xml = """<disk type='block'>
@@ -372,7 +344,7 @@ class LibvirtConnection(driver.ComputeDriver):
name,
mount_device)
else:
- raise exception.Invalid(_("Invalid device path %s") % device_path)
+ raise exception.InvalidDevicePath(path=device_path)
virt_dom.attachDevice(xml)
@@ -391,18 +363,18 @@ class LibvirtConnection(driver.ComputeDriver):
if child.prop('dev') == device:
return str(node)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
- raise exception.NotFound(_("No disk at %s") % mount_device)
+ raise exception.DiskNotFound(location=mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
@@ -415,7 +387,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
image_service = utils.import_object(FLAGS.image_service)
- virt_dom = self._conn.lookupByName(instance['name'])
+ virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
base = image_service.show(elevated, instance['image_id'])
@@ -423,9 +395,8 @@ class LibvirtConnection(driver.ComputeDriver):
metadata = {'disk_format': base['disk_format'],
'container_format': base['container_format'],
'is_public': False,
+ 'name': '%s.%s' % (base['name'], image_id),
'properties': {'architecture': base['architecture'],
- 'type': base['type'],
- 'name': '%s.%s' % (base['name'], image_id),
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
@@ -452,12 +423,17 @@ class LibvirtConnection(driver.ComputeDriver):
# Export the snapshot to a raw image
temp_dir = tempfile.mkdtemp()
out_path = os.path.join(temp_dir, snapshot_name)
- qemu_img_cmd = '%s convert -f qcow2 -O raw -s %s %s %s' % (
- FLAGS.qemu_img,
- snapshot_name,
- disk_path,
- out_path)
- utils.execute(qemu_img_cmd)
+ qemu_img_cmd = (FLAGS.qemu_img,
+ 'convert',
+ '-f',
+ 'qcow2',
+ '-O',
+ 'raw',
+ '-s',
+ snapshot_name,
+ disk_path,
+ out_path)
+ utils.execute(*qemu_img_cmd)
# Upload that image to the image service
with open(out_path) as image_file:
@@ -471,31 +447,43 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def reboot(self, instance):
+ """Reboot a virtual machine, given an instance reference.
+
+ This method actually destroys and re-creates the domain to ensure the
+ reboot happens, as the guest OS cannot ignore this action.
+
+ """
+ virt_dom = self._conn.lookupByName(instance['name'])
+        # NOTE(itoumsn): Use XML derived from the running instance
+ # instead of using to_xml(instance). This is almost the ultimate
+ # stupid workaround.
+ xml = virt_dom.XMLDesc(0)
+ # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
+ # better because we cannot ensure flushing dirty buffers
+ # in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, False)
- xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
- self._conn.createXML(xml, 0)
+ self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_reboot():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_reboot failed: %s'), exn)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rebooted successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_reboot
+ timer = utils.LoopingCall(_wait_for_reboot)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
@@ -515,7 +503,15 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance, callback=None):
+ def rescue(self, instance):
+ """Loads a VM using rescue images.
+
+ A rescue is normally performed when something goes wrong with the
+ primary images and data needs to be corrected/recovered. Rescuing
+ should not edit or over-ride the original image, only allow for
+ data recovery.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -523,31 +519,35 @@ class LibvirtConnection(driver.ComputeDriver):
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
self._create_image(instance, xml, '.rescue', rescue_images)
- self._conn.createXML(xml, 0)
-
- timer = utils.LoopingCall(f=None)
+ self._create_new_domain(xml)
def _wait_for_rescue():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_rescue failed: %s'), exn)
- db.instance_set_state(None,
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+                msg = _("During rescue, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_rescue
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rescued successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_rescue)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance, callback=None):
- # NOTE(vish): Because reboot destroys and recreates an instance using
- # the normal xml file, we can just call reboot here
+ def unrescue(self, instance):
+ """Reboot the VM which is being rescued back into primary images.
+
+    Because reboot destroys and re-creates instances, unrescue should
+ simply call reboot.
+
+ """
self.reboot(instance)
@exception.wrap_exception
@@ -558,37 +558,36 @@ class LibvirtConnection(driver.ComputeDriver):
# for xenapi(tr3buchet)
@exception.wrap_exception
def spawn(self, instance, network_info=None):
- xml = self.to_xml(instance, network_info)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.NOSTATE,
- 'launching')
+ xml = self.to_xml(instance, False, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info)
- self._conn.createXML(xml, 0)
+ self._create_image(instance, xml, network_info=network_info)
+ domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
- timer = utils.LoopingCall(f=None)
+ if FLAGS.start_guests_on_host_boot:
+ LOG.debug(_("instance %s: setting autostart ON") %
+ instance['name'])
+ domain.setAutostart(1)
def _wait_for_boot():
+ """Called at an interval until the VM is running."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'])
- timer.stop()
- except:
- LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+                msg = _("During spawn, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s spawned successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_boot
+ timer = utils.LoopingCall(_wait_for_boot)
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
@@ -654,7 +653,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise Exception(_('Unable to find an open port'))
def get_pty_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = minidom.parseString(xml)
@@ -676,10 +675,13 @@ class LibvirtConnection(driver.ComputeDriver):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
+ def get_host_ip_addr(self):
+ return FLAGS.my_ip
+
@exception.wrap_exception
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO: use etree instead of minidom
dom = minidom.parseString(xml)
@@ -743,7 +745,7 @@ class LibvirtConnection(driver.ComputeDriver):
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None):
if not network_info:
- network_info = _get_network_info(inst)
+ network_info = netutils.get_network_info(inst)
if not suffix:
suffix = ''
@@ -797,7 +799,10 @@ class LibvirtConnection(driver.ComputeDriver):
root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
- if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
+
+ inst_type_id = inst['instance_type_id']
+ inst_type = instance_types.get_instance_type(inst_type_id)
+ if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
size = None
root_fname += "_sm"
@@ -809,14 +814,13 @@ class LibvirtConnection(driver.ComputeDriver):
user=user,
project=project,
size=size)
- type_data = instance_types.get_instance_type(inst['instance_type'])
- if type_data['local_gb']:
+ if inst_type['local_gb']:
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
- fname="local_%s" % type_data['local_gb'],
+ fname="local_%s" % inst_type['local_gb'],
cow=FLAGS.use_cow_images,
- local_gb=type_data['local_gb'])
+ local_gb=inst_type['local_gb'])
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
@@ -899,26 +903,16 @@ class LibvirtConnection(driver.ComputeDriver):
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
+ template = "<parameter name=\"%s\"value=\"%s\" />\n"
+ net, mask = netutils.get_net_and_mask(network['cidr'])
+ values = [("PROJNET", net), ("PROJMASK", mask)]
if FLAGS.use_ipv6:
- net, mask = _get_net_and_mask(network['cidr'])
- net_v6, prefixlen_v6 = _get_net_and_prefixlen(
+ net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJNETV6\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASKV6\" "
- "value=\"%s\" />\n") % \
- (net, mask, net_v6, prefixlen_v6)
- else:
- net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n") % \
- (net, mask)
+ values.extend([("PROJNETV6", net_v6),
+ ("PROJMASKV6", prefixlen_v6)])
+
+ extra_params = "".join([template % value for value in values])
else:
extra_params = "\n"
@@ -936,22 +930,18 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def to_xml(self, instance, rescue=False, network_info=None):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
- network_info = _get_network_info(instance)
+ network_info = netutils.get_network_info(instance)
nics = []
for (network, mapping) in network_info:
- nics.append(self._get_nic_for_xml(network,
- mapping))
+ nics.append(self._get_nic_for_xml(network, mapping))
# FIXME(vish): stick this in db
- instance_type_name = instance['instance_type']
- instance_type = instance_types.get_instance_type(instance_type_name)
+ inst_type_id = instance['instance_type_id']
+ inst_type = instance_types.get_instance_type(inst_type_id)
if FLAGS.use_cow_images:
driver_type = 'qcow2'
@@ -962,10 +952,10 @@ class LibvirtConnection(driver.ComputeDriver):
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
instance['name']),
- 'memory_kb': instance_type['memory_mb'] * 1024,
- 'vcpus': instance_type['vcpus'],
+ 'memory_kb': inst_type['memory_mb'] * 1024,
+ 'vcpus': inst_type['vcpus'],
'rescue': rescue,
- 'local': instance_type['local_gb'],
+ 'local': inst_type['local_gb'],
'driver_type': driver_type,
'nics': nics}
@@ -980,18 +970,43 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
xml_info['disk'] = xml_info['basepath'] + "/disk"
+ return xml_info
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ xml_info = self._prepare_xml_info(instance, rescue, network_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- LOG.debug(_('instance %s: finished toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
- def get_info(self, instance_name):
+ def _lookup_by_name(self, instance_name):
+ """Retrieve libvirt domain object given an instance name.
+
+ All libvirt error handling should be handled in this method and
+ relevant nova exceptions should be raised in response.
+
+ """
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except:
- raise exception.NotFound(_("Instance %s not found")
- % instance_name)
+ return self._conn.lookupByName(instance_name)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+
+ msg = _("Error from libvirt while looking up %(instance_name)s: "
+ "[Error Code %(error_code)s] %(ex)s") % locals()
+ raise exception.Error(msg)
+
+ def get_info(self, instance_name):
+ """Retrieve information from libvirt for a specific instance name.
+
+ If a libvirt error is encountered during lookup, we might raise a
+ NotFound exception or Error exception depending on how severe the
+ libvirt error is.
+
+ """
+ virt_dom = self._lookup_by_name(instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -999,6 +1014,24 @@ class LibvirtConnection(driver.ComputeDriver):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
+ def _create_new_domain(self, xml, persistent=True, launch_flags=0):
+ # NOTE(justinsb): libvirt has two types of domain:
+ # * a transient domain disappears when the guest is shutdown
+ # or the host is rebooted.
+ # * a permanent domain is not automatically deleted
+ # NOTE(justinsb): Even for ephemeral instances, transient seems risky
+
+ if persistent:
+ # To create a persistent domain, first define it, then launch it.
+ domain = self._conn.defineXML(xml)
+
+ domain.createWithFlags(launch_flags)
+ else:
+ # createXML call creates a transient domain
+ domain = self._conn.createXML(xml, launch_flags)
+
+ return domain
+
def get_diagnostics(self, instance_name):
raise exception.ApiError(_("diagnostics are not supported "
"for libvirt"))
@@ -1010,7 +1043,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all block devices for this domain.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1033,14 +1066,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
disks.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return disks
@@ -1052,7 +1085,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all network interfaces for this instance.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1075,14 +1108,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
interfaces.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return interfaces
@@ -1218,9 +1251,9 @@ class LibvirtConnection(driver.ComputeDriver):
xml = libxml2.parseDoc(xml)
nodes = xml.xpathEval('//host/cpu')
if len(nodes) != 1:
- raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
- "but %d\n") % len(nodes)
- + xml.serialize())
+ reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+ reason += xml.serialize()
+ raise exception.InvalidCPUInfo(reason=reason)
cpu_info = dict()
@@ -1249,9 +1282,8 @@ class LibvirtConnection(driver.ComputeDriver):
tkeys = topology.keys()
if set(tkeys) != set(keys):
ks = ', '.join(keys)
- raise exception.Invalid(_("Invalid xml: topology"
- "(%(topology)s) must have "
- "%(ks)s") % locals())
+ reason = _("topology (%(topology)s) must have %(ks)s")
+ raise exception.InvalidCPUInfo(reason=reason % locals())
feature_nodes = xml.xpathEval('//host/cpu/feature')
features = list()
@@ -1267,7 +1299,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
@@ -1275,7 +1307,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
@@ -1309,9 +1341,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
except exception.NotFound:
- raise exception.Invalid(_("Cannot update compute manager "
- "specific info, because no service "
- "record was found."))
+ raise exception.ComputeServiceUnavailable(host=host)
# Updating host information
dic = {'vcpus': self.get_vcpu_total(),
@@ -1364,7 +1394,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise
if ret <= 0:
- raise exception.Invalid(m % locals())
+ raise exception.InvalidCPUInfo(reason=m % locals())
return
@@ -1405,18 +1435,13 @@ class LibvirtConnection(driver.ComputeDriver):
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
while timeout_count:
- try:
- filter_name = 'nova-instance-%s' % instance_ref.name
- self._conn.nwfilterLookupByName(filter_name)
+ if self.firewall_driver.instance_filter_exists(instance_ref):
break
- except libvirt.libvirtError:
- timeout_count.pop()
- if len(timeout_count) == 0:
- ec2_id = instance_ref['hostname']
- iname = instance_ref.name
- msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
- raise exception.Error(msg % locals())
- time.sleep(1)
+ timeout_count.pop()
+ if len(timeout_count) == 0:
+ msg = _('Timeout migrating for %s. nwfilter not found.')
+ raise exception.Error(msg % instance_ref.name)
+ time.sleep(1)
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method):
@@ -1479,7 +1504,7 @@ class LibvirtConnection(driver.ComputeDriver):
FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref)
+ recover_method(ctxt, instance_ref, dest=dest)
raise
# Waiting for completion of live_migration.
@@ -1500,725 +1525,11 @@ class LibvirtConnection(driver.ComputeDriver):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref)
-
-class FirewallDriver(object):
- def prepare_instance_filter(self, instance, network_info=None):
- """Prepare filters for the instance.
-
- At this point, the instance isn't running yet."""
- raise NotImplementedError()
-
- def unfilter_instance(self, instance):
- """Stop filtering instance"""
- raise NotImplementedError()
-
- def apply_instance_filter(self, instance):
- """Apply instance filter.
-
- Once this method returns, the instance should be firewalled
- appropriately. This method should as far as possible be a
- no-op. It's vastly preferred to get everything set up in
- prepare_instance_filter.
- """
- raise NotImplementedError()
-
- def refresh_security_group_rules(self, security_group_id):
- """Refresh security group rules from data store
-
- Gets called when a rule has been added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def refresh_security_group_members(self, security_group_id):
- """Refresh security group members from data store
-
- Gets called when an instance gets added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def refresh_provider_fw_rules(self):
- """Refresh common rules for all hosts/instances from data store.
-
- Gets called when a rule has been added to or removed from
- the list of rules (via admin api).
-
- """
- raise NotImplementedError()
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Create rules to block spoofing and allow dhcp.
-
- This gets called when spawning an instance, before
- :method:`prepare_instance_filter`.
-
- """
- raise NotImplementedError()
-
-
-class NWFilterFirewall(FirewallDriver):
- """
- This class implements a network filtering mechanism versatile
- enough for EC2 style Security Group filtering by leveraging
- libvirt's nwfilter.
-
- First, all instances get a filter ("nova-base-filter") applied.
- This filter provides some basic security such as protection against
- MAC spoofing, IP spoofing, and ARP spoofing.
-
- This filter drops all incoming ipv4 and ipv6 connections.
- Outgoing connections are never blocked.
-
- Second, every security group maps to a nwfilter filter(*).
- NWFilters can be updated at runtime and changes are applied
- immediately, so changes to security groups can be applied at
- runtime (as mandated by the spec).
-
- Security group rules are named "nova-secgroup-<id>" where <id>
- is the internal id of the security group. They're applied only on
- hosts that have instances in the security group in question.
-
- Updates to security groups are done by updating the data model
- (in response to API calls) followed by a request sent to all
- the nodes with instances in the security group to refresh the
- security group.
-
- Each instance has its own NWFilter, which references the above
- mentioned security group NWFilters. This was done because
- interfaces can only reference one filter while filters can
- reference multiple other filters. This has the added benefit of
- actually being able to add and remove security groups from an
- instance at run time. This functionality is not exposed anywhere,
- though.
-
- Outstanding questions:
-
- The name is unique, so would there be any good reason to sync
- the uuid across the nodes (by assigning it from the datamodel)?
-
-
- (*) This sentence brought to you by the redundancy department of
- redundancy.
-
- """
-
- def __init__(self, get_connection, **kwargs):
- self._libvirt_get_connection = get_connection
- self.static_filters_configured = False
- self.handle_security_groups = False
-
- def apply_instance_filter(self, instance):
- """No-op. Everything is done in prepare_instance_filter"""
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
pass
- def _get_connection(self):
- return self._libvirt_get_connection()
- _conn = property(_get_connection)
-
- def nova_dhcp_filter(self):
- """The standard allow-dhcp-server filter is an <ip> one, so it uses
- ebtables to allow traffic through. Without a corresponding rule in
- iptables, it'll get blocked anyway."""
-
- return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
- <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
- <rule action='accept' direction='out'
- priority='100'>
- <udp srcipaddr='0.0.0.0'
- dstipaddr='255.255.255.255'
- srcportstart='68'
- dstportstart='67'/>
- </rule>
- <rule action='accept' direction='in'
- priority='100'>
- <udp srcipaddr='$DHCPSERVER'
- srcportstart='67'
- dstportstart='68'/>
- </rule>
- </filter>'''
-
- def nova_ra_filter(self):
- return '''<filter name='nova-allow-ra-server' chain='root'>
- <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
- <rule action='accept' direction='inout'
- priority='100'>
- <icmpv6 srcipaddr='$RASERVER'/>
- </rule>
- </filter>'''
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
- logging.info('called setup_basic_filtering in nwfilter')
-
- if not network_info:
- network_info = _get_network_info(instance)
-
- if self.handle_security_groups:
- # No point in setting up a filter set that we'll be overriding
- # anyway.
- return
-
- logging.info('ensuring static filters')
- self._ensure_static_filters()
-
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
-
- def _ensure_static_filters(self):
- """Static filters are filters that have no need to be IP aware.
-
- There is no configuration or tuneability of these filters, so they
- can be set up once and forgotten about.
-
- """
-
- if self.static_filters_configured:
- return
-
- self._define_filter(self._filter_container('nova-base',
- ['no-mac-spoofing',
- 'no-ip-spoofing',
- 'no-arp-spoofing',
- 'allow-dhcp-server']))
- self._define_filter(self.nova_base_ipv4_filter)
- self._define_filter(self.nova_base_ipv6_filter)
- self._define_filter(self.nova_dhcp_filter)
- self._define_filter(self.nova_ra_filter)
- self._define_filter(self.nova_vpn_filter)
- if FLAGS.allow_project_net_traffic:
- self._define_filter(self.nova_project_filter)
- if FLAGS.use_ipv6:
- self._define_filter(self.nova_project_filter_v6)
-
- self.static_filters_configured = True
-
- def _filter_container(self, name, filters):
- xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
- name,
- ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
- return xml
-
- nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
- <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
- def nova_base_ipv4_filter(self):
- retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
- for protocol in ['tcp', 'udp', 'icmp']:
- for direction, action, priority in [('out', 'accept', 399),
- ('in', 'drop', 400)]:
- retval += """<rule action='%s' direction='%s' priority='%d'>
- <%s />
- </rule>""" % (action, direction,
- priority, protocol)
- retval += '</filter>'
- return retval
-
- def nova_base_ipv6_filter(self):
- retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
- for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
- for direction, action, priority in [('out', 'accept', 399),
- ('in', 'drop', 400)]:
- retval += """<rule action='%s' direction='%s' priority='%d'>
- <%s />
- </rule>""" % (action, direction,
- priority, protocol)
- retval += '</filter>'
- return retval
-
- def nova_project_filter(self):
- retval = "<filter name='nova-project' chain='ipv4'>"
- for protocol in ['tcp', 'udp', 'icmp']:
- retval += """<rule action='accept' direction='in' priority='200'>
- <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
- </rule>""" % protocol
- retval += '</filter>'
- return retval
-
- def nova_project_filter_v6(self):
- retval = "<filter name='nova-project-v6' chain='ipv6'>"
- for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
- retval += """<rule action='accept' direction='inout'
- priority='200'>
- <%s srcipaddr='$PROJNETV6'
- srcipmask='$PROJMASKV6' />
- </rule>""" % (protocol)
- retval += '</filter>'
- return retval
-
- def _define_filter(self, xml):
- if callable(xml):
- xml = xml()
- # execute in a native thread and block current greenthread until done
- tpool.execute(self._conn.nwfilterDefineXML, xml)
-
- def unfilter_instance(self, instance):
- # Nothing to do
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
pass
- def prepare_instance_filter(self, instance, network_info=None):
- """Creates an NWFilter for the given instance.
-
- In the process, it makes sure the filters for the provider blocks,
- security groups, and base filter are all in place.
-
- """
- if not network_info:
- network_info = _get_network_info(instance)
- if instance['image_id'] == FLAGS.vpn_image_id:
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
-
- self.refresh_provider_fw_rules()
-
- ctxt = context.get_admin_context()
-
- instance_secgroup_filter_name = \
- '%s-secgroup' % (self._instance_filter_name(instance))
-
- instance_secgroup_filter_children = ['nova-base-ipv4',
- 'nova-base-ipv6',
- 'nova-allow-dhcp-server']
-
- for security_group in \
- db.security_group_get_by_instance(ctxt, instance['id']):
-
- self.refresh_security_group_rules(security_group['id'])
-
- instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
-
- self._define_filter(
- self._filter_container(instance_secgroup_filter_name,
- instance_secgroup_filter_children))
-
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = [base_filter, 'nova-provider-rules',
- instance_secgroup_filter_name]
-
- if FLAGS.use_ipv6:
- gateway_v6 = network['gateway_v6']
-
- if gateway_v6:
- instance_secgroup_filter_children += \
- ['nova-allow-ra-server']
-
- if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
- if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
-
- self._define_filter(
- self._filter_container(instance_filter_name,
- instance_filter_children))
-
- return
-
- def refresh_security_group_rules(self, security_group_id):
- return self._define_filter(
- self.security_group_to_nwfilter_xml(security_group_id))
-
- def refresh_provider_fw_rules(self):
- """Update rules for all instances.
-
- This is part of the FirewallDriver API and is called when the
- provider firewall rules change in the database. In the
- `prepare_instance_filter` we add a reference to the
- 'nova-provider-rules' filter for each instance's firewall, and
- by changing that filter we update them all.
-
- """
- xml = self.provider_fw_to_nwfilter_xml()
- return self._define_filter(xml)
-
- def security_group_to_nwfilter_xml(self, security_group_id):
- security_group = db.security_group_get(context.get_admin_context(),
- security_group_id)
- rule_xml = ""
- v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
- for rule in security_group.rules:
- rule_xml += "<rule action='accept' direction='in' priority='300'>"
- if rule.cidr:
- version = _get_ip_version(rule.cidr)
- if(FLAGS.use_ipv6 and version == 6):
- net, prefixlen = _get_net_and_prefixlen(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (v6protocol[rule.protocol], net, prefixlen)
- else:
- net, mask = _get_net_and_mask(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (rule.protocol, net, mask)
- if rule.protocol in ['tcp', 'udp']:
- rule_xml += "dstportstart='%s' dstportend='%s' " % \
- (rule.from_port, rule.to_port)
- elif rule.protocol == 'icmp':
- LOG.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r', rule.protocol,
- rule.from_port, rule.to_port)
- if rule.from_port != -1:
- rule_xml += "type='%s' " % rule.from_port
- if rule.to_port != -1:
- rule_xml += "code='%s' " % rule.to_port
-
- rule_xml += '/>\n'
- rule_xml += "</rule>\n"
- xml = "<filter name='nova-secgroup-%s' " % security_group_id
- if(FLAGS.use_ipv6):
- xml += "chain='root'>%s</filter>" % rule_xml
- else:
- xml += "chain='ipv4'>%s</filter>" % rule_xml
- return xml
-
- def provider_fw_to_nwfilter_xml(self):
- """Compose a filter of drop rules from specified cidrs."""
- rule_xml = ""
- v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
- rules = db.provider_fw_rule_get_all(context.get_admin_context())
- for rule in rules:
- rule_xml += "<rule action='block' direction='in' priority='150'>"
- version = _get_ip_version(rule.cidr)
- if(FLAGS.use_ipv6 and version == 6):
- net, prefixlen = _get_net_and_prefixlen(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (v6protocol[rule.protocol], net, prefixlen)
- else:
- net, mask = _get_net_and_mask(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (rule.protocol, net, mask)
- if rule.protocol in ['tcp', 'udp']:
- rule_xml += "dstportstart='%s' dstportend='%s' " % \
- (rule.from_port, rule.to_port)
- elif rule.protocol == 'icmp':
- LOG.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r', rule.protocol,
- rule.from_port, rule.to_port)
- if rule.from_port != -1:
- rule_xml += "type='%s' " % rule.from_port
- if rule.to_port != -1:
- rule_xml += "code='%s' " % rule.to_port
-
- rule_xml += '/>\n'
- rule_xml += "</rule>\n"
- xml = "<filter name='nova-provider-rules' "
- if(FLAGS.use_ipv6):
- xml += "chain='root'>%s</filter>" % rule_xml
- else:
- xml += "chain='ipv4'>%s</filter>" % rule_xml
- return xml
-
- def _instance_filter_name(self, instance, nic_id=None):
- if not nic_id:
- return 'nova-instance-%s' % (instance['name'])
- return 'nova-instance-%s-%s' % (instance['name'], nic_id)
-
-
-class IptablesFirewallDriver(FirewallDriver):
- def __init__(self, execute=None, **kwargs):
- from nova.network import linux_net
- self.iptables = linux_net.iptables_manager
- self.instances = {}
- self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
- self.basicly_filtered = False
-
- self.iptables.ipv4['filter'].add_chain('sg-fallback')
- self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
- self.iptables.ipv6['filter'].add_chain('sg-fallback')
- self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Set up provider rules and basic NWFilter."""
- if not network_info:
- network_info = _get_network_info(instance)
- self.nwfilter.setup_basic_filtering(instance, network_info)
- if not self.basicly_filtered:
- LOG.debug("Setup Basic Filtering")
- self.refresh_provider_fw_rules()
- self.basicly_filtered = True
-
- def apply_instance_filter(self, instance):
- """No-op. Everything is done in prepare_instance_filter"""
- pass
-
- def unfilter_instance(self, instance):
- if self.instances.pop(instance['id'], None):
- self.remove_filters_for_instance(instance)
- self.iptables.apply()
- else:
- LOG.info(_('Attempted to unfilter instance %s which is not '
- 'filtered'), instance['id'])
-
- def prepare_instance_filter(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- self.instances[instance['id']] = instance
- self.add_filters_for_instance(instance, network_info)
- self.iptables.apply()
-
- def add_filters_for_instance(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].add_chain(chain_name)
- self.iptables.ipv4['filter'].empty_chain(chain_name)
-
- ips_v4 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ips']]
-
- for ipv4_address in ips_v4:
- self.iptables.ipv4['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv4_address, chain_name))
-
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- self.iptables.ipv6['filter'].empty_chain(chain_name)
- ips_v6 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ip6s']]
-
- for ipv6_address in ips_v6:
- self.iptables.ipv6['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv6_address,
- chain_name))
-
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
- for rule in ipv4_rules:
- self.iptables.ipv4['filter'].add_rule(chain_name, rule)
-
- if FLAGS.use_ipv6:
- for rule in ipv6_rules:
- self.iptables.ipv6['filter'].add_rule(chain_name, rule)
-
- def remove_filters_for_instance(self, instance):
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].remove_chain(chain_name)
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].remove_chain(chain_name)
-
- def instance_rules(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- ctxt = context.get_admin_context()
-
- ipv4_rules = []
- ipv6_rules = []
-
- # Always drop invalid packets
- ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
- ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
-
- # Pass through provider-wide drops
- ipv4_rules += ['-j $provider']
- ipv6_rules += ['-j $provider']
-
- # Allow established connections
- ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
- ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
-
- dhcp_servers = [network['gateway'] for (network, _m) in network_info]
-
- for dhcp_server in dhcp_servers:
- ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT' % (dhcp_server,))
-
- #Allow project network traffic
- if FLAGS.allow_project_net_traffic:
- cidrs = [network['cidr'] for (network, _m) in network_info]
- for cidr in cidrs:
- ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
-
- # We wrap these in FLAGS.use_ipv6 because they might cause
- # a DB lookup. The other ones are just list operations, so
- # they're not worth the clutter.
- if FLAGS.use_ipv6:
- # Allow RA responses
- gateways_v6 = [network['gateway_v6'] for (network, _m) in
- network_info]
- for gateway_v6 in gateways_v6:
- ipv6_rules.append(
- '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
-
- #Allow project network traffic
- if FLAGS.allow_project_net_traffic:
- cidrv6s = [network['cidr_v6'] for (network, _m)
- in network_info]
-
- for cidrv6 in cidrv6s:
- ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
-
- security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
-
- # then, security group chains and rules
- for security_group in security_groups:
- rules = db.security_group_rule_get_by_security_group(ctxt,
- security_group['id'])
-
- for rule in rules:
- LOG.debug(_("Adding security group rule: %r"), rule)
-
- if not rule.cidr:
- # Eventually, a mechanism to grant access for security
- # groups will turn up here. It'll use ipsets.
- continue
-
- version = _get_ip_version(rule.cidr)
- if version == 4:
- fw_rules = ipv4_rules
- else:
- fw_rules = ipv6_rules
-
- protocol = rule.protocol
- if version == 6 and rule.protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-p', protocol, '-s', rule.cidr]
-
- if rule.protocol in ['udp', 'tcp']:
- if rule.from_port == rule.to_port:
- args += ['--dport', '%s' % (rule.from_port,)]
- else:
- args += ['-m', 'multiport',
- '--dports', '%s:%s' % (rule.from_port,
- rule.to_port)]
- elif rule.protocol == 'icmp':
- icmp_type = rule.from_port
- icmp_code = rule.to_port
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
-
- args += ['-j ACCEPT']
- fw_rules += [' '.join(args)]
-
- ipv4_rules += ['-j $sg-fallback']
- ipv6_rules += ['-j $sg-fallback']
-
- return ipv4_rules, ipv6_rules
-
- def refresh_security_group_members(self, security_group):
- pass
-
- def refresh_security_group_rules(self, security_group):
- self.do_refresh_security_group_rules(security_group)
- self.iptables.apply()
-
- @utils.synchronized('iptables', external=True)
- def do_refresh_security_group_rules(self, security_group):
- for instance in self.instances.values():
- self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance)
-
- def refresh_provider_fw_rules(self):
- """See class:FirewallDriver: docs."""
- self._do_refresh_provider_fw_rules()
- self.iptables.apply()
-
- @utils.synchronized('iptables', external=True)
- def _do_refresh_provider_fw_rules(self):
- """Internal, synchronized version of refresh_provider_fw_rules."""
- self._purge_provider_fw_rules()
- self._build_provider_fw_rules()
-
- def _purge_provider_fw_rules(self):
- """Remove all rules from the provider chains."""
- self.iptables.ipv4['filter'].empty_chain('provider')
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].empty_chain('provider')
-
- def _build_provider_fw_rules(self):
- """Create all rules for the provider IP DROPs."""
- self.iptables.ipv4['filter'].add_chain('provider')
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain('provider')
- ipv4_rules, ipv6_rules = self._provider_rules()
- for rule in ipv4_rules:
- self.iptables.ipv4['filter'].add_rule('provider', rule)
-
- if FLAGS.use_ipv6:
- for rule in ipv6_rules:
- self.iptables.ipv6['filter'].add_rule('provider', rule)
-
- def _provider_rules(self):
- """Generate a list of rules from provider for IP4 & IP6."""
- ctxt = context.get_admin_context()
- ipv4_rules = []
- ipv6_rules = []
- rules = db.provider_fw_rule_get_all(ctxt)
- for rule in rules:
- LOG.debug(_('Adding prvider rule: %r'), rule)
-
- if not rule.cidr:
- # Eventually, a mechanism to grant access for security
- # groups will turn up here. It'll use ipsets.
- continue
-
- version = _get_ip_version(rule.cidr)
- if version == 4:
- fw_rules = ipv4_rules
- else:
- fw_rules = ipv6_rules
-
- protocol = rule.protocol
- if version == 6 and protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-p', protocol, '-s', rule.cidr]
-
- if protocol in ['udp', 'tcp']:
- if rule.from_port == rule.to_port:
- args += ['--dport', '%s' % (rule.from_port,)]
- else:
- args += ['-m', 'multiport',
- '--dports', '%s:%s' % (rule.from_port,
- rule.to_port)]
- elif protocol == 'icmp':
- icmp_type = rule.from_port
- icmp_code = rule.to_port
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
- args += ['-j DROP']
- fw_rules += [' '.join(args)]
- return ipv4_rules, ipv6_rules
-
- def _security_group_chain_name(self, security_group_id):
- return 'nova-sg-%s' % (security_group_id,)
-
- def _instance_chain_name(self, instance):
- return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
new file mode 100644
index 000000000..fd4c120a6
--- /dev/null
+++ b/nova/virt/libvirt/firewall.py
@@ -0,0 +1,798 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from eventlet import tpool
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.libvirt import netutils
+
+
+LOG = logging.getLogger("nova.virt.libvirt.firewall")
+FLAGS = flags.FLAGS
+
+
+try:
+ import libvirt
+except ImportError:
+ LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
+ "not work correctly."))
+
+
+class FirewallDriver(object):
+ def prepare_instance_filter(self, instance, network_info=None):
+ """Prepare filters for the instance.
+
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
+
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_provider_fw_rules(self):
+ """Refresh common rules for all hosts/instances from data store.
+
+ Gets called when a rule has been added to or removed from
+ the list of rules (via admin api).
+
+ """
+ raise NotImplementedError()
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Create rules to block spoofing and allow dhcp.
+
+ This gets called when spawning an instance, before
+ :method:`prepare_instance_filter`.
+
+ """
+ raise NotImplementedError()
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ raise NotImplementedError()
+
+
+class NWFilterFirewall(FirewallDriver):
+ """
+ This class implements a network filtering mechanism versatile
+ enough for EC2 style Security Group filtering by leveraging
+ libvirt's nwfilter.
+
+ First, all instances get a filter ("nova-base-filter") applied.
+ This filter provides some basic security such as protection against
+ MAC spoofing, IP spoofing, and ARP spoofing.
+
+ This filter drops all incoming ipv4 and ipv6 connections.
+ Outgoing connections are never blocked.
+
+ Second, every security group maps to a nwfilter filter(*).
+ NWFilters can be updated at runtime and changes are applied
+ immediately, so changes to security groups can be applied at
+ runtime (as mandated by the spec).
+
+ Security group rules are named "nova-secgroup-<id>" where <id>
+ is the internal id of the security group. They're applied only on
+ hosts that have instances in the security group in question.
+
+ Updates to security groups are done by updating the data model
+ (in response to API calls) followed by a request sent to all
+ the nodes with instances in the security group to refresh the
+ security group.
+
+ Each instance has its own NWFilter, which references the above
+ mentioned security group NWFilters. This was done because
+ interfaces can only reference one filter while filters can
+ reference multiple other filters. This has the added benefit of
+ actually being able to add and remove security groups from an
+ instance at run time. This functionality is not exposed anywhere,
+ though.
+
+ Outstanding questions:
+
+ The name is unique, so would there be any good reason to sync
+ the uuid across the nodes (by assigning it from the datamodel)?
+
+
+ (*) This sentence brought to you by the redundancy department of
+ redundancy.
+
+ """
+
+ def __init__(self, get_connection, **kwargs):
+ self._libvirt_get_connection = get_connection
+ self.static_filters_configured = False
+ self.handle_security_groups = False
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def _get_connection(self):
+ return self._libvirt_get_connection()
+ _conn = property(_get_connection)
+
+ def nova_dhcp_filter(self):
+ """The standard allow-dhcp-server filter is an <ip> one, so it uses
+ ebtables to allow traffic through. Without a corresponding rule in
+ iptables, it'll get blocked anyway."""
+
+ return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in'
+ priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def nova_ra_filter(self):
+ return '''<filter name='nova-allow-ra-server' chain='root'>
+ <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
+ <rule action='accept' direction='inout'
+ priority='100'>
+ <icmpv6 srcipaddr='$RASERVER'/>
+ </rule>
+ </filter>'''
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+        LOG.info('called setup_basic_filtering in nwfilter')
+
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+
+ if self.handle_security_groups:
+ # No point in setting up a filter set that we'll be overriding
+ # anyway.
+ return
+
+        LOG.info('ensuring static filters')
+ self._ensure_static_filters()
+
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ self._define_filter(self._filter_container(instance_filter_name,
+ [base_filter]))
+
+ def _ensure_static_filters(self):
+ """Static filters are filters that have no need to be IP aware.
+
+ There is no configuration or tuneability of these filters, so they
+ can be set up once and forgotten about.
+
+ """
+
+ if self.static_filters_configured:
+ return
+
+ self._define_filter(self._filter_container('nova-base',
+ ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']))
+ self._define_filter(self._filter_container('nova-vpn',
+ ['allow-dhcp-server']))
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_ra_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
+ if FLAGS.use_ipv6:
+ self._define_filter(self.nova_project_filter_v6)
+
+ self.static_filters_configured = True
+
+ def _filter_container(self, name, filters):
+ xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+ name,
+ ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+ return xml
+
+ def nova_base_ipv4_filter(self):
+ retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('in', 'drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+ def nova_base_ipv6_filter(self):
+ retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
+ for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('in', 'drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+ def nova_project_filter(self):
+ retval = "<filter name='nova-project' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ retval += """<rule action='accept' direction='in' priority='200'>
+ <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
+ </rule>""" % protocol
+ retval += '</filter>'
+ return retval
+
+ def nova_project_filter_v6(self):
+ retval = "<filter name='nova-project-v6' chain='ipv6'>"
+ for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+ retval += """<rule action='accept' direction='inout'
+ priority='200'>
+ <%s srcipaddr='$PROJNETV6'
+ srcipmask='$PROJMASKV6' />
+ </rule>""" % (protocol)
+ retval += '</filter>'
+ return retval
+
+ def _define_filter(self, xml):
+ if callable(xml):
+ xml = xml()
+ # execute in a native thread and block current greenthread until done
+ tpool.execute(self._conn.nwfilterDefineXML, xml)
+
+ def unfilter_instance(self, instance):
+ # Nothing to do
+ pass
+
+ def prepare_instance_filter(self, instance, network_info=None):
+ """Creates an NWFilter for the given instance.
+
+ In the process, it makes sure the filters for the provider blocks,
+ security groups, and base filter are all in place.
+
+ """
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+
+ self.refresh_provider_fw_rules()
+
+ ctxt = context.get_admin_context()
+
+ instance_secgroup_filter_name = \
+ '%s-secgroup' % (self._instance_filter_name(instance))
+
+ instance_secgroup_filter_children = ['nova-base-ipv4',
+ 'nova-base-ipv6',
+ 'nova-allow-dhcp-server']
+
+ if FLAGS.use_ipv6:
+ networks = [network for (network, _m) in network_info if
+ network['gateway_v6']]
+
+ if networks:
+ instance_secgroup_filter_children.\
+ append('nova-allow-ra-server')
+
+ for security_group in \
+ db.security_group_get_by_instance(ctxt, instance['id']):
+
+ self.refresh_security_group_rules(security_group['id'])
+
+ instance_secgroup_filter_children.append('nova-secgroup-%s' %
+ security_group['id'])
+
+ self._define_filter(
+ self._filter_container(instance_secgroup_filter_name,
+ instance_secgroup_filter_children))
+
+ network_filters = self.\
+ _create_network_filters(instance, network_info,
+ instance_secgroup_filter_name)
+
+ for (name, children) in network_filters:
+ self._define_filters(name, children)
+
+ def _create_network_filters(self, instance, network_info,
+ instance_secgroup_filter_name):
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ result = []
+ for (_n, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter, 'nova-provider-rules',
+ instance_secgroup_filter_name]
+
+ if FLAGS.allow_project_net_traffic:
+ instance_filter_children.append('nova-project')
+ if FLAGS.use_ipv6:
+ instance_filter_children.append('nova-project-v6')
+
+ result.append((instance_filter_name, instance_filter_children))
+
+ return result
+
+ def _define_filters(self, filter_name, filter_children):
+        self._define_filter(self._filter_container(filter_name,
+ filter_children))
+
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
+ return self._define_filter(
+ self.security_group_to_nwfilter_xml(security_group_id))
+
+ def refresh_provider_fw_rules(self):
+ """Update rules for all instances.
+
+ This is part of the FirewallDriver API and is called when the
+ provider firewall rules change in the database. In the
+ `prepare_instance_filter` we add a reference to the
+ 'nova-provider-rules' filter for each instance's firewall, and
+ by changing that filter we update them all.
+
+ """
+ xml = self.provider_fw_to_nwfilter_xml()
+ return self._define_filter(xml)
+
+ def security_group_to_nwfilter_xml(self, security_group_id):
+ security_group = db.security_group_get(context.get_admin_context(),
+ security_group_id)
+ rule_xml = ""
+ v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
+ for rule in security_group.rules:
+ rule_xml += "<rule action='accept' direction='in' priority='300'>"
+ if rule.cidr:
+ version = netutils.get_ip_version(rule.cidr)
+ if(FLAGS.use_ipv6 and version == 6):
+ net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (v6protocol[rule.protocol], net, prefixlen)
+ else:
+ net, mask = netutils.get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = "<filter name='nova-secgroup-%s' " % security_group_id
+ if(FLAGS.use_ipv6):
+ xml += "chain='root'>%s</filter>" % rule_xml
+ else:
+ xml += "chain='ipv4'>%s</filter>" % rule_xml
+ return xml
+
+ def provider_fw_to_nwfilter_xml(self):
+ """Compose a filter of drop rules from specified cidrs."""
+ rule_xml = ""
+ v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
+ rules = db.provider_fw_rule_get_all(context.get_admin_context())
+ for rule in rules:
+ rule_xml += "<rule action='block' direction='in' priority='150'>"
+ version = netutils.get_ip_version(rule.cidr)
+ if(FLAGS.use_ipv6 and version == 6):
+ net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (v6protocol[rule.protocol], net, prefixlen)
+ else:
+ net, mask = netutils.get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = "<filter name='nova-provider-rules' "
+ if(FLAGS.use_ipv6):
+ xml += "chain='root'>%s</filter>" % rule_xml
+ else:
+ xml += "chain='ipv4'>%s</filter>" % rule_xml
+ return xml
+
+ def _instance_filter_name(self, instance, nic_id=None):
+ if not nic_id:
+ return 'nova-instance-%s' % (instance['name'])
+ return 'nova-instance-%s-%s' % (instance['name'], nic_id)
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ network_info = netutils.get_network_info(instance)
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ try:
+ self._conn.nwfilterLookupByName(instance_filter_name)
+ except libvirt.libvirtError:
+ name = instance.name
+ LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
+ '%(name)s is not found.') % locals())
+ return False
+ return True
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ def __init__(self, execute=None, **kwargs):
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
+ self.instances = {}
+ self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ self.basicly_filtered = False
+
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Set up provider rules and basic NWFilter."""
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ self.nwfilter.setup_basic_filtering(instance, network_info)
+ if not self.basicly_filtered:
+ LOG.debug(_("iptables firewall: Setup Basic Filtering"))
+ self.refresh_provider_fw_rules()
+ self.basicly_filtered = True
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def unfilter_instance(self, instance):
+ if self.instances.pop(instance['id'], None):
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
+ else:
+ LOG.info(_('Attempted to unfilter instance %s which is not '
+ 'filtered'), instance['id'])
+
+ def prepare_instance_filter(self, instance, network_info=None):
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ self.instances[instance['id']] = instance
+ self.add_filters_for_instance(instance, network_info)
+ self.iptables.apply()
+
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
+
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
+
+ ipv6_rules = []
+ if FLAGS.use_ipv6:
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
+
+ return ipv4_rules, ipv6_rules
+
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def add_filters_for_instance(self, instance, network_info=None):
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ def instance_rules(self, instance, network_info=None):
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ ctxt = context.get_admin_context()
+
+ ipv4_rules = []
+ ipv6_rules = []
+
+ # Always drop invalid packets
+ ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
+ ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
+
+ # Pass through provider-wide drops
+ ipv4_rules += ['-j $provider']
+ ipv6_rules += ['-j $provider']
+
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+
+ dhcp_servers = [network['gateway'] for (network, _m) in network_info]
+
+ for dhcp_server in dhcp_servers:
+ ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrs = [network['cidr'] for (network, _m) in network_info]
+ for cidr in cidrs:
+ ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
+
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ gateways_v6 = [network['gateway_v6'] for (network, _m) in
+ network_info]
+ for gateway_v6 in gateways_v6:
+ ipv6_rules.append(
+ '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrv6s = [network['cidr_v6'] for (network, _m)
+ in network_info]
+
+ for cidrv6 in cidrv6s:
+ ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
+
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+ LOG.debug(_("Adding security group rule: %r"), rule)
+
+ if not rule.cidr:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
+ continue
+
+ version = netutils.get_ip_version(rule.cidr)
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
+ protocol = rule.protocol
+ if version == 6 and rule.protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-p', protocol, '-s', rule.cidr]
+
+ if rule.protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif rule.protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ args += ['-m', 'icmp', '--icmp-type',
+ icmp_type_arg]
+ elif version == 6:
+ args += ['-m', 'icmp6', '--icmpv6-type',
+ icmp_type_arg]
+
+ args += ['-j ACCEPT']
+ fw_rules += [' '.join(args)]
+
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
+
+ return ipv4_rules, ipv6_rules
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ return self.nwfilter.instance_filter_exists(instance)
+
+ def refresh_security_group_members(self, security_group):
+ pass
+
+ def refresh_security_group_rules(self, security_group, network_info=None):
+ self.do_refresh_security_group_rules(security_group, network_info)
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self,
+ security_group,
+ network_info=None):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ self.add_filters_for_instance(instance, network_info)
+
+ def refresh_provider_fw_rules(self):
+ """See class:FirewallDriver: docs."""
+ self._do_refresh_provider_fw_rules()
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def _do_refresh_provider_fw_rules(self):
+ """Internal, synchronized version of refresh_provider_fw_rules."""
+ self._purge_provider_fw_rules()
+ self._build_provider_fw_rules()
+
+ def _purge_provider_fw_rules(self):
+ """Remove all rules from the provider chains."""
+ self.iptables.ipv4['filter'].empty_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].empty_chain('provider')
+
+ def _build_provider_fw_rules(self):
+ """Create all rules for the provider IP DROPs."""
+ self.iptables.ipv4['filter'].add_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain('provider')
+ ipv4_rules, ipv6_rules = self._provider_rules()
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule('provider', rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule('provider', rule)
+
+ def _provider_rules(self):
+ """Generate a list of rules from provider for IP4 & IP6."""
+ ctxt = context.get_admin_context()
+ ipv4_rules = []
+ ipv6_rules = []
+ rules = db.provider_fw_rule_get_all(ctxt)
+ for rule in rules:
+            LOG.debug(_('Adding provider rule: %r'), rule)
+ version = netutils.get_ip_version(rule.cidr)
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
+ protocol = rule.protocol
+ if version == 6 and protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-p', protocol, '-s', rule.cidr]
+
+ if protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ args += ['-m', 'icmp', '--icmp-type',
+ icmp_type_arg]
+ elif version == 6:
+ args += ['-m', 'icmp6', '--icmpv6-type',
+ icmp_type_arg]
+ args += ['-j DROP']
+ fw_rules += [' '.join(args)]
+ return ipv4_rules, ipv6_rules
+
+ def _security_group_chain_name(self, security_group_id):
+ return 'nova-sg-%s' % (security_group_id,)
+
+ def _instance_chain_name(self, instance):
+ return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
new file mode 100644
index 000000000..4d596078a
--- /dev/null
+++ b/nova/virt/libvirt/netutils.py
@@ -0,0 +1,97 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""Network-related utilities for supporting libvirt connection code."""
+
+
+import IPy
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import ipv6
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+
+def get_net_and_mask(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.netmask())
+
+
+def get_net_and_prefixlen(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.prefixlen())
+
+
+def get_ip_version(cidr):
+ net = IPy.IP(cidr)
+ return int(net.version())
+
+
+def get_network_info(instance):
+ # TODO(adiantum) If we will keep this function
+ # we should cache network_info
+ admin_context = context.get_admin_context()
+
+ ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ flavor = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
+ network_info = []
+
+ for network in networks:
+ network_ips = [ip for ip in ip_addresses
+ if ip['network_id'] == network['id']]
+
+ def ip_dict(ip):
+ return {
+ 'ip': ip['address'],
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
+
+ def ip6_dict():
+ prefix = network['cidr_v6']
+ mac = instance['mac_address']
+ project_id = instance['project_id']
+ return {
+ 'ip': ipv6.to_global(prefix, mac, project_id),
+ 'netmask': network['netmask_v6'],
+ 'enabled': '1'}
+
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
+ 'mac': instance['mac_address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_ips]}
+
+ if FLAGS.use_ipv6:
+ mapping['ip6s'] = [ip6_dict()]
+ mapping['gateway6'] = network['gateway_v6']
+
+ network_info.append((network, mapping))
+ return network_info
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 4bb467fa9..7370684bd 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -387,12 +387,11 @@ def _add_file(file_path):
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
- raise exception.NotFound(_("File- '%s' is not there in the "
- "datastore") % file_path)
+ raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
@@ -579,7 +578,7 @@ class FakeVim(object):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
@@ -591,7 +590,7 @@ class FakeVim(object):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 159e16a80..0cbdba363 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -43,6 +43,7 @@ flags.DEFINE_string('vmwareapi_wsdl_loc',
if suds:
+
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index cf6c88bbd..c3e79a92f 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -100,8 +100,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
- raise exception.Duplicate(_("Attempted to create a VM with a name"
- " %s, but that already exists on the host") % instance.name)
+ raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -116,8 +115,7 @@ class VMWareVMOps(object):
network_utils.get_network_with_the_name(self._session,
net_name)
if network_ref is None:
- raise exception.NotFound(_("Network with the name '%s' doesn't"
- " exist on the ESX host") % net_name)
+ raise exception.NetworkNotFoundForBridge(bridge=net_name)
_check_if_network_bridge_exists()
@@ -337,8 +335,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -388,8 +385,7 @@ class VMWareVMOps(object):
"VirtualMachine",
"datastore")
if not ds_ref_ret:
- raise exception.NotFound(_("Failed to get the datastore "
- "reference(s) which the VM uses"))
+ raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
@@ -480,8 +476,7 @@ class VMWareVMOps(object):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -501,8 +496,8 @@ class VMWareVMOps(object):
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
- raise exception.Invalid(_("instance - %s not poweredOn. So can't "
- "be rebooted.") % instance.name)
+ reason = _("instance is not powered on")
+ raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -605,8 +600,7 @@ class VMWareVMOps(object):
"""Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -620,8 +614,9 @@ class VMWareVMOps(object):
LOG.debug(_("Suspended the VM %s ") % instance.name)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- raise exception.Invalid(_("instance - %s is poweredOff and hence "
- " can't be suspended.") % instance.name)
+ reason = _("instance is powered off and can not be suspended.")
+ raise exception.InstanceSuspendFailure(reason=reason)
+
LOG.debug(_("VM %s was already in suspended state. So returning "
"without doing anything") % instance.name)
@@ -629,8 +624,7 @@ class VMWareVMOps(object):
"""Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -643,15 +637,14 @@ class VMWareVMOps(object):
self._wait_with_callback(instance.id, suspend_task, callback)
LOG.debug(_("Resumed the VM %s ") % instance.name)
else:
- raise exception.Invalid(_("instance - %s not in Suspended state "
- "and hence can't be Resumed.") % instance.name)
+ reason = _("instance is not in a suspended state")
+ raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance_name):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance_name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -687,8 +680,7 @@ class VMWareVMOps(object):
"""Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -716,8 +708,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
mac_addr = instance.mac_address
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 20c1b2b45..1c6d2572d 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -42,6 +42,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
@@ -104,11 +105,12 @@ def get_connection(_):
api_retry_count)
-class VMWareESXConnection(object):
+class VMWareESXConnection(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
+ super(VMWareESXConnection, self).__init__()
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = VMWareVMOps(session)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4434dbf0b..e36ef3288 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -294,7 +294,7 @@ class Failure(Exception):
def __str__(self):
try:
return str(self.details)
- except Exception, exc:
+ except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index d07d60800..9f6cd608c 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -28,10 +28,7 @@ import urllib
import uuid
from xml.dom import minidom
-from eventlet import event
import glance.client
-from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -49,6 +46,10 @@ LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
FLAGS = flags.FLAGS
flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
+flags.DEFINE_integer('block_device_creation_timeout', 10,
+ 'time to wait for a block device to be created')
+flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024,
+ 'maximum size in bytes of kernel or ramdisk images')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -101,8 +102,8 @@ class VMHelper(HelperBase):
3. Using hardware virtualization
"""
- instance_type = instance_types.\
- get_instance_type(instance.instance_type)
+ inst_type_id = instance.instance_type_id
+ instance_type = instance_types.get_instance_type(inst_type_id)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@@ -169,8 +170,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
- instance_type = instance_types.get_instance_type(
- instance.instance_type)
+ inst_type_id = instance.instance_type_id
+ instance_type = instance_types.get_instance_type(inst_type_id)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -304,7 +305,6 @@ class VMHelper(HelperBase):
% locals())
vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
- vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
@@ -446,6 +446,12 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
+ elif image_type == ImageType.KERNEL_RAMDISK and \
+ vdi_size > FLAGS.max_kernel_ramdisk_size:
+ max_size = FLAGS.max_kernel_ramdisk_size
+ raise exception.Error(
+ _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
+ "max %(max_size)d bytes") % locals())
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
@@ -508,9 +514,7 @@ class VMHelper(HelperBase):
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
- raise exception.NotFound(
- _("Unrecognized disk_format '%(disk_format)s'")
- % locals())
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
def determine_from_instance():
if instance.kernel_id:
@@ -645,8 +649,7 @@ class VMHelper(HelperBase):
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') %
- name_label)
+ raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
@@ -753,14 +756,14 @@ class VMHelper(HelperBase):
session.call_xenapi('SR.scan', sr_ref)
-def get_rrd(host, uuid):
+def get_rrd(host, vm_uuid):
"""Return the VM RRD XML as a string"""
try:
xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
FLAGS.xenapi_connection_username,
FLAGS.xenapi_connection_password,
host,
- uuid))
+ vm_uuid))
return xml.read()
except IOError:
return None
@@ -855,7 +858,7 @@ def safe_find_sr(session):
"""
sr_ref = find_sr(session)
if sr_ref is None:
- raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ raise exception.StorageRepositoryNotFound()
return sr_ref
@@ -896,6 +899,16 @@ def remap_vbd_dev(dev):
return remapped_dev
+def _wait_for_device(dev):
+ """Wait for device node to appear"""
+ for i in xrange(0, FLAGS.block_device_creation_timeout):
+ if os.path.exists('/dev/%s' % dev):
+ return
+ time.sleep(1)
+
+ raise StorageError(_('Timeout waiting for device %s to be created') % dev)
+
+
def with_vdi_attached_here(session, vdi_ref, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
@@ -924,6 +937,11 @@ def with_vdi_attached_here(session, vdi_ref, read_only, f):
if dev != orig_dev:
LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s') % locals())
+ if dev != 'autodetect':
+ # NOTE(johannes): Unit tests will end up with a device called
+ # 'autodetect' which obviously won't exist. It's not ideal,
+ # but the alternatives were much messier
+ _wait_for_device(dev)
return f(dev)
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
@@ -1003,7 +1021,6 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
def _write_partition(virtual_size, dev):
dest = '/dev/%s' % dev
- mbr_last = MBR_SIZE_SECTORS - 1
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
@@ -1130,7 +1147,7 @@ def _prepare_injectables(inst, networks_info):
'dns': dns,
'address_v6': ip_v6 and ip_v6['ip'] or '',
'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
- 'gateway_v6': ip_v6 and ip_v6['gateway'] or '',
+ 'gateway_v6': ip_v6 and info['gateway6'] or '',
'use_ipv6': FLAGS.use_ipv6}
interfaces_info.append(interface_info)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c96c35a6e..be6ef48ea 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -25,15 +25,15 @@ import M2Crypto
import os
import pickle
import subprocess
-import tempfile
import uuid
-from nova import db
from nova import context
-from nova import log as logging
+from nova import db
from nova import exception
-from nova import utils
from nova import flags
+from nova import ipv6
+from nova import log as logging
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
@@ -127,8 +127,7 @@ class VMOps(object):
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is not None:
- raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance_name)
+ raise exception.InstanceExists(name=instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
@@ -176,7 +175,7 @@ class VMOps(object):
vdi_ref, network_info)
self.create_vifs(vm_ref, network_info)
- self.inject_network_info(instance, vm_ref, network_info)
+ self.inject_network_info(instance, network_info, vm_ref)
return vm_ref
def _spawn(self, instance, vm_ref):
@@ -203,6 +202,13 @@ class VMOps(object):
for path, contents in instance.injected_files:
LOG.debug(_("Injecting file path: '%s'") % path)
self.inject_file(instance, path, contents)
+
+ def _set_admin_password():
+ admin_password = instance.admin_pass
+ if admin_password:
+ LOG.debug(_("Setting admin password"))
+ self.set_admin_password(instance, admin_password)
+
# NOTE(armando): Do we really need to do this in virt?
# NOTE(tr3buchet): not sure but wherever we do it, we need to call
# reset_network afterwards
@@ -211,20 +217,15 @@ class VMOps(object):
def _wait_for_boot():
try:
state = self.get_info(instance_name)['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_files()
+ _set_admin_password()
return True
except Exception, exc:
LOG.warn(exc)
- LOG.exception(_('instance %s: failed to boot'),
- instance_name)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
+ LOG.exception(_('Instance %s: failed to boot'), instance_name)
timer.stop()
return False
@@ -260,8 +261,8 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(
- _('Instance not present %s') % instance_name)
+ raise exception.NotFound(_("No opaque_ref could be determined "
+ "for '%s'.") % instance_or_vm)
return vm_ref
def _acquire_bootlock(self, vm):
@@ -387,7 +388,6 @@ class VMOps(object):
def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin."""
- vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4())
new_cow_uuid = str(uuid.uuid4())
params = {'instance_id': instance.id,
@@ -437,11 +437,12 @@ class VMOps(object):
"""
# Need to uniquely identify this request.
- transaction_id = str(uuid.uuid4())
+ key_init_transaction_id = str(uuid.uuid4())
# The simple Diffie-Hellman class is used to manage key exchange.
dh = SimpleDH()
- args = {'id': transaction_id, 'pub': str(dh.get_public())}
- resp = self._make_agent_call('key_init', instance, '', args)
+ key_init_args = {'id': key_init_transaction_id,
+ 'pub': str(dh.get_public())}
+ resp = self._make_agent_call('key_init', instance, '', key_init_args)
if resp is None:
# No response from the agent
return
@@ -455,8 +456,9 @@ class VMOps(object):
dh.compute_shared(agent_pub)
enc_pass = dh.encrypt(new_pass)
# Send the encrypted password
- args['enc_pass'] = enc_pass
- resp = self._make_agent_call('password', instance, '', args)
+ password_transaction_id = str(uuid.uuid4())
+ password_args = {'id': password_transaction_id, 'enc_pass': enc_pass}
+ resp = self._make_agent_call('password', instance, '', password_args)
if resp is None:
# No response from the agent
return
@@ -464,6 +466,9 @@ class VMOps(object):
# Successful return code from password is '0'
if resp_dict['returncode'] != '0':
raise RuntimeError(resp_dict['message'])
+ db.instance_update(context.get_admin_context(),
+ instance['id'],
+ dict(admin_pass=new_pass))
return resp_dict['message']
def inject_file(self, instance, path, contents):
@@ -579,9 +584,8 @@ class VMOps(object):
if not (instance.kernel_id and instance.ramdisk_id):
# 2. We only have kernel xor ramdisk
- raise exception.NotFound(
- _("Instance %(instance_id)s has a kernel or ramdisk but not "
- "both" % locals()))
+ raise exception.InstanceUnacceptable(instance_id=instance_id,
+ reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
@@ -722,8 +726,7 @@ class VMOps(object):
"%s-rescue" % instance.name)
if not rescue_vm_ref:
- raise exception.NotFound(_(
- "Instance is not in Rescue Mode: %s" % instance.name))
+ raise exception.InstanceNotInRescueMode(instance_id=instance.id)
original_vm_ref = VMHelper.lookup(self._session, instance.name)
instance._rescue = False
@@ -760,7 +763,6 @@ class VMOps(object):
instance)))
for vm in rescue_vms:
- rescue_name = vm["name"]
rescue_vm_ref = vm["vm_ref"]
self._destroy_rescue_instance(rescue_vm_ref)
@@ -798,15 +800,17 @@ class VMOps(object):
def _get_network_info(self, instance):
"""Creates network info list for instance."""
admin_context = context.get_admin_context()
- IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ ips = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
- flavor = db.instance_type_get_by_name(admin_context,
- instance['instance_type'])
+
+ inst_type = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
+
network_info = []
for network in networks:
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+ network_ips = [ip for ip in ips if ip.network_id == network.id]
def ip_dict(ip):
return {
@@ -814,12 +818,12 @@ class VMOps(object):
"netmask": network["netmask"],
"enabled": "1"}
- def ip6_dict(ip6):
+ def ip6_dict():
return {
- "ip": utils.to_global_ipv6(network['cidr_v6'],
- instance['mac_address']),
+ "ip": ipv6.to_global(network['cidr_v6'],
+ instance['mac_address'],
+ instance['project_id']),
"netmask": network['netmask_v6'],
- "gateway": network['gateway_v6'],
"enabled": "1"}
info = {
@@ -827,23 +831,41 @@ class VMOps(object):
'gateway': network['gateway'],
'broadcast': network['broadcast'],
'mac': instance.mac_address,
- 'rxtx_cap': flavor['rxtx_cap'],
+ 'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_ips]}
if network['cidr_v6']:
- info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
+ info['ip6s'] = [ip6_dict()]
+ if network['gateway_v6']:
+ info['gateway6'] = network['gateway_v6']
network_info.append((network, info))
return network_info
- def inject_network_info(self, instance, vm_ref, network_info):
+ #TODO{tr3buchet) remove this shim with nova-multi-nic
+ def inject_network_info(self, instance, network_info=None, vm_ref=None):
+ """
+ shim in place which makes inject_network_info work without being
+ passed network_info.
+ shim goes away after nova-multi-nic
+ """
+ if not network_info:
+ network_info = self._get_network_info(instance)
+ self._inject_network_info(instance, network_info, vm_ref)
+
+ def _inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
+ vm_ref can be passed in because it will sometimes be different than
+ what VMHelper.lookup(session, instance.name) will find (ex: rescue)
"""
logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
- # this function raises if vm_ref is not a vm_opaque_ref
- self._session.get_xenapi().VM.get_record(vm_ref)
+ if vm_ref:
+ # this function raises if vm_ref is not a vm_opaque_ref
+ self._session.get_xenapi().VM.get_record(vm_ref)
+ else:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
for (network, info) in network_info:
location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
@@ -875,8 +897,10 @@ class VMOps(object):
VMHelper.create_vif(self._session, vm_ref, network_ref,
mac_address, device, rxtx_cap)
- def reset_network(self, instance, vm_ref):
+ def reset_network(self, instance, vm_ref=None):
"""Creates uuid arg to pass to make_agent_call and calls it."""
+ if not vm_ref:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
args = {'id': str(uuid.uuid4())}
# TODO(tr3buchet): fix function call after refactor
#resp = self._make_agent_call('resetnetwork', instance, '', args)
@@ -902,7 +926,7 @@ class VMOps(object):
try:
ret = self._make_xenstore_call('read_record', vm, path,
{'ignore_missing_path': 'True'})
- except self.XenAPI.Failure, e:
+ except self.XenAPI.Failure:
return None
ret = json.loads(ret)
if ret == "None":
@@ -1150,23 +1174,22 @@ class SimpleDH(object):
return mpi
def _run_ssl(self, text, which):
- base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc '
- '-a -pass pass:%(shared)s -nosalt %(dec_flag)s')
+ base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s '
+ '-nosalt %(dec_flag)s')
if which.lower()[0] == 'd':
dec_flag = ' -d'
else:
dec_flag = ''
- fd, tmpfile = tempfile.mkstemp()
- os.close(fd)
- file(tmpfile, 'w').write(text)
shared = self._shared
cmd = base_cmd % locals()
proc = _runproc(cmd)
+ proc.stdin.write(text + '\n')
+ proc.stdin.close()
proc.wait()
err = proc.stderr.read()
if err:
raise RuntimeError(_('OpenSSL error: %s') % err)
- return proc.stdout.read()
+ return proc.stdout.read().strip('\n')
def encrypt(self, text):
return self._run_ssl(text, 'enc')
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 72284ac02..7821a4f7e 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -204,14 +204,17 @@ def _get_volume_id(path_or_id):
if isinstance(path_or_id, int):
return path_or_id
# n must contain at least the volume_id
- # /vol- is for remote volumes
- # -vol- is for local volumes
+ # :volume- is for remote volumes
+ # -volume- is for local volumes
# see compute/manager->setup_compute_volume
- volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
+ volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
if volume_id == path_or_id:
- volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
- volume_id = volume_id.replace('--', '-')
- return volume_id
+ volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
+ volume_id = volume_id.replace('volume--', '')
+ else:
+ volume_id = volume_id.replace('volume-', '')
+ volume_id = volume_id[0:volume_id.find('-')]
+ return int(volume_id)
def _get_target_host(iscsi_string):
@@ -244,25 +247,23 @@ def _get_target(volume_id):
Gets iscsi name and portal from volume name and host.
For this method to work the following are needed:
1) volume_ref['host'] must resolve to something rather than loopback
- 2) ietd must bind only to the address as resolved above
- If any of the two conditions are not met, fall back on Flags.
"""
- volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(),
- volume_id)
+ volume_ref = db.volume_get(context.get_admin_context(),
+ volume_id)
result = (None, None)
try:
- (r, _e) = utils.execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" %
- volume_ref['host'])
+ (r, _e) = utils.execute('sudo', 'iscsiadm',
+ '-m', 'discovery',
+ '-t', 'sendtargets',
+ '-p', volume_ref['host'])
except exception.ProcessExecutionError, exc:
LOG.exception(exc)
else:
- targets = r.splitlines()
- if len(_e) == 0 and len(targets) == 1:
- for target in targets:
- if volume_id in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- iscsi_portal = location.split(",")[0]
- result = (iscsi_name, iscsi_portal)
+ volume_name = "volume-%08x" % volume_id
+ for target in r.splitlines():
+ if FLAGS.iscsi_ip_prefix in target and volume_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ iscsi_portal = location.split(",")[0]
+ result = (iscsi_name, iscsi_portal)
return result
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 757ecf5ad..afcb8cf47 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -45,8 +45,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
" %(mountpoint)s") % locals())
@@ -98,8 +97,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 99fd35c61..6d828e109 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -57,19 +57,24 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
- suffix "_rec" for record objects
"""
+import json
+import random
import sys
import urlparse
import xmlrpclib
from eventlet import event
from eventlet import tpool
+from eventlet import timeout
from nova import context
from nova import db
+from nova import exception
from nova import utils
from nova import flags
from nova import log as logging
from nova.virt import driver
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -140,6 +145,9 @@ flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
'Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)')
+flags.DEFINE_integer('xenapi_login_timeout',
+ 10,
+ 'Timeout in seconds for XenAPI login.')
def get_connection(_):
@@ -161,9 +169,16 @@ class XenAPIConnection(driver.ComputeDriver):
def __init__(self, url, user, pw):
super(XenAPIConnection, self).__init__()
- session = XenAPISession(url, user, pw)
- self._vmops = VMOps(session)
- self._volumeops = VolumeOps(session)
+ self._session = XenAPISession(url, user, pw)
+ self._vmops = VMOps(self._session)
+ self._volumeops = VolumeOps(self._session)
+ self._host_state = None
+
+ @property
+ def HostState(self):
+ if not self._host_state:
+ self._host_state = HostState(self._session)
+ return self._host_state
def init_host(self, host):
#FIXME(armando): implement this
@@ -311,6 +326,16 @@ class XenAPIConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
+ def update_host_status(self):
+ """Update the status info of the host, and return those values
+ to the calling program."""
+ return self.HostState.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first."""
+ return self.HostState.get_host_stats(refresh=refresh)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -318,8 +343,10 @@ class XenAPISession(object):
def __init__(self, url, user, pw):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
- self._session.login_with_password(user, pw)
- self.loop = None
+ exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
+ "(is the Dom0 disk full?)"))
+ with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
+ self._session.login_with_password(user, pw)
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -356,57 +383,52 @@ class XenAPISession(object):
def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
- until it completes. Not re-entrant."""
+ until it completes."""
done = event.Event()
- self.loop = utils.LoopingCall(self._poll_task, id, task, done)
- self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
- rv = done.wait()
- self.loop.stop()
- return rv
-
- def _stop_loop(self):
- """Stop polling for task to finish."""
- #NOTE(sandy-walsh) Had to break this call out to support unit tests.
- if self.loop:
- self.loop.stop()
+ loop = utils.LoopingCall(f=None)
+
+ def _poll_task():
+ """Poll the given XenAPI task, and return the result if the
+ action was completed successfully or not.
+ """
+ try:
+ name = self._session.xenapi.task.get_name_label(task)
+ status = self._session.xenapi.task.get_status(task)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
+ if status == "pending":
+ return
+ elif status == "success":
+ result = self._session.xenapi.task.get_result(task)
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
+ done.send(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._session.xenapi.task.get_error_info(task)
+ action["error"] = str(error_info)
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
+ done.send_exception(self.XenAPI.Failure(error_info))
+
+ if id:
+ db.instance_action_create(context.get_admin_context(),
+ action)
+ except self.XenAPI.Failure, exc:
+ LOG.warn(exc)
+ done.send_exception(*sys.exc_info())
+ loop.stop()
+
+ loop.f = _poll_task
+ loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ return done.wait()
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
- def _poll_task(self, id, task, done):
- """Poll the given XenAPI task, and fire the given action if we
- get a result.
- """
- try:
- name = self._session.xenapi.task.get_name_label(task)
- status = self._session.xenapi.task.get_status(task)
- if id:
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
- if status == "pending":
- return
- elif status == "success":
- result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%(name)s] %(task)s status:"
- " success %(result)s") % locals())
- done.send(_parse_xmlrpc_value(result))
- else:
- error_info = self._session.xenapi.task.get_error_info(task)
- action["error"] = str(error_info)
- LOG.warn(_("Task [%(name)s] %(task)s status:"
- " %(status)s %(error_info)s") % locals())
- done.send_exception(self.XenAPI.Failure(error_info))
-
- if id:
- db.instance_action_create(context.get_admin_context(), action)
- except self.XenAPI.Failure, exc:
- LOG.warn(exc)
- done.send_exception(*sys.exc_info())
- self._stop_loop()
-
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
@@ -429,6 +451,65 @@ class XenAPISession(object):
raise
+class HostState(object):
+ """Manages information about the XenServer host this compute
+ node is running on.
+ """
+ def __init__(self, session):
+ super(HostState, self).__init__()
+ self._session = session
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Since under Xenserver, a compute node runs on a given host,
+ we can get host status information using xenapi.
+ """
+ LOG.debug(_("Updating host stats"))
+ # Make it something unlikely to match any actual instance ID
+ task_id = random.randint(-80000, -70000)
+ task = self._session.async_call_plugin("xenhost", "host_data", {})
+ task_result = self._session.wait_for_task(task, task_id)
+ if not task_result:
+ task_result = json.dumps("")
+ try:
+ data = json.loads(task_result)
+ except ValueError as e:
+ # Invalid JSON object
+ LOG.error(_("Unable to get updated status: %s") % e)
+ return
+ # Get the SR usage
+ try:
+ sr_ref = vm_utils.safe_find_sr(self._session)
+ except exception.NotFound as e:
+ # No SR configured
+ LOG.error(_("Unable to get SR for this host: %s") % e)
+ return
+ sr_rec = self._session.get_xenapi().SR.get_record(sr_ref)
+ total = int(sr_rec["virtual_allocation"])
+ used = int(sr_rec["physical_utilisation"])
+ data["disk_total"] = total
+ data["disk_used"] = used
+ data["disk_available"] = total - used
+ host_memory = data.get('host_memory', None)
+ if host_memory:
+ data["host_memory_total"] = host_memory.get('total', 0)
+ data["host_memory_overhead"] = host_memory.get('overhead', 0)
+ data["host_memory_free"] = host_memory.get('free', 0)
+ data["host_memory_free_computed"] = \
+ host_memory.get('free-computed', 0)
+ del data['host_memory']
+ self._stats = data
+
+
def _parse_xmlrpc_value(val):
"""Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""