summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorSandy Walsh <sandy.walsh@rackspace.com>2011-01-11 02:47:35 -0400
committerSandy Walsh <sandy.walsh@rackspace.com>2011-01-11 02:47:35 -0400
commit77258c8c5e8f94fbcb15fc4be83cf623ac414bd6 (patch)
treeaafbbd96f9ef443e3154a3f885f3932c0a6604e4 /nova/virt
parent2222851017c5c34b1a9ea1d2855f49c45395843c (diff)
parentb8de5221368c4055fc593c6d0d7164f2be956924 (diff)
Changed shared_ip_group detail routing
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/connection.py5
-rw-r--r--nova/virt/fake.py5
-rw-r--r--nova/virt/hyperv.py67
-rw-r--r--nova/virt/images.py8
-rw-r--r--nova/virt/libvirt_conn.py450
-rw-r--r--nova/virt/xenapi/fake.py24
-rw-r--r--nova/virt/xenapi/vm_utils.py68
-rw-r--r--nova/virt/xenapi/vmops.py26
-rw-r--r--nova/virt/xenapi/volume_utils.py44
-rw-r--r--nova/virt/xenapi/volumeops.py31
-rw-r--r--nova/virt/xenapi_conn.py39
11 files changed, 532 insertions, 235 deletions
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 846423afe..13181b730 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -19,16 +19,17 @@
"""Abstraction of the underlying virtualization API."""
-import logging
import sys
from nova import flags
+from nova import log as logging
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
from nova.virt import hyperv
+LOG = logging.getLogger("nova.virt.connection")
FLAGS = flags.FLAGS
@@ -69,6 +70,6 @@ def get_connection(read_only=False):
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
- logging.error(_('Failed to open connection to the hypervisor'))
+ LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 32541f5b4..849261f07 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -289,6 +289,11 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
+ def get_console_pool_info(self, console_type):
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
class FakeInstance(object):
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 4b9f6f946..30dc1c79b 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -61,11 +61,11 @@ Using the Python WMI library:
"""
import os
-import logging
import time
from nova import exception
from nova import flags
+from nova import log as logging
from nova.auth import manager
from nova.compute import power_state
from nova.virt import images
@@ -76,6 +76,9 @@ wmi = None
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.virt.hyperv')
+
+
HYPERV_POWER_STATE = {
3: power_state.SHUTDOWN,
2: power_state.RUNNING,
@@ -89,7 +92,7 @@ REQ_POWER_STATE = {
'Reboot': 10,
'Reset': 11,
'Paused': 32768,
- 'Suspended': 32769
+ 'Suspended': 32769,
}
@@ -112,7 +115,7 @@ class HyperVConnection(object):
def init_host(self):
#FIXME(chiradeep): implement this
- logging.debug(_('In init host'))
+ LOG.debug(_('In init host'))
pass
def list_instances(self):
@@ -142,11 +145,11 @@ class HyperVConnection(object):
self._create_disk(instance['name'], vhdfile)
self._create_nic(instance['name'], instance['mac_address'])
- logging.debug(_('Starting VM %s '), instance.name)
+ LOG.debug(_('Starting VM %s '), instance.name)
self._set_vm_state(instance['name'], 'Enabled')
- logging.info(_('Started VM %s '), instance.name)
+ LOG.info(_('Started VM %s '), instance.name)
except Exception as exn:
- logging.error(_('spawn vm failed: %s'), exn)
+ LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
def _create_vm(self, instance):
@@ -165,7 +168,7 @@ class HyperVConnection(object):
if not success:
raise Exception(_('Failed to create VM %s'), instance.name)
- logging.debug(_('Created VM %s...'), instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
vmsettings = vm.associators(
@@ -182,7 +185,7 @@ class HyperVConnection(object):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
- logging.debug(_('Set memory for vm %s...'), instance.name)
+ LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
@@ -192,12 +195,12 @@ class HyperVConnection(object):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
- logging.debug(_('Set vcpus for vm %s...'), instance.name)
+ LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- logging.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %s by attaching disk file %s'),
+ vm_name, vhdfile)
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
@@ -224,7 +227,7 @@ class HyperVConnection(object):
raise Exception(_('Failed to add diskdrive to VM %s'),
vm_name)
diskdrive_path = new_resources[0]
- logging.debug(_('New disk drive path is %s'), diskdrive_path)
+ LOG.debug(_('New disk drive path is %s'), diskdrive_path)
#Find the default VHD disk object.
vhddefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -243,11 +246,11 @@ class HyperVConnection(object):
if new_resources is None:
raise Exception(_('Failed to add vhd file to VM %s'),
vm_name)
- logging.info(_('Created disk for %s'), vm_name)
+ LOG.info(_('Created disk for %s'), vm_name)
def _create_nic(self, vm_name, mac):
"""Create a (emulated) nic and attach it to the vm"""
- logging.debug(_('Creating nic for %s '), vm_name)
+ LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
extswitch = self._find_external_network()
@@ -266,11 +269,11 @@ class HyperVConnection(object):
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
- logging.error(_('Failed creating a port on the external vswitch'))
+ LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
vm_name)
- logging.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ LOG.debug(_("Created switch port %s on switch %s"),
+ vm_name, extswitch.path_())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -281,7 +284,7 @@ class HyperVConnection(object):
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
vm_name)
- logging.info(_("Created nic for %s "), vm_name)
+ LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
"""Add a new resource (disk/nic) to the VM"""
@@ -314,10 +317,10 @@ class HyperVConnection(object):
time.sleep(0.1)
job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
if job.JobState != WMI_JOB_STATE_COMPLETED:
- logging.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
+ LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- logging.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
+ job.ElapsedTime)
return True
def _find_external_network(self):
@@ -352,7 +355,7 @@ class HyperVConnection(object):
def destroy(self, instance):
"""Destroy the VM. Also destroy the associated VHD disk files"""
- logging.debug(_("Got request to destroy vm %s"), instance.name)
+ LOG.debug(_("Got request to destroy vm %s"), instance.name)
vm = self._lookup(instance.name)
if vm is None:
return
@@ -383,7 +386,7 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- logging.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -399,12 +402,12 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- logging.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
+ LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ str(HYPERV_POWER_STATE[info.EnabledState]),
+ str(info.MemoryUsage),
+ str(info.NumberOfProcessors),
+ str(info.UpTime))
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -438,11 +441,11 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- logging.info(_("Successfully changed vm state of %s to %s"),
- vm_name, req_state)
+ LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
+ req_state)
else:
- logging.error(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
+ req_state)
raise Exception(_("Failed to change vm state of %s to %s"),
vm_name, req_state)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 2d03da4b4..ecf0e5efb 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,7 +21,6 @@
Handling of VM disk images.
"""
-import logging
import os.path
import shutil
import sys
@@ -30,6 +29,7 @@ import urllib2
import urlparse
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
@@ -40,6 +40,8 @@ FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
+LOG = logging.getLogger('nova.virt.images')
+
def fetch(image, path, user, project):
if FLAGS.use_s3:
@@ -65,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- logging.debug(_("Finished retreving %s -- placed in %s"), url, path)
+ LOG.debug(_("Finished retrieving %s -- placed in %s"), url, path)
def _fetch_s3_image(image, path, user, project):
@@ -89,7 +91,7 @@ def _fetch_s3_image(image, path, user, project):
else:
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k, v) in headers.iteritems():
- cmd += ['-H', '%s: %s' % (k, v)]
+ cmd += ['-H', '\'%s: %s\'' % (k, v)]
cmd += ['-o', path]
cmd_out = ' '.join(cmd)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 00edfbdc8..19f79e19f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,7 +36,6 @@ Supports KVM, QEMU, UML, and XEN.
"""
-import logging
import os
import shutil
@@ -50,6 +49,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -62,6 +62,7 @@ libvirt = None
libxml2 = None
Template = None
+LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
# TODO(vish): These flags should probably go into a shared location
@@ -85,6 +86,9 @@ flags.DEFINE_string('libvirt_uri',
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
+flags.DEFINE_string('firewall_driver',
+ 'nova.virt.libvirt_conn.IptablesFirewallDriver',
+ 'Firewall driver (defaults to iptables)')
def get_connection(read_only):
@@ -124,16 +128,24 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
+ self.nwfilter = NWFilterFirewall(self._get_connection)
+
+ if not FLAGS.firewall_driver:
+ self.firewall_driver = self.nwfilter
+ self.nwfilter.handle_security_groups = True
+ else:
+ self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+
def init_host(self):
- NWFilterFirewall(self._conn).setup_base_nwfilters()
+ pass
- @property
- def _conn(self):
+ def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
- logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
+ LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
+ _conn = property(_get_connection)
def _test_connection(self):
try:
@@ -142,7 +154,7 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
- logging.debug(_('Connection to libvirt broke'))
+ LOG.debug(_('Connection to libvirt broke'))
return False
raise
@@ -214,8 +226,8 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- logging.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ LOG.info(_('instance %s: deleting instance files %s'),
+ instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
@@ -279,10 +291,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rebooted'), instance['name'])
+ LOG.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_reboot failed: %s'), exn)
+ LOG.exception(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -325,10 +337,10 @@ class LibvirtConnection(object):
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rescued'), instance['name'])
+ LOG.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_rescue failed: %s'), exn)
+ LOG.exception(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
@@ -350,10 +362,13 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+
+ self.nwfilter.setup_basic_filtering(instance)
+ self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
- logging.debug(_("instance %s: is running"), instance['name'])
+ LOG.debug(_("instance %s: is running"), instance['name'])
+ self.firewall_driver.apply_instance_filter(instance)
timer = utils.LoopingCall(f=None)
@@ -363,11 +378,11 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: booted'), instance['name'])
+ LOG.debug(_('instance %s: booted'), instance['name'])
timer.stop()
except:
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -377,11 +392,11 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
- logging.info('virsh said: %r' % (virsh_output,))
+ LOG.info(_('virsh said: %r'), virsh_output)
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- logging.info(_('cool, it\'s a device'))
+ LOG.info(_('cool, it\'s a device'))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -389,7 +404,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- logging.info(_('data: %r, fpath: %r') % (data, fpath))
+ LOG.info(_('data: %r, fpath: %r'), data, fpath)
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -397,7 +412,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- logging.info('Contents: %r' % (contents,))
+ LOG.info(_('Contents of file %s: %r'), fpath, contents)
return contents
@exception.wrap_exception
@@ -431,7 +446,7 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info(_('instance %s: Creating image'), inst['name'])
+ LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@@ -487,10 +502,10 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
- logging.info(_('instance %s: injecting key into image %s'),
+ LOG.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
- logging.info(_('instance %s: injecting net into image %s'),
+ LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
@@ -498,9 +513,9 @@ class LibvirtConnection(object):
execute=execute)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- logging.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %s: ignoring error injecting data'
+ ' into image %s (%s)'),
+ inst['name'], inst.image_id, e)
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
@@ -526,8 +541,10 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
- logging.debug(_('instance %s: starting toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -569,7 +586,7 @@ class LibvirtConnection(object):
xml_info['disk'] = xml_info['basepath'] + "/disk"
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- logging.debug(_('instance %s: finished toXML method'),
+ LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
return xml
@@ -690,18 +707,63 @@ class LibvirtConnection(object):
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
- def refresh_security_group(self, security_group_id):
- fw = NWFilterFirewall(self._conn)
- fw.ensure_security_group_filter(security_group_id)
+ def get_console_pool_info(self, console_type):
+ #TODO(mdragon): console proxy should be implemented for libvirt,
+ # in case someone wants to use it with kvm or
+ # such. For now return fake data.
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
+ def refresh_security_group_rules(self, security_group_id):
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
+
+ def refresh_security_group_members(self, security_group_id):
+ self.firewall_driver.refresh_security_group_members(security_group_id)
+
+
+class FirewallDriver(object):
+ def prepare_instance_filter(self, instance):
+ """Prepare filters for the instance.
+
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
-class NWFilterFirewall(object):
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+
+class NWFilterFirewall(FirewallDriver):
"""
This class implements a network filtering mechanism versatile
enough for EC2 style Security Group filtering by leveraging
libvirt's nwfilter.
First, all instances get a filter ("nova-base-filter") applied.
+ This filter provides some basic security such as protection against
+ MAC spoofing, IP spoofing, and ARP spoofing.
+
This filter drops all incoming ipv4 and ipv6 connections.
Outgoing connections are never blocked.
@@ -735,38 +797,79 @@ class NWFilterFirewall(object):
(*) This sentence brought to you by the redundancy department of
redundancy.
+
"""
def __init__(self, get_connection):
- self._conn = get_connection
-
- nova_base_filter = '''<filter name='nova-base' chain='root'>
- <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
- <filterref filter='no-mac-spoofing'/>
- <filterref filter='no-ip-spoofing'/>
- <filterref filter='no-arp-spoofing'/>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
- nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
- <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
- <rule action='accept' direction='out'
- priority='100'>
- <udp srcipaddr='0.0.0.0'
- dstipaddr='255.255.255.255'
- srcportstart='68'
- dstportstart='67'/>
- </rule>
- <rule action='accept' direction='in'
- priority='100'>
- <udp srcipaddr='$DHCPSERVER'
- srcportstart='67'
- dstportstart='68'/>
- </rule>
- </filter>'''
+ self._libvirt_get_connection = get_connection
+ self.static_filters_configured = False
+ self.handle_security_groups = False
+
+ def _get_connection(self):
+ return self._libvirt_get_connection()
+ _conn = property(_get_connection)
+
+ def nova_dhcp_filter(self):
+ """The standard allow-dhcp-server filter is an <ip> one, so it uses
+ ebtables to allow traffic through. Without a corresponding rule in
+ iptables, it'll get blocked anyway."""
+
+ return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in'
+ priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def setup_basic_filtering(self, instance):
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ LOG.info(_('called setup_basic_filtering in nwfilter'))
+
+ if self.handle_security_groups:
+ # No point in setting up a filter set that we'll be overriding
+ # anyway.
+ return
+
+ LOG.info(_('ensuring static filters'))
+ self._ensure_static_filters()
+
+ instance_filter_name = self._instance_filter_name(instance)
+ self._define_filter(self._filter_container(instance_filter_name,
+ ['nova-base']))
+
+ def _ensure_static_filters(self):
+ if self.static_filters_configured:
+ return
+
+ self._define_filter(self._filter_container('nova-base',
+ ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']))
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_vpn_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
+
+ self.static_filters_configured = True
+
+ def _filter_container(self, name, filters):
+ xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+ name,
+ ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+ return xml
nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
<uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
@@ -780,7 +883,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s />
</rule>""" % (action, direction,
@@ -792,7 +895,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
</rule>""" % (action, direction,
@@ -816,43 +919,49 @@ class NWFilterFirewall(object):
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
- def setup_base_nwfilters(self):
- self._define_filter(self.nova_base_ipv4_filter)
- self._define_filter(self.nova_base_ipv6_filter)
- self._define_filter(self.nova_dhcp_filter)
- self._define_filter(self.nova_base_filter)
- self._define_filter(self.nova_vpn_filter)
- if FLAGS.allow_project_net_traffic:
- self._define_filter(self.nova_project_filter)
-
- def setup_nwfilters_for_instance(self, instance):
+ def prepare_instance_filter(self, instance):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
- nwfilter_xml = ("<filter name='nova-instance-%s' "
- "chain='root'>\n") % instance['name']
-
if instance['image_id'] == FLAGS.vpn_image_id:
- nwfilter_xml += " <filterref filter='nova-vpn' />\n"
+ base_filter = 'nova-vpn'
else:
- nwfilter_xml += " <filterref filter='nova-base' />\n"
+ base_filter = 'nova-base'
+
+ instance_filter_name = self._instance_filter_name(instance)
+ instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
+ instance_filter_children = [base_filter, instance_secgroup_filter_name]
+ instance_secgroup_filter_children = ['nova-base-ipv4',
+ 'nova-base-ipv6',
+ 'nova-allow-dhcp-server']
+
+ ctxt = context.get_admin_context()
if FLAGS.allow_project_net_traffic:
- nwfilter_xml += " <filterref filter='nova-project' />\n"
+ instance_filter_children += ['nova-project']
+
+ for security_group in db.security_group_get_by_instance(ctxt,
+ instance['id']):
+
+ self.refresh_security_group_rules(security_group['id'])
+
+ instance_secgroup_filter_children += [('nova-secgroup-%s' %
+ security_group['id'])]
- for security_group in instance.security_groups:
- self.ensure_security_group_filter(security_group['id'])
+ self._define_filter(
+ self._filter_container(instance_secgroup_filter_name,
+ instance_secgroup_filter_children))
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
- "/>\n") % security_group['id']
- nwfilter_xml += "</filter>"
+ self._define_filter(
+ self._filter_container(instance_filter_name,
+ instance_filter_children))
- self._define_filter(nwfilter_xml)
+ return
- def ensure_security_group_filter(self, security_group_id):
+ def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -870,9 +979,9 @@ class NWFilterFirewall(object):
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
- logging.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r' %
- (rule.protocol, rule.from_port, rule.to_port))
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
@@ -883,3 +992,162 @@ class NWFilterFirewall(object):
xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
(security_group_id, rule_xml,)
return xml
+
+ def _instance_filter_name(self, instance):
+ return 'nova-instance-%s' % instance['name']
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ def __init__(self, execute=None):
+ self.execute = execute or utils.execute
+ self.instances = set()
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def remove_instance(self, instance):
+ self.instances.remove(instance)
+
+ def add_instance(self, instance):
+ self.instances.add(instance)
+
+ def prepare_instance_filter(self, instance):
+ self.add_instance(instance)
+ self.apply_ruleset()
+
+ def apply_ruleset(self):
+ current_filter, _ = self.execute('sudo iptables-save -t filter')
+ current_lines = current_filter.split('\n')
+ new_filter = self.modify_rules(current_lines)
+ self.execute('sudo iptables-restore',
+ process_input='\n'.join(new_filter))
+
+ def modify_rules(self, current_lines):
+ ctxt = context.get_admin_context()
+ # Remove any trace of nova rules.
+ new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+ seen_chains = False
+ for rules_index in range(len(new_filter)):
+ if not seen_chains:
+ if new_filter[rules_index].startswith(':'):
+ seen_chains = True
+ elif seen_chains == 1:
+ if not new_filter[rules_index].startswith(':'):
+ break
+
+ our_chains = [':nova-ipv4-fallback - [0:0]']
+ our_rules = ['-A nova-ipv4-fallback -j DROP']
+
+ our_chains += [':nova-local - [0:0]']
+ our_rules += ['-A FORWARD -j nova-local']
+
+ security_groups = set()
+ # Add our chains
+ # First, we add instance chains and rules
+ for instance in self.instances:
+ chain_name = self._instance_chain_name(instance)
+ ip_address = self._ip_for_instance(instance)
+
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ # Jump to the per-instance chain
+ our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+ chain_name)]
+
+ # Always drop invalid packets
+ our_rules += ['-A %s -m state --state '
+ 'INVALID -j DROP' % (chain_name,)]
+
+ # Allow established connections
+ our_rules += ['-A %s -m state --state '
+ 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+ # Jump to each security group chain in turn
+ for security_group in \
+ db.security_group_get_by_instance(ctxt,
+ instance['id']):
+ security_groups.add(security_group)
+
+ sg_chain_name = self._security_group_chain_name(security_group)
+
+ our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+ # Allow DHCP responses
+ dhcp_server = self._dhcp_server_for_instance(instance)
+ our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
+ (chain_name, dhcp_server)]
+
+ # If nothing matches, jump to the fallback chain
+ our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ chain_name = self._security_group_chain_name(security_group)
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ rules = \
+ db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+ LOG.info('%r', rule)
+ args = ['-A', chain_name, '-p', rule.protocol]
+
+ if rule.cidr:
+ args += ['-s', rule.cidr]
+ else:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
+ continue
+
+ if rule.protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif rule.protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == '-1':
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == '-1':
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
+
+ args += ['-j ACCEPT']
+ our_rules += [' '.join(args)]
+
+ new_filter[rules_index:rules_index] = our_rules
+ new_filter[rules_index:rules_index] = our_chains
+ LOG.info('new_filter: %s', '\n'.join(new_filter))
+ return new_filter
+
+ def refresh_security_group_members(self, security_group_id):
+ pass
+
+ def refresh_security_group_rules(self, security_group_id):
+ self.apply_ruleset()
+
+ def _security_group_chain_name(self, security_group):
+ return 'nova-sg-%s' % (security_group['id'],)
+
+ def _instance_chain_name(self, instance):
+ return 'nova-inst-%s' % (instance['id'],)
+
+ def _ip_for_instance(self, instance):
+ return db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+
+ def _dhcp_server_for_instance(self, instance):
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ return network['gateway']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f97..96d8f5fc8 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
import datetime
-import logging
import uuid
from pprint import pformat
from nova import exception
+from nova import log as logging
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
def log_db_contents(msg=None):
- logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s' %
+ _('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
@@ -278,12 +280,12 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- logging.warn('Calling %s %s', name, impl)
+ LOG.debug(_('Calling %s %s'), name, impl)
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
- logging.warn('Calling getter %s', name)
+ LOG.debug(_('Calling getter %s'), name)
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s or it has '
- 'been called with the wrong number of arguments' % name)
+ _('xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
@@ -351,7 +353,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
@@ -399,7 +401,7 @@ class SessionBase(object):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index b91c37fb0..a91c8ea27 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
-import logging
import pickle
import urllib
from xml.dom import minidom
@@ -27,6 +26,7 @@ from xml.dom import minidom
from eventlet import event
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
@@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -121,9 +122,9 @@ class VMHelper(HelperBase):
rec['HVM_boot_params'] = {'order': 'dc'}
rec['platform'] = {'acpi': 'true', 'apic': 'true',
'pae': 'true', 'viridian': 'true'}
- logging.debug('Created VM %s...', instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@classmethod
@@ -143,10 +144,9 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
- vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
+ LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
@@ -161,7 +161,7 @@ class VMHelper(HelperBase):
if vbd_rec['userdevice'] == str(number):
return vbd
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@classmethod
@@ -170,7 +170,7 @@ class VMHelper(HelperBase):
try:
vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
@@ -183,7 +183,7 @@ class VMHelper(HelperBase):
#with Josh Kearney
session.wait_for_task(0, task)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
@@ -199,11 +199,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
+ network_ref)
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
+ vm_ref, network_ref)
return vif_ref
@classmethod
@@ -213,8 +213,7 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- logging.debug(_("Snapshotting VM %s with label '%s'..."),
- vm_ref, label)
+ LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -227,8 +226,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -241,8 +240,7 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as '%s'"),
- vdi_uuids, image_name)
+ LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name)
params = {'vdi_uuids': vdi_uuids,
'image_name': image_name,
@@ -260,7 +258,7 @@ class VMHelper(HelperBase):
"""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s", url, access)
+ LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -278,7 +276,7 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, vdi_ref):
- logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
@@ -289,7 +287,7 @@ class VMHelper(HelperBase):
pv = True
elif pv_str.lower() == 'false':
pv = False
- logging.debug("PV Kernel in VDI:%d", pv)
+ LOG.debug(_("PV Kernel in VDI:%d"), pv)
return pv
@classmethod
@@ -317,10 +315,9 @@ class VMHelper(HelperBase):
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug(_('VDI %s is still available'),
- record['uuid'])
+ LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
@@ -331,10 +328,10 @@ class VMHelper(HelperBase):
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
- logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
- record['power_state'])
- logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
- XENAPI_POWER_STATE[record['power_state']])
+ LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -390,11 +387,9 @@ def get_vhd_parent(session, vdi_rec):
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
- #NOTE(sirp): changed xenapi -> get_xenapi()
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- #NOTE(sirp): changed log -> logging
- logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
return parent_ref, parent_rec
else:
return None
@@ -411,7 +406,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
- logging.debug(_("Re-scanning SR %s"), sr_ref)
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
@@ -435,10 +430,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- logging.debug(
- _("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."),
- parent_uuid, original_parent_uuid)
+ LOG.debug(_("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."), parent_uuid,
+ original_parent_uuid)
else:
done.send(parent_uuid)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b6d620782..e20930fe8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -20,10 +20,10 @@ Management class for VM-related functions (spawn, reboot, etc).
"""
import json
-import logging
from nova import db
from nova import context
+from nova import log as logging
from nova import exception
from nova import utils
@@ -33,6 +33,9 @@ from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
+XenAPI = None
+LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
class VMOps(object):
"""
@@ -93,10 +96,9 @@ class VMOps(object):
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
- logging.debug(_('Starting VM %s...'), vm_ref)
+ LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info(_('Spawning VM %s created %s.'), instance.name,
- vm_ref)
+ LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -107,12 +109,12 @@ class VMOps(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop()
except Exception, exc:
- logging.warn(exc)
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.warn(exc)
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -205,7 +207,7 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Disk clean-up
if vdis:
@@ -214,20 +216,20 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 4bbc41b03..0cd15b950 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -21,16 +21,17 @@ and storage repositories
import re
import string
-import logging
from nova import db
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.virt.xenapi import HelperBase
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.volume_utils")
class StorageError(Exception):
@@ -53,7 +54,7 @@ class VolumeHelper(HelperBase):
"""
sr_ref = session.get_xenapi().SR.get_by_name_label(label)
if len(sr_ref) == 0:
- logging.debug('Introducing %s...', label)
+ LOG.debug(_('Introducing %s...'), label)
record = {}
if 'chapuser' in info and 'chappassword' in info:
record = {'target': info['targetHost'],
@@ -70,10 +71,10 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- logging.debug('Introduced %s as %s.', label, sr_ref)
+ LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
return sr_ref
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
else:
return sr_ref[0]
@@ -85,32 +86,32 @@ class VolumeHelper(HelperBase):
vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@classmethod
def destroy_iscsi_storage(cls, session, sr_ref):
"""Forget the SR whilst preserving the state of the disk"""
- logging.debug("Forgetting SR %s ... ", sr_ref)
+ LOG.debug(_("Forgetting SR %s ... "), sr_ref)
pbds = []
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when getting PBDs for %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
+ exc, sr_ref)
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when unplugging PBD %s',
- exc, pbd)
+ LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
+ exc, pbd)
try:
session.get_xenapi().SR.forget(sr_ref)
- logging.debug("Forgetting SR %s done.", sr_ref)
+ LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when forgetting SR %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
+ sr_ref)
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -118,12 +119,12 @@ class VolumeHelper(HelperBase):
try:
vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to get record'
' of VDI %s on') % vdis[0])
else:
@@ -141,7 +142,7 @@ class VolumeHelper(HelperBase):
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
@@ -165,11 +166,8 @@ class VolumeHelper(HelperBase):
target_host = _get_target_host(iscsi_portal)
target_port = _get_target_port(iscsi_portal)
target_iqn = _get_iqn(iscsi_name, volume_id)
- logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- volume_id,
- target_host,
- target_port,
- target_iqn)
+        LOG.debug('(vol_id,host,port,iqn): (%s,%s,%s,%s)',
+ volume_id, target_host, target_port, target_iqn)
if (device_number < 0) or \
(volume_id is None) or \
(target_host is None) or \
@@ -196,7 +194,7 @@ class VolumeHelper(HelperBase):
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
- logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
@@ -257,7 +255,7 @@ def _get_target(volume_id):
"sendtargets -p %s" %
volume_ref['host'])
except exception.ProcessExecutionError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
targets = r.splitlines()
if len(_e) == 0 and len(targets) == 1:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index fdeb2506c..189f968c6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,14 +17,17 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
-import logging
from nova import exception
+from nova import log as logging
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.volume_utils import VolumeHelper
from nova.virt.xenapi.volume_utils import StorageError
+LOG = logging.getLogger("nova.virt.xenapi.volumeops")
+
+
class VolumeOps(object):
"""
Management class for Volume-related tasks
@@ -45,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- logging.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -61,7 +64,7 @@ class VolumeOps(object):
try:
vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to create VDI on SR %s for instance %s')
% (sr_ref,
@@ -73,7 +76,7 @@ class VolumeOps(object):
vol_rec['deviceNumber'],
False)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to use SR %s for instance %s')
% (sr_ref,
@@ -84,13 +87,13 @@ class VolumeOps(object):
vbd_ref)
self._session.wait_for_task(vol_rec['deviceNumber'], task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- logging.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -100,13 +103,13 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
vm_ref, device_number)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
else:
try:
@@ -114,13 +117,13 @@ class VolumeOps(object):
vbd_ref)
VMHelper.unplug_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
VMHelper.destroy_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- logging.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c48f5b7cb..11473ddf8 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -51,8 +51,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
-import logging
import sys
+import urlparse
import xmlrpclib
from eventlet import event
@@ -62,9 +62,14 @@ from nova import context
from nova import db
from nova import utils
from nova import flags
+from nova import log as logging
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
+
+LOG = logging.getLogger("nova.virt.xenapi")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
@@ -186,6 +191,12 @@ class XenAPIConnection(object):
"""Detach volume storage to VM instance"""
return self._volumeops.detach_volume(instance_name, mountpoint)
+ def get_console_pool_info(self, console_type):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return {'address': xs_url.netloc,
+ 'username': FLAGS.xenapi_connection_username,
+ 'password': FLAGS.xenapi_connection_password}
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -194,6 +205,7 @@ class XenAPISession(object):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
self._session.login_with_password(user, pw)
+ self.loop = None
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -230,14 +242,20 @@ class XenAPISession(object):
def wait_for_task(self, id, task):
"""Return the result of the given task. The task is polled
- until it completes."""
+ until it completes. Not re-entrant."""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, id, task, done)
- loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ self.loop = utils.LoopingCall(self._poll_task, id, task, done)
+ self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()
- loop.stop()
+ self.loop.stop()
return rv
+ def _stop_loop(self):
+ """Stop polling for task to finish."""
+ #NOTE(sandy-walsh) Had to break this call out to support unit tests.
+ if self.loop:
+ self.loop.stop()
+
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
@@ -256,7 +274,7 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- logging.info(_("Task [%s] %s status: success %s") % (
+ LOG.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
@@ -264,7 +282,7 @@ class XenAPISession(object):
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- logging.warn(_("Task [%s] %s status: %s %s") % (
+ LOG.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
@@ -272,15 +290,16 @@ class XenAPISession(object):
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.warn(exc)
done.send_exception(*sys.exc_info())
+ self._stop_loop()
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@@ -293,7 +312,7 @@ class XenAPISession(object):
else:
raise
except xmlrpclib.ProtocolError, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
raise