summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--nova/compute/manager.py1
-rw-r--r--nova/flags.py5
-rw-r--r--nova/tests/test_xenapi.py292
-rw-r--r--nova/tests/xenapi/stubs.py89
-rw-r--r--nova/virt/firewall.py336
-rw-r--r--nova/virt/libvirt/connection.py6
-rw-r--r--nova/virt/libvirt/firewall.py265
-rw-r--r--nova/virt/libvirt/vif.py3
-rw-r--r--nova/virt/netutils.py (renamed from nova/virt/libvirt/netutils.py)0
-rw-r--r--nova/virt/xenapi/fake.py4
-rw-r--r--nova/virt/xenapi/firewall.py70
-rw-r--r--nova/virt/xenapi/vm_utils.py8
-rw-r--r--nova/virt/xenapi/vmops.py42
-rw-r--r--nova/virt/xenapi_conn.py22
-rw-r--r--[-rwxr-xr-x]plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost58
15 files changed, 915 insertions, 286 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 0dda3be30..cf373570d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -165,7 +165,6 @@ class ComputeManager(manager.SchedulerDependentManager):
# and re-document the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
-
try:
self.driver = utils.check_isinstance(
utils.import_object(compute_driver),
diff --git a/nova/flags.py b/nova/flags.py
index 9c4aeffeb..a51508c97 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -393,7 +393,10 @@ DEFINE_integer('max_vcs_in_vsa', 32,
'maxinum VCs in a VSA')
DEFINE_integer('vsa_part_size_gb', 100,
'default partition size for shared capacity')
-
+# Default firewall driver for security groups and provider firewall
+DEFINE_string('firewall_driver',
+ 'nova.virt.libvirt.firewall.IptablesFirewallDriver',
+ 'Firewall driver (defaults to iptables)')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.')
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2b9f977cc..12e15c991 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -42,6 +42,7 @@ from nova.virt.xenapi import vm_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
+from nova.tests import fake_network
from nova.tests import fake_utils
LOG = logging.getLogger('nova.tests.test_xenapi')
@@ -94,7 +95,9 @@ class XenAPIVolumeTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
- xenapi_connection_password='test_pass')
+ xenapi_connection_password='test_pass',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
@@ -205,7 +208,9 @@ class XenAPIVMTestCase(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
- instance_name_template='%d')
+ instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
@@ -247,6 +252,9 @@ class XenAPIVMTestCase(test.TestCase):
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
+        # Stubbing out the firewall driver, as the previous stub set alters
+        # XML-RPC result parsing
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
@@ -255,6 +263,10 @@ class XenAPIVMTestCase(test.TestCase):
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
+ stubs.stubout_is_snapshot(self.stubs)
+        # Stubbing out the firewall driver, as the previous stub set alters
+        # XML-RPC result parsing
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
@@ -340,6 +352,7 @@ class XenAPIVMTestCase(test.TestCase):
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
+ 'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})
@@ -409,7 +422,11 @@ class XenAPIVMTestCase(test.TestCase):
instance = db.instance_create(self.context, instance_values)
else:
instance = db.instance_get(self.context, instance_id)
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
+ network_info = [({'bridge': 'fa0', 'id': 0,
+ 'injected': True,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::1/120',
+ },
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
@@ -420,6 +437,7 @@ class XenAPIVMTestCase(test.TestCase):
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
+ 'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
@@ -488,10 +506,11 @@ class XenAPIVMTestCase(test.TestCase):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin
-
stubs.FakeSessionForVMTests.host_call_plugin = \
stubs.FakeSessionForVMTests.host_call_plugin_swap
-
+        # Stubbing out the firewall driver, as the previous stub sets a
+        # particular stub for async plugin calls
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
@@ -686,7 +705,11 @@ class XenAPIVMTestCase(test.TestCase):
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, instance_values)
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ network_info = [({'bridge': 'fa0', 'id': 0,
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::1/120',
+ },
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
@@ -697,6 +720,7 @@ class XenAPIVMTestCase(test.TestCase):
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
+ 'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
@@ -757,7 +781,9 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
- xenapi_connection_password='test_pass')
+ xenapi_connection_password='test_pass',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
@@ -1139,7 +1165,9 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
- xenapi_connection_password='test_pass')
+ xenapi_connection_password='test_pass',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
@@ -1230,7 +1258,9 @@ class XenAPIBWUsageTestCase(test.TestCase):
XenAPIBWUsageTestCase._fake_compile_metrics)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
- xenapi_connection_password='test_pass')
+ xenapi_connection_password='test_pass',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
@@ -1245,3 +1275,247 @@ class XenAPIBWUsageTestCase(test.TestCase):
"""
result = self.conn.get_all_bw_usage(datetime.datetime.utcnow())
self.assertEqual(result, [])
+
+
+class XenAPIDom0IptablesFirewallTestCase(test.TestCase):
+
+ _in_nat_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ ]
+
+ _in_filter_rules = [
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ _in6_filter_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
+ def setUp(self):
+ super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
+ self.flags(xenapi_connection_url='test_url',
+ xenapi_connection_password='test_pass',
+ instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.stubs = stubout.StubOutForTesting()
+ xenapi_fake.reset()
+ xenapi_fake.create_local_srs()
+ xenapi_fake.create_local_pifs()
+ self.user_id = 'mappin'
+ self.project_id = 'fake'
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
+ test_case=self)
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.conn = xenapi_conn.get_connection(False)
+ self.fw = self.conn._vmops.firewall_driver
+
+ def _create_instance_ref(self):
+ return db.instance_create(self.context,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'instance_type_id': 1})
+
+ def _create_test_security_group(self):
+ admin_ctxt = context.get_admin_context()
+ secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testgroup',
+ 'description': 'test group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'cidr': '192.168.10.0/24'})
+ return secgroup
+
+ def _validate_security_group(self):
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self._in_filter_rules)
+ for rule in in_rules:
+ if not 'nova' in rule:
+ self.assertTrue(rule in self._out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ # last two octets change
+ if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+ security_group_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP acceptance rule wasn't added")
+
+ regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
+ ' -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
+ ' -s 192.168.10.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ def test_static_filters(self):
+ instance_ref = self._create_instance_ref()
+ src_instance_ref = self._create_instance_ref()
+ admin_ctxt = context.get_admin_context()
+ secgroup = self._create_test_security_group()
+
+ src_secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testsourcegroup',
+ 'description': 'src group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'group_id': src_secgroup['id']})
+
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
+ src_secgroup['id'])
+ instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+ src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
+
+ def get_fixed_ips(*args, **kwargs):
+ ips = []
+ for _n, info in network_info:
+ ips.extend(info['ips'])
+ return [ip['ip'] for ip in ips]
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ self.stubs.Set(db, 'instance_get_fixed_addresses', get_fixed_ips)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+
+ self._validate_security_group()
+ # Extra test for TCP acceptance rules
+ for ip in get_fixed_ips():
+ regex = re.compile('-A .* -j ACCEPT -p tcp'
+ ' --dport 80:81 -s %s' % ip)
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 0)
+
+ def test_multinic_iptables(self):
+ ipv4_rules_per_addr = 1
+ ipv4_addr_per_network = 2
+ ipv6_rules_per_addr = 1
+ ipv6_addr_per_network = 1
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ network_info = fake_network.\
+ fake_get_instance_nw_info(self.stubs,
+ networks_count,
+ ipv4_addr_per_network)
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ self.assertEquals(ipv4_network_rules,
+ ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
+ self.assertEquals(ipv6_network_rules,
+ ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
+ def test_do_refresh_security_group_rules(self):
+ admin_ctxt = context.get_admin_context()
+ instance_ref = self._create_instance_ref()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+ secgroup = self._create_test_security_group()
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.instances[instance_ref['id']] = instance_ref
+ self._validate_security_group()
+ # add a rule to the security group
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'udp',
+ 'from_port': 200,
+ 'to_port': 299,
+ 'cidr': '192.168.99.0/24'})
+ #validate the extra rule
+ self.fw.refresh_security_group_rules(secgroup)
+ regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
+ ' -s 192.168.99.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "Rules were not updated properly."
+ "The rule for UDP acceptance is missing")
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 64e7bc3fc..6e24bdc34 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -7,7 +7,6 @@
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
-#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -17,6 +16,7 @@
"""Stubouts, mocks and fixtures for the test suite"""
import eventlet
+import json
import random
from nova.virt import xenapi_conn
@@ -27,6 +27,17 @@ from nova.virt.xenapi import vmops
from nova import utils
+def stubout_firewall_driver(stubs, conn):
+
+ def fake_none(self, *args):
+ return
+
+ vmops = conn._vmops
+ stubs.Set(vmops.firewall_driver, 'setup_basic_filtering', fake_none)
+ stubs.Set(vmops.firewall_driver, 'prepare_instance_filter', fake_none)
+ stubs.Set(vmops.firewall_driver, 'instance_filter_exists', fake_none)
+
+
def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, context, session, instance, image, user,
@@ -42,7 +53,7 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
-def stubout_session(stubs, cls, product_version=None):
+def stubout_session(stubs, cls, product_version=None, **opt_args):
"""Stubs out three methods from XenAPISession"""
def fake_import(self):
"""Stubs out get_imported_xenapi of XenAPISession"""
@@ -51,7 +62,7 @@ def stubout_session(stubs, cls, product_version=None):
return __import__(fake_module, globals(), locals(), from_list, -1)
stubs.Set(xenapi_conn.XenAPISession, '_create_session',
- lambda s, url: cls(url))
+ lambda s, url: cls(url, **opt_args))
stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
fake_import)
if product_version is None:
@@ -90,7 +101,7 @@ def stubout_is_vdi_pv(stubs):
def stubout_determine_is_pv_objectstore(stubs):
- """Assumes VMs never have PV kernels"""
+    """Assumes VMs never have PV kernels"""
@classmethod
def f(cls, *args):
@@ -98,6 +109,16 @@ def stubout_determine_is_pv_objectstore(stubs):
stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+def stubout_is_snapshot(stubs):
+    """Always returns True;
+    the xenapi fake driver does not create VM refs for snapshots."""
+
+ @classmethod
+ def f(cls, *args):
+ return True
+ stubs.Set(vm_utils.VMHelper, 'is_snapshot', f)
+
+
def stubout_lookup_image(stubs):
"""Simulates a failure in lookup image."""
def f(_1, _2, _3, _4):
@@ -140,6 +161,16 @@ def _make_fake_vdi():
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
+
+ _fake_iptables_save_output = \
+ "# Generated by iptables-save v1.4.10 on Sun Nov 6 22:49:02 2011\n"\
+ "*filter\n"\
+ ":INPUT ACCEPT [0:0]\n"\
+ ":FORWARD ACCEPT [0:0]\n"\
+ ":OUTPUT ACCEPT [0:0]\n"\
+ "COMMIT\n"\
+ "# Completed on Sun Nov 6 22:49:02 2011\n"
+
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)
@@ -147,6 +178,9 @@ class FakeSessionForVMTests(fake.SessionBase):
if (plugin, method) == ('glance', 'download_vhd'):
return fake.as_json(dict(vdi_type='os',
vdi_uuid=_make_fake_vdi()))
+ elif (plugin, method) == ("xenhost", "iptables_config"):
+ return fake.as_json(out=self._fake_iptables_save_output,
+ err='')
else:
return (super(FakeSessionForVMTests, self).
host_call_plugin(_1, _2, plugin, method, _5))
@@ -196,6 +230,53 @@ class FakeSessionForVMTests(fake.SessionBase):
pass
+class FakeSessionForFirewallTests(FakeSessionForVMTests):
+ """ Stubs out a XenApi Session for doing IPTable Firewall tests """
+
+ def __init__(self, uri, test_case=None):
+ super(FakeSessionForFirewallTests, self).__init__(uri)
+ if hasattr(test_case, '_in_filter_rules'):
+ self._in_filter_rules = test_case._in_filter_rules
+ if hasattr(test_case, '_in6_filter_rules'):
+ self._in6_filter_rules = test_case._in6_filter_rules
+ if hasattr(test_case, '_in_nat_rules'):
+ self._in_nat_rules = test_case._in_nat_rules
+ self._test_case = test_case
+
+ def host_call_plugin(self, _1, _2, plugin, method, args):
+        """Mock method for host_call_plugin to be used in unit tests
+ for the dom0 iptables Firewall drivers for XenAPI
+
+ """
+ if plugin == "xenhost" and method == "iptables_config":
+ # The command to execute is a json-encoded list
+ cmd_args = args.get('cmd_args', None)
+ cmd = json.loads(cmd_args)
+ if not cmd:
+ ret_str = ''
+ else:
+ output = ''
+ process_input = args.get('process_input', None)
+ if cmd == ['ip6tables-save', '-t', 'filter']:
+ output = '\n'.join(self._in6_filter_rules)
+ if cmd == ['iptables-save', '-t', 'filter']:
+ output = '\n'.join(self._in_filter_rules)
+ if cmd == ['iptables-save', '-t', 'nat']:
+ output = '\n'.join(self._in_nat_rules)
+ if cmd == ['iptables-restore', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ if self._test_case is not None:
+ self._test_case._out_rules = lines
+ output = '\n'.join(lines)
+ if cmd == ['ip6tables-restore', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ output = '\n'.join(lines)
+ ret_str = fake.as_json(out=output, err='')
+ return ret_str
+
+
def stub_out_vm_methods(stubs):
def fake_shutdown(self, inst, vm, method="clean"):
pass
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
new file mode 100644
index 000000000..c60148231
--- /dev/null
+++ b/nova/virt/firewall.py
@@ -0,0 +1,336 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt import netutils
+
+LOG = logging.getLogger("nova.virt.firewall")
+FLAGS = flags.FLAGS
+flags.DEFINE_bool('allow_same_net_traffic',
+ True,
+ 'Whether to allow network traffic from same network')
+
+
+class FirewallDriver(object):
+ """ Firewall Driver base class.
+
+    Defines methods that any driver providing security groups
+    and provider firewall functionality should implement.
+ """
+ def prepare_instance_filter(self, instance, network_info):
+ """Prepare filters for the instance.
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance, network_info):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance, network_info):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
+
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_provider_fw_rules(self):
+ """Refresh common rules for all hosts/instances from data store.
+
+ Gets called when a rule has been added to or removed from
+ the list of rules (via admin api).
+
+ """
+ raise NotImplementedError()
+
+ def setup_basic_filtering(self, instance, network_info):
+ """Create rules to block spoofing and allow dhcp.
+
+ This gets called when spawning an instance, before
+ :method:`prepare_instance_filter`.
+
+ """
+ raise NotImplementedError()
+
+ def instance_filter_exists(self, instance, network_info):
+ """Check nova-instance-instance-xxx exists"""
+ raise NotImplementedError()
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ """ Driver which enforces security groups through iptables rules. """
+
+ def __init__(self, **kwargs):
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
+ self.instances = {}
+ self.network_infos = {}
+ self.basicly_filtered = False
+
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def setup_basic_filtering(self, instance, network_info):
+ pass
+
+ def apply_instance_filter(self, instance, network_info):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def unfilter_instance(self, instance, network_info):
+ if self.instances.pop(instance['id'], None):
+ # NOTE(vish): use the passed info instead of the stored info
+ self.network_infos.pop(instance['id'])
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
+ else:
+ LOG.info(_('Attempted to unfilter instance %s which is not '
+ 'filtered'), instance['id'])
+
+ def prepare_instance_filter(self, instance, network_info):
+ self.instances[instance['id']] = instance
+ self.network_infos[instance['id']] = network_info
+ self.add_filters_for_instance(instance)
+ self.iptables.apply()
+
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
+
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
+
+ ipv6_rules = []
+ if FLAGS.use_ipv6:
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
+
+ return ipv4_rules, ipv6_rules
+
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def add_filters_for_instance(self, instance):
+ network_info = self.network_infos[instance['id']]
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ @staticmethod
+ def _security_group_chain_name(security_group_id):
+ return 'nova-sg-%s' % (security_group_id,)
+
+ def _instance_chain_name(self, instance):
+ return 'inst-%s' % (instance['id'],)
+
+ def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info):
+ # Always drop invalid packets
+ ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
+ ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
+
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+
+ def _do_dhcp_rules(self, ipv4_rules, network_info):
+ dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]
+
+ for dhcp_server in dhcp_servers:
+ ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,))
+
+ def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info):
+ cidrs = [network['cidr'] for (network, _i) in network_info]
+ for cidr in cidrs:
+ ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
+ if FLAGS.use_ipv6:
+ cidrv6s = [network['cidr_v6'] for (network, _i) in
+ network_info]
+
+ for cidrv6 in cidrv6s:
+ ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
+
+ def _do_ra_rules(self, ipv6_rules, network_info):
+ gateways_v6 = [mapping['gateway_v6'] for (_n, mapping) in
+ network_info]
+ for gateway_v6 in gateways_v6:
+ ipv6_rules.append(
+ '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
+
+ def _build_icmp_rule(self, rule, version):
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ return ['-m', 'icmp', '--icmp-type', icmp_type_arg]
+ elif version == 6:
+ return ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg]
+ # return empty list if icmp_type == -1
+ return []
+
+ def _build_tcp_udp_rule(self, rule, version):
+ if rule.from_port == rule.to_port:
+ return ['--dport', '%s' % (rule.from_port,)]
+ else:
+ return ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+
+ def instance_rules(self, instance, network_info):
+ ctxt = context.get_admin_context()
+
+ ipv4_rules = []
+ ipv6_rules = []
+
+ # Initialize with basic rules
+ self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
+ # Set up rules to allow traffic to/from DHCP server
+ self._do_dhcp_rules(ipv4_rules, network_info)
+
+ #Allow project network traffic
+ if FLAGS.allow_same_net_traffic:
+ self._do_project_network_rules(ipv4_rules, ipv6_rules,
+ network_info)
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ self._do_ra_rules(ipv6_rules, network_info)
+
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+ LOG.debug(_('Adding security group rule: %r'), rule)
+
+ if not rule.cidr:
+ version = 4
+ else:
+ version = netutils.get_ip_version(rule.cidr)
+
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
+ protocol = rule.protocol
+ if version == 6 and rule.protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-j ACCEPT']
+ if protocol:
+ args += ['-p', protocol]
+
+ if protocol in ['udp', 'tcp']:
+ args += self._build_tcp_udp_rule(rule, version)
+ elif protocol == 'icmp':
+ args += self._build_icmp_rule(rule, version)
+ if rule.cidr:
+ LOG.info('Using cidr %r', rule.cidr)
+ args += ['-s', rule.cidr]
+ fw_rules += [' '.join(args)]
+ else:
+ if rule['grantee_group']:
+ for instance in rule['grantee_group']['instances']:
+ LOG.info('instance: %r', instance)
+ ips = db.instance_get_fixed_addresses(ctxt,
+ instance['id'])
+ LOG.info('ips: %r', ips)
+ for ip in ips:
+ subrule = args + ['-s %s' % ip]
+ fw_rules += [' '.join(subrule)]
+
+ LOG.info('Using fw_rules: %r', fw_rules)
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
+
+ return ipv4_rules, ipv6_rules
+
+ def instance_filter_exists(self, instance, network_info):
+ pass
+
+ def refresh_security_group_members(self, security_group):
+ self.do_refresh_security_group_rules(security_group)
+ self.iptables.apply()
+
+ def refresh_security_group_rules(self, security_group):
+ self.do_refresh_security_group_rules(security_group)
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self, security_group):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ self.add_filters_for_instance(instance)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c1870261d..2e2327533 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -93,18 +93,12 @@ flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
-flags.DEFINE_bool('allow_same_net_traffic',
- True,
- 'Whether to allow network traffic from same network')
flags.DEFINE_bool('use_cow_images',
True,
'Whether to use cow images')
flags.DEFINE_string('ajaxterm_portrange',
'10000-12000',
'Range of ports that ajaxterm should randomly try to bind')
-flags.DEFINE_string('firewall_driver',
- 'nova.virt.libvirt.firewall.IptablesFirewallDriver',
- 'Firewall driver (defaults to iptables)')
flags.DEFINE_string('cpuinfo_xml_template',
utils.abspath('virt/cpuinfo.xml.template'),
'CpuInfo XML Template (Used only live migration now)')
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index debc186d6..9b9e3540e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -25,7 +25,9 @@ from nova import db
from nova import flags
from nova import log as logging
from nova import utils
-from nova.virt.libvirt import netutils
+
+import nova.virt.firewall as base_firewall
+from nova.virt import netutils
LOG = logging.getLogger("nova.virt.libvirt.firewall")
@@ -39,65 +41,7 @@ except ImportError:
"not work correctly."))
-class FirewallDriver(object):
- def prepare_instance_filter(self, instance, network_info):
- """Prepare filters for the instance.
-
- At this point, the instance isn't running yet."""
- raise NotImplementedError()
-
- def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
- raise NotImplementedError()
-
- def apply_instance_filter(self, instance, network_info):
- """Apply instance filter.
-
- Once this method returns, the instance should be firewalled
- appropriately. This method should as far as possible be a
- no-op. It's vastly preferred to get everything set up in
- prepare_instance_filter.
- """
- raise NotImplementedError()
-
- def refresh_security_group_rules(self, security_group_id):
- """Refresh security group rules from data store
-
- Gets called when a rule has been added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def refresh_security_group_members(self, security_group_id):
- """Refresh security group members from data store
-
- Gets called when an instance gets added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def refresh_provider_fw_rules(self):
- """Refresh common rules for all hosts/instances from data store.
-
- Gets called when a rule has been added to or removed from
- the list of rules (via admin api).
-
- """
- raise NotImplementedError()
-
- def setup_basic_filtering(self, instance, network_info):
- """Create rules to block spoofing and allow dhcp.
-
- This gets called when spawning an instance, before
- :method:`prepare_instance_filter`.
-
- """
- raise NotImplementedError()
-
- def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
- raise NotImplementedError()
-
-
-class NWFilterFirewall(FirewallDriver):
+class NWFilterFirewall(base_firewall.FirewallDriver):
"""
This class implements a network filtering mechanism versatile
enough for EC2 style Security Group filtering by leveraging
@@ -512,19 +456,10 @@ class NWFilterFirewall(FirewallDriver):
return True
-class IptablesFirewallDriver(FirewallDriver):
+class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def __init__(self, execute=None, **kwargs):
- from nova.network import linux_net
- self.iptables = linux_net.iptables_manager
- self.instances = {}
- self.network_infos = {}
+ super(IptablesFirewallDriver, self).__init__(**kwargs)
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
- self.basicly_filtered = False
-
- self.iptables.ipv4['filter'].add_chain('sg-fallback')
- self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
- self.iptables.ipv6['filter'].add_chain('sg-fallback')
- self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
@@ -539,6 +474,8 @@ class IptablesFirewallDriver(FirewallDriver):
pass
def unfilter_instance(self, instance, network_info):
+ # NOTE(salvatore-orlando):
+ # Overriding base class method for applying nwfilter operation
if self.instances.pop(instance['id'], None):
# NOTE(vish): use the passed info instead of the stored info
self.network_infos.pop(instance['id'])
@@ -549,62 +486,7 @@ class IptablesFirewallDriver(FirewallDriver):
LOG.info(_('Attempted to unfilter instance %s which is not '
'filtered'), instance['id'])
- def prepare_instance_filter(self, instance, network_info):
- self.instances[instance['id']] = instance
- self.network_infos[instance['id']] = network_info
- self.add_filters_for_instance(instance)
- self.iptables.apply()
-
- def _create_filter(self, ips, chain_name):
- return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
-
- def _filters_for_instance(self, chain_name, network_info):
- ips_v4 = [ip['ip'] for (_n, mapping) in network_info
- for ip in mapping['ips']]
- ipv4_rules = self._create_filter(ips_v4, chain_name)
-
- ipv6_rules = []
- if FLAGS.use_ipv6:
- ips_v6 = [ip['ip'] for (_n, mapping) in network_info
- for ip in mapping['ip6s']]
- ipv6_rules = self._create_filter(ips_v6, chain_name)
-
- return ipv4_rules, ipv6_rules
-
- def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
- for rule in ipv4_rules:
- self.iptables.ipv4['filter'].add_rule(chain_name, rule)
-
- if FLAGS.use_ipv6:
- for rule in ipv6_rules:
- self.iptables.ipv6['filter'].add_rule(chain_name, rule)
-
- def add_filters_for_instance(self, instance):
- network_info = self.network_infos[instance['id']]
- chain_name = self._instance_chain_name(instance)
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- self.iptables.ipv4['filter'].add_chain(chain_name)
- ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
- network_info)
- self._add_filters('local', ipv4_rules, ipv6_rules)
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
- self._add_filters(chain_name, ipv4_rules, ipv6_rules)
-
- def remove_filters_for_instance(self, instance):
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].remove_chain(chain_name)
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].remove_chain(chain_name)
-
- @staticmethod
- def instance_rules(instance, network_info):
- ctxt = context.get_admin_context()
-
- ipv4_rules = []
- ipv6_rules = []
-
+ def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info):
# Always drop invalid packets
ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
@@ -617,131 +499,10 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
- dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]
-
- for dhcp_server in dhcp_servers:
- ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT' % (dhcp_server,))
-
- #Allow project network traffic
- if FLAGS.allow_same_net_traffic:
- cidrs = [network['cidr'] for (network, _m) in network_info]
- for cidr in cidrs:
- ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
-
- # We wrap these in FLAGS.use_ipv6 because they might cause
- # a DB lookup. The other ones are just list operations, so
- # they're not worth the clutter.
- if FLAGS.use_ipv6:
- # Allow RA responses
- gateways_v6 = [mapping['gateway_v6'] for (_n, mapping) in
- network_info]
- for gateway_v6 in gateways_v6:
- ipv6_rules.append(
- '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
-
- #Allow project network traffic
- if FLAGS.allow_same_net_traffic:
- cidrv6s = [network['cidr_v6'] for (network, _m) in
- network_info]
-
- for cidrv6 in cidrv6s:
- ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
-
- security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
-
- # then, security group chains and rules
- for security_group in security_groups:
- rules = db.security_group_rule_get_by_security_group(ctxt,
- security_group['id'])
-
- for rule in rules:
- LOG.debug(_('Adding security group rule: %r'), rule)
-
- if not rule.cidr:
- version = 4
- else:
- version = netutils.get_ip_version(rule.cidr)
-
- if version == 4:
- fw_rules = ipv4_rules
- else:
- fw_rules = ipv6_rules
-
- protocol = rule.protocol
- if version == 6 and rule.protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-j ACCEPT']
- if protocol:
- args += ['-p', protocol]
-
- if protocol in ['udp', 'tcp']:
- if rule.from_port == rule.to_port:
- args += ['--dport', '%s' % (rule.from_port,)]
- else:
- args += ['-m', 'multiport',
- '--dports', '%s:%s' % (rule.from_port,
- rule.to_port)]
- elif protocol == 'icmp':
- icmp_type = rule.from_port
- icmp_code = rule.to_port
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
-
- if rule.cidr:
- LOG.info('Using cidr %r', rule.cidr)
- args += ['-s', rule.cidr]
- fw_rules += [' '.join(args)]
- else:
- if rule['grantee_group']:
- for instance in rule['grantee_group']['instances']:
- LOG.info('instance: %r', instance)
- ips = db.instance_get_fixed_addresses(ctxt,
- instance['id'])
- LOG.info('ips: %r', ips)
- for ip in ips:
- subrule = args + ['-s %s' % ip]
- fw_rules += [' '.join(subrule)]
-
- LOG.info('Using fw_rules: %r', fw_rules)
- ipv4_rules += ['-j $sg-fallback']
- ipv6_rules += ['-j $sg-fallback']
-
- return ipv4_rules, ipv6_rules
-
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists"""
return self.nwfilter.instance_filter_exists(instance, network_info)
- def refresh_security_group_members(self, security_group):
- self.do_refresh_security_group_rules(security_group)
- self.iptables.apply()
-
- def refresh_security_group_rules(self, security_group):
- self.do_refresh_security_group_rules(security_group)
- self.iptables.apply()
-
- @utils.synchronized('iptables', external=True)
- def do_refresh_security_group_rules(self, security_group):
- for instance in self.instances.values():
- self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance)
-
def refresh_provider_fw_rules(self):
"""See class:FirewallDriver: docs."""
self._do_refresh_provider_fw_rules()
@@ -821,11 +582,3 @@ class IptablesFirewallDriver(FirewallDriver):
args += ['-j DROP']
fw_rules += [' '.join(args)]
return ipv4_rules, ipv6_rules
-
- @staticmethod
- def _security_group_chain_name(security_group_id):
- return 'nova-sg-%s' % (security_group_id,)
-
- @staticmethod
- def _instance_chain_name(instance):
- return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 6ce1041cc..d4375f6da 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -24,9 +24,10 @@ from nova import flags
from nova import log as logging
from nova.network import linux_net
from nova import utils
-from nova.virt.libvirt import netutils
+from nova.virt import netutils
from nova.virt.vif import VIFDriver
+
LOG = logging.getLogger('nova.virt.libvirt.vif')
FLAGS = flags.FLAGS
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/netutils.py
index 6f303072d..6f303072d 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/netutils.py
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index b7f665fa7..9b0c2adce 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -378,7 +378,7 @@ class SessionBase(object):
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
- raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+ raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
@@ -386,7 +386,7 @@ class SessionBase(object):
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
- raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+ raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = pbd_ref['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
new file mode 100644
index 000000000..282e12d9e
--- /dev/null
+++ b/nova/virt/xenapi/firewall.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova.virt.firewall import IptablesFirewallDriver
+from nova.virt import netutils
+
+
+LOG = logging.getLogger("nova.virt.xenapi.firewall")
+FLAGS = flags.FLAGS
+
+
+class Dom0IptablesFirewallDriver(IptablesFirewallDriver):
+ """ IptablesFirewallDriver class
+
+ This class provides an implementation for nova.virt.firewall
+ using iptables. This class is meant to be used with the xenapi
+ backend and uses xenapi plugin to enforce iptables rules in dom0
+
+ """
+ def _plugin_execute(self, *cmd, **kwargs):
+ # Prepare arguments for plugin call
+ args = {}
+ args.update(map(lambda x: (x, str(kwargs[x])), kwargs))
+ args['cmd_args'] = json.dumps(cmd)
+ task = self._session.async_call_plugin(
+ 'xenhost', 'iptables_config', args)
+ ret = self._session.wait_for_task(task)
+ json_ret = json.loads(ret)
+ return (json_ret['out'], json_ret['err'])
+
+ def __init__(self, xenapi_session=None, **kwargs):
+ from nova.network import linux_net
+ super(Dom0IptablesFirewallDriver, self).__init__(**kwargs)
+ self._session = xenapi_session
+ # Create IpTablesManager with executor through plugin
+ self.iptables = linux_net.IptablesManager(self._plugin_execute)
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def _build_tcp_udp_rule(self, rule, version):
+ if rule.from_port == rule.to_port:
+ return ['--dport', '%s' % (rule.from_port,)]
+ else:
+ # No multiport needed for XS!
+ return ['--dport', '%s:%s' % (rule.from_port,
+ rule.to_port)]
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c0ac3ed8d..6cda61204 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -849,6 +849,14 @@ class VMHelper(HelperBase):
return (None, None)
@classmethod
+ def is_snapshot(cls, session, vm):
+ vm_rec = session.call_xenapi("VM.get_record", vm)
+ if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
+ return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
+ else:
+ return False
+
+ @classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index bb6239d7e..99f5ca650 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -94,6 +94,8 @@ class VMOps(object):
self._session = session
self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI
+ fw_class = utils.import_class(FLAGS.firewall_driver)
+ self.firewall_driver = fw_class(xenapi_session=self._session)
vif_impl = utils.import_class(FLAGS.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self._product_version = product_version
@@ -207,9 +209,22 @@ class VMOps(object):
self._update_instance_progress(context, instance,
step=3,
total_steps=BUILD_TOTAL_STEPS)
+ # 4. Prepare security group filters
+ # NOTE(salvatore-orlando): setup_basic_filtering might be empty or
+ # not implemented at all, as basic filter could be implemented
+ # with VIF rules created by xapi plugin
+ try:
+ self.firewall_driver.setup_basic_filtering(
+ instance, network_info)
+ except NotImplementedError:
+ pass
+ self.firewall_driver.prepare_instance_filter(instance,
+ network_info)
- # 4. Boot the Instance
+ # 5. Boot the Instance
self._spawn(instance, vm_ref)
+ # The VM has started, let's ensure the security groups are enforced
+ self.firewall_driver.apply_instance_filter(instance, network_info)
self._update_instance_progress(context, instance,
step=4,
total_steps=BUILD_TOTAL_STEPS)
@@ -828,6 +843,9 @@ class VMOps(object):
def reboot(self, instance, reboot_type):
"""Reboot VM instance."""
+ # Note (salvatore-orlando): security group rules are not re-enforced
+ # upon reboot, since this action on the XenAPI drivers does not
+ # remove existing filters
vm_ref = self._get_vm_opaque_ref(instance)
if reboot_type == "HARD":
@@ -1117,16 +1135,21 @@ class VMOps(object):
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."))
return
-
+ is_snapshot = VMHelper.is_snapshot(self._session, vm_ref)
if shutdown:
self._shutdown(instance, vm_ref)
self._destroy_vdis(instance, vm_ref)
if destroy_kernel_ramdisk:
self._destroy_kernel_ramdisk(instance, vm_ref)
- self._destroy_vm(instance, vm_ref)
+ self._destroy_vm(instance, vm_ref)
self.unplug_vifs(instance, network_info)
+ # Remove security group filters for the instance,
+ # unless the VM is a snapshot
+ if not is_snapshot:
+ self.firewall_driver.unfilter_instance(instance,
+ network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
@@ -1683,6 +1706,19 @@ class VMOps(object):
def clear_param_xenstore(self, instance_or_vm):
"""Removes all data from the xenstore parameter record for this VM."""
self.write_to_param_xenstore(instance_or_vm, {})
+
+ def refresh_security_group_rules(self, security_group_id):
+ """ recreates security group rules for every instance """
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
+
+ def refresh_security_group_members(self, security_group_id):
+ """ recreates security group rules for every instance """
+ self.firewall_driver.refresh_security_group_members(security_group_id)
+
+ def unfilter_instance(self, instance_ref, network_info):
+ """Removes filters for each VIF of the specified instance."""
+ self.firewall_driver.unfilter_instance(instance_ref,
+ network_info=network_info)
########################################################################
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 61b830054..951db00e8 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -411,6 +411,10 @@ class XenAPIConnection(driver.ComputeDriver):
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only libvirt."""
+ # NOTE(salvatore-orlando): it enforces security groups on
+ # host initialization and live migration.
+ # Live migration is not supported by XenAPI (as of 2011-11-09)
+ # In XenAPI we do not assume instances running upon host initialization
return
def live_migration(self, context, instance_ref, dest,
@@ -419,8 +423,22 @@ class XenAPIConnection(driver.ComputeDriver):
return
def unfilter_instance(self, instance_ref, network_info):
- """This method is supported only by libvirt."""
- raise NotImplementedError('This method is supported only by libvirt.')
+ """Removes security groups configured for an instance."""
+ return self._vmops.unfilter_instance(instance_ref, network_info)
+
+ def refresh_security_group_rules(self, security_group_id):
+ """ Updates security group rules for all instances
+ associated with a given security group
+ Invoked when security group rules are updated
+ """
+ return self._vmops.refresh_security_group_rules(security_group_id)
+
+ def refresh_security_group_members(self, security_group_id):
+ """ Updates security group rules for all instances
+ associated with a given security group
+ Invoked when instances are added/removed to a security group
+ """
+ return self._vmops.refresh_security_group_members(security_group_id)
def update_host_status(self):
"""Update the status info of the host, and return those values
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index f02597afc..64938641f 100755..100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -25,6 +25,7 @@ try:
import json
except ImportError:
import simplejson as json
+import logging
import os
import random
import re
@@ -67,6 +68,35 @@ def _run_command(cmd):
return proc.stdout.read()
+# NOTE (salvatore-orlando):
+# Instead of updating run_command a new method has been implemented,
+# in order to avoid risking breaking existing functions calling _run_command
+def _run_command_with_input(cmd, process_input):
+ """Abstracts out the basics of issuing system commands. If the command
+ returns anything in stderr, a PluginError is raised with that information.
+ Otherwise, the output from stdout is returned.
+
+ process_input specifies a variable to use as the process' standard input.
+ """
+ pipe = subprocess.PIPE
+ # cmd can be either a single string with command and arguments,
+ # or a sequence of strings
+ if not hasattr(cmd, '__iter__'):
+ cmd = [cmd] # make it iterable
+
+ #Note(salvatore-orlando): the shell argument has been set to False
+ proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
+ stderr=pipe, close_fds=True)
+ if process_input is not None:
+ (output, err) = proc.communicate(process_input)
+ else:
+ (output, err) = proc.communicate()
+ if err:
+ raise pluginlib.PluginError(err)
+ # This is tantamount to proc.stdout.read()
+ return output
+
+
def _get_host_uuid():
cmd = "xe host-list | grep uuid"
resp = _run_command(cmd)
@@ -163,6 +193,31 @@ def set_config(self, arg_dict):
_write_config_dict(conf)
+def iptables_config(session, args):
+ # command should be either save or restore
+ logging.debug("iptables_config:enter")
+ logging.debug("iptables_config: args=%s", args)
+ cmd_args = pluginlib.exists(args, 'cmd_args')
+ logging.debug("iptables_config: cmd_args=%s", cmd_args)
+ process_input = pluginlib.optional(args, 'process_input')
+ logging.debug("iptables_config: process_input=%s", process_input)
+ cmd = json.loads(cmd_args)
+ cmd = map(str, cmd)
+
+ # either execute iptable-save or iptables-restore
+ # command must be only one of these two
+ # process_input must be used only with iptables-restore
+ if len(cmd) > 0 and cmd[0] in ('iptables-save', 'iptables-restore'):
+ result = _run_command_with_input(cmd, process_input)
+ ret_str = json.dumps(dict(out=result,
+ err=''))
+ logging.debug("iptables_config:exit")
+ return ret_str
+ else:
+ # else don't do anything and return an error
+ raise pluginlib.PluginError(_("Invalid iptables command"))
+
+
def _power_action(action):
host_uuid = _get_host_uuid()
# Host must be disabled first
@@ -326,4 +381,5 @@ if __name__ == "__main__":
"host_reboot": host_reboot,
"host_start": host_start,
"get_config": get_config,
- "set_config": set_config})
+ "set_config": set_config,
+ "iptables_config": iptables_config})