| author | Ed Leafe <ed@leafe.com> | 2011-01-10 09:09:59 -0600 |
|---|---|---|
| committer | Ed Leafe <ed@leafe.com> | 2011-01-10 09:09:59 -0600 |
| commit | cb3667b95ceead8a677fdfa24abf2a56baf12992 | |
| tree | 25422e200d7f01225455bb42fda418f945a56b98 | |
| parent | a0ec77b597713fd9a4be5bb7b892eba4ac53e625 | |
| parent | 4830cb5d8959c06fbe480481823bc922a2a59e3e | |
merged changes from trunk
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | .bzrignore | 1 |
| -rw-r--r-- | Authors | 2 |
| -rw-r--r-- | doc/source/conf.py | 8 |
| -rw-r--r-- | nova/api/ec2/cloud.py | 33 |
| -rw-r--r-- | nova/compute/api.py | 57 |
| -rw-r--r-- | nova/compute/manager.py | 13 |
| -rw-r--r-- | nova/db/api.py | 7 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 42 |
| -rw-r--r-- | nova/log.py | 14 |
| -rw-r--r-- | nova/network/linux_net.py | 2 |
| -rw-r--r-- | nova/service.py | 5 |
| -rw-r--r-- | nova/tests/test_cloud.py | 18 |
| -rw-r--r-- | nova/tests/test_virt.py | 100 |
| -rw-r--r-- | nova/version.py | 46 |
| -rw-r--r-- | nova/virt/libvirt_conn.py | 381 |
| -rw-r--r-- | nova/volume/driver.py | 5 |
| -rw-r--r-- | setup.py | 11 |
17 files changed, 637 insertions(+), 108 deletions(-)
```diff
diff --git a/.bzrignore b/.bzrignore
index d81a7d829..b271561a3 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -12,3 +12,4 @@ CA/openssl.cnf
 CA/serial*
 CA/newcerts/*.pem
 CA/private/cakey.pem
+nova/vcsversion.py
diff --git a/Authors b/Authors
@@ -23,6 +23,7 @@ Jonathan Bryce <jbryce@jbryce.com>
 Josh Kearney <josh.kearney@rackspace.com>
 Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
+Ken Pepple <ken.pepple@gmail.com>
 Matt Dietz <matt.dietz@rackspace.com>
 Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
@@ -40,4 +41,3 @@ Trey Morris <trey.morris@rackspace.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 8f1b370cc..996dfb0a7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
-# The short X.Y version.
-version = '2011.1'
+from nova import version as nova_version
+#import nova.version
 # The full version, including alpha/beta/rc tags.
-release = '2011.1-prerelease'
+release = nova_version.version_string()
+# The short X.Y version.
+version = nova_version.canonical_version_string()
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index ccce83b84..7f1a414f3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -25,7 +25,6 @@ datastore.
 import base64
 import datetime
 import IPy
-import re
 import os
 
 from nova import compute
@@ -35,7 +34,6 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
-from nova import quota
 from nova import network
 from nova import rpc
 from nova import utils
@@ -133,15 +131,6 @@ class CloudController(object):
             result[key] = [line]
         return result
 
-    def _trigger_refresh_security_group(self, context, security_group):
-        nodes = set([instance['host'] for instance in security_group.instances
-                     if instance['host'] is not None])
-        for node in nodes:
-            rpc.cast(context,
-                     '%s.%s' % (FLAGS.compute_topic, node),
-                     {"method": "refresh_security_group",
-                      "args": {"security_group_id": security_group.id}})
-
     def get_metadata(self, address):
         ctxt = context.get_admin_context()
         instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -249,6 +238,7 @@ class CloudController(object):
                         FLAGS.cc_host,
                         FLAGS.cc_port,
                         FLAGS.ec2_suffix)}]
+
         return {'regionInfo': regions}
 
     def describe_snapshots(self, context,
@@ -418,7 +408,8 @@ class CloudController(object):
                     match = False
             if match:
                 db.security_group_rule_destroy(context, rule['id'])
-                self._trigger_refresh_security_group(context, security_group)
+                self.compute_api.trigger_security_group_rules_refresh(context,
+                                                      security_group['id'])
             return True
 
         raise exception.ApiError(_("No rule for the specified parameters."))
@@ -443,7 +434,8 @@
 
         security_group_rule = db.security_group_rule_create(context, values)
 
-        self._trigger_refresh_security_group(context, security_group)
+        self.compute_api.trigger_security_group_rules_refresh(context,
+                                                      security_group['id'])
 
         return True
 
@@ -602,19 +594,24 @@ class CloudController(object):
         return [{label: x} for x in lst]
 
     def describe_instances(self, context, **kwargs):
-        return self._format_describe_instances(context)
+        return self._format_describe_instances(context, **kwargs)
 
-    def _format_describe_instances(self, context):
-        return {'reservationSet': self._format_instances(context)}
+    def _format_describe_instances(self, context, **kwargs):
+        return {'reservationSet': self._format_instances(context, **kwargs)}
 
     def _format_run_instances(self, context, reservation_id):
         i = self._format_instances(context, reservation_id=reservation_id)
         assert len(i) == 1
         return i[0]
 
-    def _format_instances(self, context, **kwargs):
+    def _format_instances(self, context, instance_id=None, **kwargs):
         reservations = {}
-        instances = self.compute_api.get_all(context, **kwargs)
+        # NOTE(vish): instance_id is an optional list of ids to filter by
+        if instance_id:
+            instance_id = [ec2_id_to_id(x) for x in instance_id]
+            instances = [self.compute_api.get(context, x) for x in instance_id]
+        else:
+            instances = self.compute_api.get_all(context, **kwargs)
         for instance in instances:
             if not context.user.is_admin():
                 if instance['image_id'] == FLAGS.vpn_image_id:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 800bc6899..a20dc59cb 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -185,6 +185,9 @@ class API(base.Base):
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id}})
 
+        for group_id in security_groups:
+            self.trigger_security_group_members_refresh(elevated, group_id)
+
         return instances
 
     def ensure_default_security_group(self, context):
@@ -204,6 +207,60 @@ class API(base.Base):
                       'project_id': context.project_id}
             db.security_group_create(context, values)
 
+    def trigger_security_group_rules_refresh(self, context, security_group_id):
+        """Called when a rule is added to or removed from a security_group"""
+
+        security_group = self.db.security_group_get(context, security_group_id)
+
+        hosts = set()
+        for instance in security_group['instances']:
+            if instance['host'] is not None:
+                hosts.add(instance['host'])
+
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group_rules",
+                      "args": {"security_group_id": security_group.id}})
+
+    def trigger_security_group_members_refresh(self, context, group_id):
+        """Called when a security group gains a new or loses a member
+
+        Sends an update request to each compute node for whom this is
+        relevant."""
+
+        # First, we get the security group rules that reference this group as
+        # the grantee..
+        security_group_rules = \
+                self.db.security_group_rule_get_by_security_group_grantee(
+                                                                     context,
+                                                                     group_id)
+
+        # ..then we distill the security groups to which they belong..
+        security_groups = set()
+        for rule in security_group_rules:
+            security_groups.add(rule['parent_group_id'])
+
+        # ..then we find the instances that are members of these groups..
+        instances = set()
+        for security_group in security_groups:
+            for instance in security_group['instances']:
+                instances.add(instance['id'])
+
+        # ...then we find the hosts where they live...
+        hosts = set()
+        for instance in instances:
+            if instance['host']:
+                hosts.add(instance['host'])
+
+        # ...and finally we tell these nodes to refresh their view of this
+        # particular security group.
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {"method": "refresh_security_group_members",
+                      "args": {"security_group_id": group_id}})
+
     def update(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 21b09e443..3d22ee432 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -141,9 +141,16 @@ class ComputeManager(manager.Manager):
                  host)
 
     @exception.wrap_exception
-    def refresh_security_group(self, context, security_group_id, **_kwargs):
-        """This call passes stright through to the virtualization driver."""
-        self.driver.refresh_security_group(security_group_id)
+    def refresh_security_group_rules(self, context,
+                                     security_group_id, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
+        return self.driver.refresh_security_group_rules(security_group_id)
+
+    @exception.wrap_exception
+    def refresh_security_group_members(self, context,
+                                       security_group_id, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
+        return self.driver.refresh_security_group_members(security_group_id)
 
     @exception.wrap_exception
     def run_instance(self, context, instance_id, **_kwargs):
diff --git a/nova/db/api.py b/nova/db/api.py
index 0fa5eb1e8..8b0242c9a 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -772,6 +772,13 @@ def security_group_rule_get_by_security_group(context, security_group_id):
                                                          security_group_id)
 
 
+def security_group_rule_get_by_security_group_grantee(context,
+                                                      security_group_id):
+    """Get all rules that grant access to the given security group."""
+    return IMPL.security_group_rule_get_by_security_group_grantee(context,
+                                                         security_group_id)
+
+
 def security_group_rule_destroy(context, security_group_rule_id):
     """Deletes a security group rule."""
     return IMPL.security_group_rule_destroy(context, security_group_rule_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 45427597a..eb87355b6 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -650,7 +650,7 @@ def instance_get(context, instance_id, session=None):
     if is_admin_context(context):
         result = session.query(models.Instance).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
-                         options(joinedload('security_groups')).\
+                         options(joinedload_all('security_groups.rules')).\
                          options(joinedload('volumes')).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=can_read_deleted(context)).\
@@ -658,7 +658,7 @@ def instance_get(context, instance_id, session=None):
     elif is_user_context(context):
         result = session.query(models.Instance).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
-                         options(joinedload('security_groups')).\
+                         options(joinedload_all('security_groups.rules')).\
                          options(joinedload('volumes')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=instance_id).\
@@ -1579,6 +1579,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
 
 
 @require_context
+def security_group_rule_get_by_security_group(context, security_group_id,
+                                              session=None):
+    if not session:
+        session = get_session()
+    if is_admin_context(context):
+        result = session.query(models.SecurityGroupIngressRule).\
+                         filter_by(deleted=can_read_deleted(context)).\
+                         filter_by(parent_group_id=security_group_id).\
+                         all()
+    else:
+        # TODO(vish): Join to group and check for project_id
+        result = session.query(models.SecurityGroupIngressRule).\
+                         filter_by(deleted=False).\
+                         filter_by(parent_group_id=security_group_id).\
+                         all()
+    return result
+
+
+@require_context
+def security_group_rule_get_by_security_group_grantee(context,
+                                                      security_group_id,
+                                                      session=None):
+    if not session:
+        session = get_session()
+    if is_admin_context(context):
+        result = session.query(models.SecurityGroupIngressRule).\
+                         filter_by(deleted=can_read_deleted(context)).\
+                         filter_by(group_id=security_group_id).\
+                         all()
+    else:
+        result = session.query(models.SecurityGroupIngressRule).\
+                         filter_by(deleted=False).\
+                         filter_by(group_id=security_group_id).\
+                         all()
+    return result
+
+
+@require_context
 def security_group_rule_create(context, values):
     security_group_rule_ref = models.SecurityGroupIngressRule()
     security_group_rule_ref.update(values)
diff --git a/nova/log.py b/nova/log.py
index 6f5377e87..c1428c051 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -34,24 +34,19 @@ import logging.handlers
 import traceback
 
 from nova import flags
-# TODO(todd): fix after version.py merge
-# from nova import version
+from nova import version
 
 
 FLAGS = flags.FLAGS
 
-# TODO(todd): fix after version.py merge
-# '(%(name)s %(nova_version)s): %(levelname)s '
 flags.DEFINE_string('logging_context_format_string',
-                    '(%(name)s): %(levelname)s '
+                    '(%(name)s %(nova_version)s): %(levelname)s '
                     '[%(request_id)s %(user)s '
                     '%(project)s] %(message)s',
                     'format string to use for log messages')
 
-# TODO(todd): fix after version.py merge
-# '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
 flags.DEFINE_string('logging_default_format_string',
-                    '(%(name)s): %(levelname)s [N/A] '
+                    '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
                     '%(message)s',
                     'format string to use for log messages')
@@ -162,8 +157,7 @@ class NovaLogger(logging.Logger):
         extra = {}
         if context:
             extra.update(_dictify_context(context))
-        # TODO(todd): fix after version.py merge
-        #extra.update({"nova_version": version.string_with_vcs()})
+        extra.update({"nova_version": version.version_string_with_vcs()})
         logging.Logger._log(self, level, msg, args, exc_info, extra)
 
     def addHandler(self, handler):
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index c525d5dc8..eba9502e9 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -209,6 +209,8 @@ def ensure_bridge(bridge, interface, net_attrs=None):
     _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
     _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
 
+    _execute("sudo iptables -N nova-local", check_exit_code=False)
+    _confirm_rule("FORWARD", "-j nova-local")
 
 
 def get_dhcp_hosts(context, network_id):
diff --git a/nova/service.py b/nova/service.py
index 864a42469..523c1a8d7 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -38,7 +38,6 @@ from nova import log as logging
 from nova import flags
 from nova import rpc
 from nova import utils
-from nova.db.sqlalchemy import models
 
 
 FLAGS = flags.FLAGS
@@ -209,6 +208,10 @@ class Service(object):
             logging.exception(_("model server went away"))
 
         try:
+            # NOTE(vish): This is late-loaded to make sure that the
+            #             database is not created before flags have
+            #             been loaded.
+            from nova.db.sqlalchemy import models
             models.register_models()
         except OperationalError:
             logging.exception(_("Data store %s is unreachable."
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index a645ef538..b8a15c7b2 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -133,6 +133,23 @@ class CloudTestCase(test.TestCase):
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])
 
+    def test_describe_instances(self):
+        """Makes sure describe_instances works and filters results."""
+        inst1 = db.instance_create(self.context, {'reservation_id': 'a'})
+        inst2 = db.instance_create(self.context, {'reservation_id': 'a'})
+        result = self.cloud.describe_instances(self.context)
+        result = result['reservationSet'][0]
+        self.assertEqual(len(result['instancesSet']), 2)
+        instance_id = cloud.id_to_ec2_id(inst2['id'])
+        result = self.cloud.describe_instances(self.context,
+                                               instance_id=[instance_id])
+        result = result['reservationSet'][0]
+        self.assertEqual(len(result['instancesSet']), 1)
+        self.assertEqual(result['instancesSet'][0]['instanceId'],
+                         instance_id)
+        db.instance_destroy(self.context, inst1['id'])
+        db.instance_destroy(self.context, inst2['id'])
+
     def test_console_output(self):
         image_id = FLAGS.default_image
         instance_type = FLAGS.default_instance_type
@@ -141,7 +158,6 @@ class CloudTestCase(test.TestCase):
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
-        print rv
         instance_id = rv['instancesSet'][0]['instanceId']
         output = self.cloud.get_console_output(context=self.context,
                                                instance_id=[instance_id])
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 4aa489d08..59053f4d0 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -208,8 +208,99 @@ class LibvirtConnTestCase(test.TestCase):
         self.manager.delete_user(self.user)
 
 
-class NWFilterTestCase(test.TestCase):
+class IptablesFirewallTestCase(test.TestCase):
+    def setUp(self):
+        super(IptablesFirewallTestCase, self).setUp()
+
+        self.manager = manager.AuthManager()
+        self.user = self.manager.create_user('fake', 'fake', 'fake',
+                                             admin=True)
+        self.project = self.manager.create_project('fake', 'fake', 'fake')
+        self.context = context.RequestContext('fake', 'fake')
+        self.network = utils.import_object(FLAGS.network_manager)
+        self.fw = libvirt_conn.IptablesFirewallDriver()
+
+    def tearDown(self):
+        self.manager.delete_project(self.project)
+        self.manager.delete_user(self.user)
+        super(IptablesFirewallTestCase, self).tearDown()
+
+    def _p(self, *args, **kwargs):
+        if 'iptables-restore' in args:
+            print ' '.join(args), kwargs['stdin']
+        if 'iptables-save' in args:
+            return
+
+    in_rules = [
+      '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+      '*filter',
+      ':INPUT ACCEPT [969615:281627771]',
+      ':FORWARD ACCEPT [0:0]',
+      ':OUTPUT ACCEPT [915599:63811649]',
+      ':nova-block-ipv4 - [0:0]',
+      '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+      '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+      '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+      '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+      '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+      ',ESTABLISHED -j ACCEPT ',
+      '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+      '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+      '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+      '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+      'COMMIT',
+      '# Completed on Mon Dec 6 11:54:13 2010'
+    ]
+
+    def test_static_filters(self):
+        self.fw.execute = self._p
+        instance_ref = db.instance_create(self.context,
+                                          {'user_id': 'fake',
+                                           'project_id': 'fake'})
+        ip = '10.11.12.13'
+
+        network_ref = db.project_get_network(self.context,
+                                             'fake')
+
+        fixed_ip = {'address': ip,
+                    'network_id': network_ref['id']}
+
+        admin_ctxt = context.get_admin_context()
+        db.fixed_ip_create(admin_ctxt, fixed_ip)
+        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+                                            'instance_id': instance_ref['id']})
+
+        secgroup = db.security_group_create(admin_ctxt,
+                                            {'user_id': 'fake',
+                                             'project_id': 'fake',
+                                             'name': 'testgroup',
+                                             'description': 'test group'})
+
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'tcp',
+                                       'from_port': 80,
+                                       'to_port': 81,
+                                       'cidr': '192.168.10.0/24'})
+
+        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
+                                       secgroup['id'])
+        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+
+        self.fw.add_instance(instance_ref)
+
+        out_rules = self.fw.modify_rules(self.in_rules)
+
+        in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+        for rule in in_rules:
+            if not 'nova' in rule:
+                self.assertTrue(rule in out_rules,
+                                'Rule went missing: %s' % rule)
+
+        print '\n'.join(out_rules)
+
+
+class NWFilterTestCase(test.TestCase):
 
     def setUp(self):
         super(NWFilterTestCase, self).setUp()
@@ -224,7 +315,8 @@ class NWFilterTestCase(test.TestCase):
 
         self.fake_libvirt_connection = Mock()
 
-        self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+        self.fw = libvirt_conn.NWFilterFirewall(
+            lambda: self.fake_libvirt_connection)
 
     def tearDown(self):
         self.manager.delete_project(self.project)
@@ -337,7 +429,7 @@ class NWFilterTestCase(test.TestCase):
                                              self.security_group.id)
         instance = db.instance_get(self.context, inst_id)
 
-        self.fw.setup_base_nwfilters()
-        self.fw.setup_nwfilters_for_instance(instance)
+        self.fw.setup_basic_filtering(instance)
+        self.fw.prepare_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
diff --git a/nova/version.py b/nova/version.py
new file mode 100644
index 000000000..7b27acb6a
--- /dev/null
+++ b/nova/version.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+try:
+    from nova.vcsversion import version_info
+except ImportError:
+    version_info = {'branch_nick': u'LOCALBRANCH',
+                    'revision_id': 'LOCALREVISION',
+                    'revno': 0}
+
+NOVA_VERSION = ['2011', '1']
+YEAR, COUNT = NOVA_VERSION
+
+FINAL = False   # This becomes true at Release Candidate time
+
+
+def canonical_version_string():
+    return '.'.join([YEAR, COUNT])
+
+
+def version_string():
+    if FINAL:
+        return canonical_version_string()
+    else:
+        return '%s-dev' % (canonical_version_string(),)
+
+
+def vcs_version_string():
+    return "%s:%s" % (version_info['branch_nick'], version_info['revision_id'])
+
+
+def version_string_with_vcs():
+    return "%s-%s" % (canonical_version_string(), vcs_version_string())
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 764ef6600..3a4b6d469 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -86,6 +86,9 @@ flags.DEFINE_string('libvirt_uri',
 flags.DEFINE_bool('allow_project_net_traffic',
                   True,
                   'Whether to allow in project network traffic')
+flags.DEFINE_string('firewall_driver',
+                    'nova.virt.libvirt_conn.IptablesFirewallDriver',
+                    'Firewall driver (defaults to iptables)')
 
 
 def get_connection(read_only):
@@ -125,16 +128,24 @@ class LibvirtConnection(object):
         self._wrapped_conn = None
         self.read_only = read_only
 
+        self.nwfilter = NWFilterFirewall(self._get_connection)
+
+        if not FLAGS.firewall_driver:
+            self.firewall_driver = self.nwfilter
+            self.nwfilter.handle_security_groups = True
+        else:
+            self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+
     def init_host(self):
-        NWFilterFirewall(self._conn).setup_base_nwfilters()
+        pass
 
-    @property
-    def _conn(self):
+    def _get_connection(self):
         if not self._wrapped_conn or not self._test_connection():
             LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
             self._wrapped_conn = self._connect(self.libvirt_uri,
                                                self.read_only)
         return self._wrapped_conn
+    _conn = property(_get_connection)
 
     def _test_connection(self):
         try:
@@ -351,10 +362,13 @@ class LibvirtConnection(object):
                              instance['id'],
                              power_state.NOSTATE,
                              'launching')
-        NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+
+        self.nwfilter.setup_basic_filtering(instance)
+        self.firewall_driver.prepare_instance_filter(instance)
         self._create_image(instance, xml)
         self._conn.createXML(xml, 0)
         LOG.debug(_("instance %s: is running"), instance['name'])
+        self.firewall_driver.apply_instance_filter(instance)
 
         timer = utils.LoopingCall(f=None)
@@ -693,18 +707,55 @@ class LibvirtConnection(object):
         domain = self._conn.lookupByName(instance_name)
         return domain.interfaceStats(interface)
 
-    def refresh_security_group(self, security_group_id):
-        fw = NWFilterFirewall(self._conn)
-        fw.ensure_security_group_filter(security_group_id)
+    def refresh_security_group_rules(self, security_group_id):
+        self.firewall_driver.refresh_security_group_rules(security_group_id)
+
+    def refresh_security_group_members(self, security_group_id):
+        self.firewall_driver.refresh_security_group_members(security_group_id)
+
+
+class FirewallDriver(object):
+    def prepare_instance_filter(self, instance):
+        """Prepare filters for the instance.
+
+        At this point, the instance isn't running yet."""
+        raise NotImplementedError()
+
+    def apply_instance_filter(self, instance):
+        """Apply instance filter.
+
+        Once this method returns, the instance should be firewalled
+        appropriately. This method should as far as possible be a
+        no-op. It's vastly preferred to get everything set up in
+        prepare_instance_filter.
+        """
+        raise NotImplementedError()
+
+    def refresh_security_group_rules(self, security_group_id):
+        """Refresh security group rules from data store
+
+        Gets called when a rule has been added to or removed from
+        the security group."""
+        raise NotImplementedError()
+
+    def refresh_security_group_members(self, security_group_id):
+        """Refresh security group members from data store
+
+        Gets called when an instance gets added to or removed from
+        the security group."""
+        raise NotImplementedError()
 
 
-class NWFilterFirewall(object):
+class NWFilterFirewall(FirewallDriver):
     """
     This class implements a network filtering mechanism versatile
     enough for EC2 style Security Group filtering by leveraging
     libvirt's nwfilter.
 
     First, all instances get a filter ("nova-base-filter") applied.
+    This filter provides some basic security such as protection against
+    MAC spoofing, IP spoofing, and ARP spoofing.
+
     This filter drops all incoming ipv4 and ipv6 connections.
     Outgoing connections are never blocked.
@@ -738,38 +789,79 @@ class NWFilterFirewall(object):
     (*) This sentence brought to you by the redundancy department of
         redundancy.
+
     """
 
     def __init__(self, get_connection):
-        self._conn = get_connection
-
-        nova_base_filter = '''<filter name='nova-base' chain='root'>
-                                <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
-                                <filterref filter='no-mac-spoofing'/>
-                                <filterref filter='no-ip-spoofing'/>
-                                <filterref filter='no-arp-spoofing'/>
-                                <filterref filter='allow-dhcp-server'/>
-                                <filterref filter='nova-allow-dhcp-server'/>
-                                <filterref filter='nova-base-ipv4'/>
-                                <filterref filter='nova-base-ipv6'/>
-                              </filter>'''
-
-        nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
-                                <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
-                                <rule action='accept' direction='out'
-                                      priority='100'>
-                                  <udp srcipaddr='0.0.0.0'
-                                       dstipaddr='255.255.255.255'
-                                       srcportstart='68'
-                                       dstportstart='67'/>
-                                </rule>
-                                <rule action='accept' direction='in'
-                                      priority='100'>
-                                  <udp srcipaddr='$DHCPSERVER'
-                                       srcportstart='67'
-                                       dstportstart='68'/>
-                                </rule>
-                              </filter>'''
+        self._libvirt_get_connection = get_connection
+        self.static_filters_configured = False
+        self.handle_security_groups = False
+
+    def _get_connection(self):
+        return self._libvirt_get_connection()
+    _conn = property(_get_connection)
+
+    def nova_dhcp_filter(self):
+        """The standard allow-dhcp-server filter is an <ip> one, so it uses
+           ebtables to allow traffic through. Without a corresponding rule in
+           iptables, it'll get blocked anyway."""
+
+        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+                    <rule action='accept' direction='out'
+                          priority='100'>
+                      <udp srcipaddr='0.0.0.0'
+                           dstipaddr='255.255.255.255'
+                           srcportstart='68'
+                           dstportstart='67'/>
+                    </rule>
+                    <rule action='accept' direction='in'
+                          priority='100'>
+                      <udp srcipaddr='$DHCPSERVER'
+                           srcportstart='67'
+                           dstportstart='68'/>
+                    </rule>
+                  </filter>'''
+
+    def setup_basic_filtering(self, instance):
+        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+        logging.info('called setup_basic_filtering in nwfilter')
+
+        if self.handle_security_groups:
+            # No point in setting up a filter set that we'll be overriding
+            # anyway.
+            return
+
+        logging.info('ensuring static filters')
+        self._ensure_static_filters()
+
+        instance_filter_name = self._instance_filter_name(instance)
+        self._define_filter(self._filter_container(instance_filter_name,
+                                                   ['nova-base']))
+
+    def _ensure_static_filters(self):
+        if self.static_filters_configured:
+            return
+
+        self._define_filter(self._filter_container('nova-base',
+                                                    ['no-mac-spoofing',
+                                                     'no-ip-spoofing',
+                                                     'no-arp-spoofing',
+                                                     'allow-dhcp-server']))
+        self._define_filter(self.nova_base_ipv4_filter)
+        self._define_filter(self.nova_base_ipv6_filter)
+        self._define_filter(self.nova_dhcp_filter)
+        self._define_filter(self.nova_vpn_filter)
+        if FLAGS.allow_project_net_traffic:
+            self._define_filter(self.nova_project_filter)
+
+        self.static_filters_configured = True
+
+    def _filter_container(self, name, filters):
+        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+                 name,
+                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+        return xml
 
     nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
                            <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
@@ -783,7 +875,7 @@ class NWFilterFirewall(object):
         retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:
             for direction, action, priority in [('out', 'accept', 399),
-                                                ('inout', 'drop', 400)]:
+                                                ('in', 'drop', 400)]:
                 retval += """<rule action='%s' direction='%s' priority='%d'>
                                 <%s />
                              </rule>""" % (action, direction,
@@ -795,7 +887,7 @@ class NWFilterFirewall(object):
         retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
         for protocol in ['tcp', 'udp', 'icmp']:
             for direction, action, priority in [('out', 'accept', 399),
-                                                ('inout', 'drop', 400)]:
+                                                ('in', 'drop', 400)]:
                 retval += """<rule action='%s' direction='%s' priority='%d'>
                                 <%s-ipv6 />
                              </rule>""" % (action, direction,
@@ -819,43 +911,49 @@ class NWFilterFirewall(object):
         # execute in a native thread and block current greenthread until done
         tpool.execute(self._conn.nwfilterDefineXML, xml)
 
-    def setup_base_nwfilters(self):
-        self._define_filter(self.nova_base_ipv4_filter)
-        self._define_filter(self.nova_base_ipv6_filter)
-        self._define_filter(self.nova_dhcp_filter)
-        self._define_filter(self.nova_base_filter)
-        self._define_filter(self.nova_vpn_filter)
-        if FLAGS.allow_project_net_traffic:
-            self._define_filter(self.nova_project_filter)
-
-    def setup_nwfilters_for_instance(self, instance):
+    def prepare_instance_filter(self, instance):
         """
         Creates an NWFilter for the given instance. In the process,
         it makes sure the filters for the security groups as well as
        the base filter are all in place.
         """
-        nwfilter_xml = ("<filter name='nova-instance-%s' "
-                        "chain='root'>\n") % instance['name']
-
         if instance['image_id'] == FLAGS.vpn_image_id:
-            nwfilter_xml += "  <filterref filter='nova-vpn' />\n"
+            base_filter = 'nova-vpn'
         else:
-            nwfilter_xml += "  <filterref filter='nova-base' />\n"
+            base_filter = 'nova-base'
+
+        instance_filter_name = self._instance_filter_name(instance)
+        instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
+        instance_filter_children = [base_filter, instance_secgroup_filter_name]
+        instance_secgroup_filter_children = ['nova-base-ipv4',
+                                             'nova-base-ipv6',
+                                             'nova-allow-dhcp-server']
+
+        ctxt = context.get_admin_context()
 
         if FLAGS.allow_project_net_traffic:
-            nwfilter_xml += "  <filterref filter='nova-project' />\n"
+            instance_filter_children += ['nova-project']
+
+        for security_group in db.security_group_get_by_instance(ctxt,
+                                                               instance['id']):
 
-        for security_group in instance.security_groups:
-            self.ensure_security_group_filter(security_group['id'])
+            self.refresh_security_group_rules(security_group['id'])
 
-            nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' "
-                             "/>\n") % security_group['id']
-        nwfilter_xml += "</filter>"
+            instance_secgroup_filter_children += [('nova-secgroup-%s' %
                                                   security_group['id'])]
 
-        self._define_filter(nwfilter_xml)
+        self._define_filter(
+            self._filter_container(instance_secgroup_filter_name,
+                                   instance_secgroup_filter_children))
 
-    def ensure_security_group_filter(self, security_group_id):
+        self._define_filter(
+            self._filter_container(instance_filter_name,
+                                   instance_filter_children))
+
+        return
+
+    def refresh_security_group_rules(self, security_group_id):
         return self._define_filter(
                    self.security_group_to_nwfilter_xml(security_group_id))
@@ -886,3 +984,162 @@ class NWFilterFirewall(object):
         xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
               (security_group_id, rule_xml,)
         return xml
+
+    def _instance_filter_name(self, instance):
+        return 'nova-instance-%s' % instance['name']
+
+
+class IptablesFirewallDriver(FirewallDriver):
+    def __init__(self, execute=None):
+        self.execute = execute or utils.execute
+        self.instances = set()
+
+    def apply_instance_filter(self, instance):
+        """No-op. Everything is done in prepare_instance_filter"""
+        pass
+
+    def remove_instance(self, instance):
+        self.instances.remove(instance)
+
+    def add_instance(self, instance):
+        self.instances.add(instance)
+
+    def prepare_instance_filter(self, instance):
+        self.add_instance(instance)
+        self.apply_ruleset()
+
+    def apply_ruleset(self):
+        current_filter, _ = self.execute('sudo iptables-save -t filter')
+        current_lines = current_filter.split('\n')
+        new_filter = self.modify_rules(current_lines)
+        self.execute('sudo iptables-restore',
+                     process_input='\n'.join(new_filter))
+
+    def modify_rules(self, current_lines):
+        ctxt = context.get_admin_context()
+        # Remove any trace of nova rules.
+        new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+        seen_chains = False
+        for rules_index in range(len(new_filter)):
+            if not seen_chains:
+                if new_filter[rules_index].startswith(':'):
+                    seen_chains = True
+            elif seen_chains == 1:
+                if not new_filter[rules_index].startswith(':'):
+                    break
+
+        our_chains = [':nova-ipv4-fallback - [0:0]']
+        our_rules = ['-A nova-ipv4-fallback -j DROP']
+
+        our_chains += [':nova-local - [0:0]']
+        our_rules += ['-A FORWARD -j nova-local']
+
+        security_groups = set()
+        # Add our chains
+        # First, we add instance chains and rules
+        for instance in self.instances:
+            chain_name = self._instance_chain_name(instance)
+            ip_address = self._ip_for_instance(instance)
+
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            # Jump to the per-instance chain
+            our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+                                                         chain_name)]
+
+            # Always drop invalid packets
+            our_rules += ['-A %s -m state --state '
+                          'INVALID -j DROP' % (chain_name,)]
+
+            # Allow established connections
+            our_rules += ['-A %s -m state --state '
+                          'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+            # Jump to each security group chain in turn
+            for security_group in \
+                            db.security_group_get_by_instance(ctxt,
+                                                              instance['id']):
+                security_groups.add(security_group)
+
+                sg_chain_name = self._security_group_chain_name(security_group)
+
+                our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+            # Allow DHCP responses
+            dhcp_server = self._dhcp_server_for_instance(instance)
+            our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
+                                                (chain_name, dhcp_server)]
+
+            # If nothing matches, jump to the fallback chain
+            our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
+
+        # then, security group chains and rules
+        for security_group in security_groups:
+            chain_name = self._security_group_chain_name(security_group)
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            rules = \
+              db.security_group_rule_get_by_security_group(ctxt,
+                                                          security_group['id'])
+
+            for rule in rules:
+                logging.info('%r', rule)
+                args = ['-A', chain_name, '-p', rule.protocol]
+
+                if rule.cidr:
+                    args += ['-s', rule.cidr]
+                else:
+                    # Eventually, a mechanism to grant access for security
+                    # groups will turn up here. It'll use ipsets.
+                    continue
+
+                if rule.protocol in ['udp', 'tcp']:
+                    if rule.from_port == rule.to_port:
+                        args += ['--dport', '%s' % (rule.from_port,)]
+                    else:
+                        args += ['-m', 'multiport',
+                                 '--dports', '%s:%s' % (rule.from_port,
+                                                        rule.to_port)]
+                elif rule.protocol == 'icmp':
+                    icmp_type = rule.from_port
+                    icmp_code = rule.to_port
+
+                    if icmp_type == '-1':
+                        icmp_type_arg = None
+                    else:
+                        icmp_type_arg = '%s' % icmp_type
+                        if not icmp_code == '-1':
+                            icmp_type_arg += '/%s' % icmp_code
+
+                    if icmp_type_arg:
+                        args += ['-m', 'icmp', '--icmp_type', icmp_type_arg]
+
+                args += ['-j ACCEPT']
+                our_rules += [' '.join(args)]
+
+        new_filter[rules_index:rules_index] = our_rules
+        new_filter[rules_index:rules_index] = our_chains
+        logging.info('new_filter: %s', '\n'.join(new_filter))
+        return new_filter
+
+    def refresh_security_group_members(self, security_group):
+        pass
+
+    def refresh_security_group_rules(self, security_group):
+        self.apply_ruleset()
+
+    def _security_group_chain_name(self, security_group):
+        return 'nova-sg-%s' % (security_group['id'],)
+
+    def _instance_chain_name(self, instance):
+        return 'nova-inst-%s' % (instance['id'],)
+
+    def _ip_for_instance(self, instance):
+        return db.instance_get_fixed_address(context.get_admin_context(),
+                                             instance['id'])
+
+    def _dhcp_server_for_instance(self, instance):
+        network = db.project_get_network(context.get_admin_context(),
+                                         instance['project_id'])
+        return network['gateway']
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 477e0abf4..6bc925f3e 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -20,7 +20,6 @@
 Drivers for volumes.
 
 """
-import os
 import time
 
 from nova import exception
@@ -80,7 +79,9 @@ class VolumeDriver(object):
 
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
-        if not os.path.isdir("/dev/%s" % FLAGS.volume_group):
+        out, err = self._execute("sudo vgs --noheadings -o name")
+        volume_groups = out.split()
+        if not FLAGS.volume_group in volume_groups:
             raise exception.Error(_("volume group %s doesn't exist")
                                   % FLAGS.volume_group)
diff --git a/setup.py b/setup.py
@@ -24,6 +24,15 @@ from setuptools.command.sdist import sdist
 from sphinx.setup_command import BuildDoc
 
 from nova.utils import parse_mailmap, str_dict_replace
+from nova import version
+
+if os.path.isdir('.bzr'):
+    with open("nova/vcsversion.py", 'w') as version_file:
+        vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"],
+                                   stdout=subprocess.PIPE)
+        vcsversion = vcs_cmd.communicate()[0]
+        version_file.write(vcsversion)
+
 
 class local_BuildDoc(BuildDoc):
     def run(self):
@@ -49,7 +58,7 @@ class local_sdist(sdist):
         sdist.run(self)
 
 setup(name='nova',
-      version='2011.1',
+      version=version.canonical_version_string(),
       description='cloud computing fabric controller',
       author='OpenStack',
       author_email='nova@lists.launchpad.net',
```
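
A note on the versioning changes that close out this merge: `setup.py` now writes `nova/vcsversion.py` from `bzr version-info --python` whenever a `.bzr` checkout is present, and `nova/version.py` falls back to the `LOCALBRANCH`/`LOCALREVISION` dict when that file is missing. The sketch below is not part of the commit; it inlines that fallback so it runs standalone, and the values shown in the comments only hold for the hard-coded `2011.1`, non-FINAL case.

```python
# Standalone sketch of the nova/version.py helpers introduced above, with the
# LOCALBRANCH/LOCALREVISION fallback inlined so it runs without a nova tree.
version_info = {'branch_nick': u'LOCALBRANCH',
                'revision_id': 'LOCALREVISION',
                'revno': 0}

NOVA_VERSION = ['2011', '1']
YEAR, COUNT = NOVA_VERSION
FINAL = False  # flipped to True at release-candidate time


def canonical_version_string():
    # Short X.Y string; doc/source/conf.py now uses this for `version`.
    return '.'.join([YEAR, COUNT])


def version_string():
    # Full release string; doc/source/conf.py now uses this for `release`.
    if FINAL:
        return canonical_version_string()
    return '%s-dev' % (canonical_version_string(),)


def vcs_version_string():
    return "%s:%s" % (version_info['branch_nick'], version_info['revision_id'])


def version_string_with_vcs():
    # This is what nova/log.py now interpolates as %(nova_version)s.
    return "%s-%s" % (canonical_version_string(), vcs_version_string())


if __name__ == '__main__':
    print(canonical_version_string())    # 2011.1
    print(version_string())              # 2011.1-dev
    print(version_string_with_vcs())     # 2011.1-LOCALBRANCH:LOCALREVISION
```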

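The other recurring pattern in the merge is the security-group refresh fan-out added to `nova.compute.api.API`: collect the distinct hosts of the affected instances, then `rpc.cast` one `refresh_security_group_rules` (or `refresh_security_group_members`) message per host. A simplified, hypothetical sketch of that planning step follows; `plan_refresh_casts`, `queue_for`, and the plain-dict instances are illustrative stand-ins for the real `db.queue_get_for()` call and instance records, not nova APIs.

```python
# Hypothetical sketch of the fan-out performed by
# API.trigger_security_group_rules_refresh(): one cast per distinct host.

def plan_refresh_casts(security_group_id, instances, topic='compute'):
    """Return the (queue, message) pairs that would be rpc.cast()."""
    hosts = set()
    for instance in instances:
        if instance['host'] is not None:
            hosts.add(instance['host'])

    def queue_for(topic, host):
        # Stand-in for db.queue_get_for(context, FLAGS.compute_topic, host).
        return '%s.%s' % (topic, host)

    return [(queue_for(topic, host),
             {'method': 'refresh_security_group_rules',
              'args': {'security_group_id': security_group_id}})
            for host in sorted(hosts)]


if __name__ == '__main__':
    instances = [{'host': 'node1'}, {'host': 'node2'},
                 {'host': 'node1'}, {'host': None}]
    # Two casts: one to compute.node1, one to compute.node2.
    for queue, message in plan_refresh_casts(42, instances):
        print(queue, message)
```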