summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-manage31
-rw-r--r--nova/api/ec2/__init__.py2
-rw-r--r--nova/api/ec2/cloud.py258
-rw-r--r--nova/api/openstack/servers.py35
-rw-r--r--nova/auth/manager.py13
-rw-r--r--nova/compute/manager.py27
-rw-r--r--nova/db/api.py115
-rw-r--r--nova/db/sqlalchemy/api.py303
-rw-r--r--nova/db/sqlalchemy/models.py97
-rw-r--r--nova/db/sqlalchemy/session.py9
-rw-r--r--nova/exception.py3
-rw-r--r--nova/fakerabbit.py1
-rw-r--r--nova/network/linux_net.py29
-rw-r--r--nova/network/manager.py287
-rw-r--r--nova/process.py2
-rw-r--r--nova/test.py20
-rw-r--r--nova/tests/api_unittest.py188
-rw-r--r--nova/tests/cloud_unittest.py5
-rw-r--r--nova/tests/compute_unittest.py3
-rw-r--r--nova/tests/network_unittest.py32
-rw-r--r--nova/tests/objectstore_unittest.py2
-rw-r--r--nova/tests/scheduler_unittest.py1
-rw-r--r--nova/tests/virt_unittest.py194
-rw-r--r--nova/utils.py2
-rw-r--r--nova/virt/fake.py2
-rw-r--r--nova/virt/interfaces.template1
-rw-r--r--nova/virt/libvirt.qemu.xml.template4
-rw-r--r--nova/virt/libvirt.uml.xml.template4
-rw-r--r--nova/virt/libvirt.xen.xml.template30
-rw-r--r--nova/virt/libvirt_conn.py262
-rw-r--r--nova/virt/xenapi.py3
-rw-r--r--run_tests.py1
32 files changed, 1596 insertions, 370 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index e19bf70b7..d36b0f53a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -73,6 +73,7 @@ from nova import flags
from nova import quota
from nova import utils
from nova.auth import manager
+from nova.network import manager as network_manager
from nova.cloudpipe import pipelib
@@ -377,6 +378,29 @@ class FloatingIpCommands(object):
floating_ip['address'],
instance)
+class NetworkCommands(object):
+ """Class for managing networks."""
+
+ def create(self, fixed_range=None, num_networks=None,
+ network_size=None, vlan_start=None, vpn_start=None):
+ """Creates fixed ips for host by range
+ arguments: [fixed_range=FLAG], [num_networks=FLAG],
+ [network_size=FLAG], [vlan_start=FLAG],
+ [vpn_start=FLAG]"""
+ if not fixed_range:
+ fixed_range = FLAGS.fixed_range
+ if not num_networks:
+ num_networks = FLAGS.num_networks
+ if not network_size:
+ network_size = FLAGS.network_size
+ if not vlan_start:
+ vlan_start = FLAGS.vlan_start
+ if not vpn_start:
+ vpn_start = FLAGS.vpn_start
+ net_manager = utils.import_object(FLAGS.network_manager)
+ net_manager.create_networks(None, fixed_range, int(num_networks),
+ int(network_size), int(vlan_start),
+ int(vpn_start))
CATEGORIES = [
('user', UserCommands),
@@ -384,7 +408,8 @@ CATEGORIES = [
('role', RoleCommands),
('shell', ShellCommands),
('vpn', VpnCommands),
- ('floating', FloatingIpCommands)
+ ('floating', FloatingIpCommands),
+ ('network', NetworkCommands)
]
@@ -454,9 +479,9 @@ def main():
fn(*argv)
sys.exit(0)
except TypeError:
- print "Wrong number of arguments supplied"
+ print "Possible wrong number of arguments supplied"
print "%s %s: %s" % (category, action, fn.__doc__)
- sys.exit(2)
+ raise
if __name__ == '__main__':
main()
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 6b538a7f1..6e771f064 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -142,6 +142,8 @@ class Authorizer(wsgi.Middleware):
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
+ 'AuthorizeSecurityGroupIngress': ['netadmin'],
+ 'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 619c1a4b0..a7693cadd 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -28,6 +28,8 @@ import logging
import os
import time
+import IPy
+
from nova import crypto
from nova import db
from nova import exception
@@ -43,6 +45,7 @@ from nova.api.ec2 import images
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+InvalidInputException = exception.InvalidInputException
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
@@ -127,6 +130,15 @@ class CloudController(object):
result[key] = [line]
return result
+ def _trigger_refresh_security_group(self, security_group):
+ nodes = set([instance['host'] for instance in security_group.instances
+ if instance['host'] is not None])
+ for node in nodes:
+ rpc.call('%s.%s' % (FLAGS.compute_topic, node),
+ { "method": "refresh_security_group",
+ "args": { "context": None,
+ "security_group_id": security_group.id}})
+
def get_metadata(self, address):
instance_ref = db.fixed_ip_get_instance(None, address)
if instance_ref is None:
@@ -246,28 +258,210 @@ class CloudController(object):
pass
return True
- def describe_security_groups(self, context, group_names, **kwargs):
- groups = {'securityGroupSet': []}
+ def describe_security_groups(self, context, group_name=None, **kwargs):
+ self._ensure_default_security_group(context)
+ if context.user.is_admin():
+ groups = db.security_group_get_all(context)
+ else:
+ groups = db.security_group_get_by_project(context,
+ context.project.id)
+ groups = [self._format_security_group(context, g) for g in groups]
+ if not group_name is None:
+ groups = [g for g in groups if g.name in group_name]
+
+ return {'securityGroupInfo': groups }
+
+ def _format_security_group(self, context, group):
+ g = {}
+ g['groupDescription'] = group.description
+ g['groupName'] = group.name
+ g['ownerId'] = group.project_id
+ g['ipPermissions'] = []
+ for rule in group.rules:
+ r = {}
+ r['ipProtocol'] = rule.protocol
+ r['fromPort'] = rule.from_port
+ r['toPort'] = rule.to_port
+ r['groups'] = []
+ r['ipRanges'] = []
+ if rule.group_id:
+ source_group = db.security_group_get(context, rule.group_id)
+ r['groups'] += [{'groupName': source_group.name,
+ 'userId': source_group.project_id}]
+ else:
+ r['ipRanges'] += [{'cidrIp': rule.cidr}]
+ g['ipPermissions'] += [r]
+ return g
+
+
+ def _authorize_revoke_rule_args_to_dict(self, context,
+ to_port=None, from_port=None,
+ ip_protocol=None, cidr_ip=None,
+ user_id=None,
+ source_security_group_name=None,
+ source_security_group_owner_id=None):
+
+ values = {}
+
+ if source_security_group_name:
+ source_project_id = self._get_source_project_id(context,
+ source_security_group_owner_id)
+
+ source_security_group = \
+ db.security_group_get_by_name(context,
+ source_project_id,
+ source_security_group_name)
+ values['group_id'] = source_security_group['id']
+ elif cidr_ip:
+ # If this fails, it throws an exception. This is what we want.
+ IPy.IP(cidr_ip)
+ values['cidr'] = cidr_ip
+ else:
+ values['cidr'] = '0.0.0.0/0'
+
+ if ip_protocol and from_port and to_port:
+ from_port = int(from_port)
+ to_port = int(to_port)
+ ip_protocol = str(ip_protocol)
+
+ if ip_protocol.upper() not in ['TCP','UDP','ICMP']:
+ raise InvalidInputException('%s is not a valid ipProtocol' %
+ (ip_protocol,))
+ if ((min(from_port, to_port) < -1) or
+ (max(from_port, to_port) > 65535)):
+ raise InvalidInputException('Invalid port range')
+
+ values['protocol'] = ip_protocol
+ values['from_port'] = from_port
+ values['to_port'] = to_port
+ else:
+ # If cidr based filtering, protocol and ports are mandatory
+ if 'cidr' in values:
+ return None
+
+ return values
+
- # Stubbed for now to unblock other things.
- return groups
+ def _security_group_rule_exists(self, security_group, values):
+ """Indicates whether the specified rule values are already
+ defined in the given security group.
+ """
+ for rule in security_group.rules:
+ if 'group_id' in values:
+ if rule['group_id'] == values['group_id']:
+ return True
+ else:
+ is_duplicate = True
+ for key in ('cidr', 'from_port', 'to_port', 'protocol'):
+ if rule[key] != values[key]:
+ is_duplicate = False
+ break
+ if is_duplicate:
+ return True
+ return False
+
+
+ def revoke_security_group_ingress(self, context, group_name, **kwargs):
+ self._ensure_default_security_group(context)
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+
+ criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ if criteria == None:
+ raise exception.ApiError("No rule for the specified parameters.")
+
+ for rule in security_group.rules:
+ match = True
+ for (k,v) in criteria.iteritems():
+ if getattr(rule, k, False) != v:
+ match = False
+ if match:
+ db.security_group_rule_destroy(context, rule['id'])
+ self._trigger_refresh_security_group(security_group)
+ return True
+ raise exception.ApiError("No rule for the specified parameters.")
+
+ # TODO(soren): This has only been tested with Boto as the client.
+ # Unfortunately, it seems Boto is using an old API
+ # for these operations, so support for newer API versions
+ # is sketchy.
+ def authorize_security_group_ingress(self, context, group_name, **kwargs):
+ self._ensure_default_security_group(context)
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+
+ values = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ values['parent_group_id'] = security_group.id
+
+ if self._security_group_rule_exists(security_group, values):
+ raise exception.ApiError('This rule already exists in group %s' %
+ group_name)
+
+ security_group_rule = db.security_group_rule_create(context, values)
+
+ self._trigger_refresh_security_group(security_group)
- def create_security_group(self, context, group_name, **kwargs):
return True
+
+ def _get_source_project_id(self, context, source_security_group_owner_id):
+ if source_security_group_owner_id:
+ # Parse user:project for source group.
+ source_parts = source_security_group_owner_id.split(':')
+
+ # If no project name specified, assume it's same as user name.
+ # Since we're looking up by project name, the user name is not
+ # used here. It's only read for EC2 API compatibility.
+ if len(source_parts) == 2:
+ source_project_id = source_parts[1]
+ else:
+ source_project_id = source_parts[0]
+ else:
+ source_project_id = context.project.id
+
+ return source_project_id
+
+
+ def create_security_group(self, context, group_name, group_description):
+ self._ensure_default_security_group(context)
+ if db.security_group_exists(context, context.project.id, group_name):
+ raise exception.ApiError('group %s already exists' % group_name)
+
+ group = {'user_id' : context.user.id,
+ 'project_id': context.project.id,
+ 'name': group_name,
+ 'description': group_description}
+ group_ref = db.security_group_create(context, group)
+
+ return {'securityGroupSet': [self._format_security_group(context,
+ group_ref)]}
+
+
def delete_security_group(self, context, group_name, **kwargs):
+ security_group = db.security_group_get_by_name(context,
+ context.project.id,
+ group_name)
+ db.security_group_destroy(context, security_group.id)
return True
+
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
internal_id = ec2_id_to_internal_id(ec2_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id)
- return rpc.call('%s.%s' % (FLAGS.compute_topic,
- instance_ref['host']),
- {"method": "get_console_output",
- "args": {"context": None,
- "instance_id": instance_ref['id']}})
+ output = rpc.call('%s.%s' % (FLAGS.compute_topic,
+ instance_ref['host']),
+ { "method" : "get_console_output",
+ "args" : { "context": None,
+ "instance_id": instance_ref['id']}})
+
+ now = datetime.datetime.utcnow()
+ return { "InstanceId" : ec2_id,
+ "Timestamp" : now,
+ "output" : base64.b64encode(output) }
def describe_volumes(self, context, **kwargs):
if context.user.is_admin():
@@ -545,15 +739,27 @@ class CloudController(object):
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
- network_ref = db.project_get_network(context, context.project.id)
+ network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"context": None,
- "project_id": context.project.id}})
+ "network_id": network_ref['id']}})
return db.queue_get_for(context, FLAGS.network_topic, host)
+ def _ensure_default_security_group(self, context):
+ try:
+ db.security_group_get_by_name(context,
+ context.project.id,
+ 'default')
+ except exception.NotFound:
+ values = { 'name' : 'default',
+ 'description' : 'default',
+ 'user_id' : context.user.id,
+ 'project_id' : context.project.id }
+ group = db.security_group_create(context, values)
+
def run_instances(self, context, **kwargs):
instance_type = kwargs.get('instance_type', 'm1.small')
if instance_type not in INSTANCE_TYPES:
@@ -601,8 +807,17 @@ class CloudController(object):
kwargs['key_name'])
key_data = key_pair_ref['public_key']
- # TODO: Get the real security group of launch in here
- security_group = "default"
+ security_group_arg = kwargs.get('security_group', ["default"])
+ if not type(security_group_arg) is list:
+ security_group_arg = [security_group_arg]
+
+ security_groups = []
+ self._ensure_default_security_group(context)
+ for security_group_name in security_group_arg:
+ group = db.security_group_get_by_name(context,
+ context.project.id,
+ security_group_name)
+ security_groups.append(group['id'])
reservation_id = utils.generate_uid('r')
base_options = {}
@@ -616,12 +831,12 @@ class CloudController(object):
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
- base_options['security_group'] = security_group
- base_options['instance_type'] = instance_type
+
base_options['display_name'] = kwargs.get('display_name')
base_options['display_description'] = kwargs.get('display_description')
type_data = INSTANCE_TYPES[instance_type]
+ base_options['instance_type'] = instance_type
base_options['memory_mb'] = type_data['memory_mb']
base_options['vcpus'] = type_data['vcpus']
base_options['local_gb'] = type_data['local_gb']
@@ -630,6 +845,10 @@ class CloudController(object):
instance_ref = db.instance_create(context, base_options)
inst_id = instance_ref['id']
+ for security_group_id in security_groups:
+ db.instance_add_security_group(context, inst_id,
+ security_group_id)
+
inst = {}
inst['mac_address'] = utils.generate_mac()
inst['launch_index'] = num
@@ -637,12 +856,13 @@ class CloudController(object):
ec2_id = internal_id_to_ec2_id(internal_id)
inst['hostname'] = ec2_id
db.instance_update(context, inst_id, inst)
+ # TODO(vish): This probably should be done in the scheduler
+ # or in compute as a call. The network should be
+ # allocated after the host is assigned and setup
+ # can happen at the same time.
address = self.network_manager.allocate_fixed_ip(context,
inst_id,
vpn)
-
- # TODO(vish): This probably should be done in the scheduler
- # network is setup when host is assigned
network_topic = self._get_network_topic(context)
rpc.call(network_topic,
{"method": "setup_fixed_ip",
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 5d1ed9822..1a0792bf8 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -48,9 +48,9 @@ def _entity_list(entities):
return dict(servers=entities)
def _entity_detail(inst):
- """ Maps everything to valid attributes for return"""
- power_mapping = {
- power_state.NOSTATE: 'build',
+ """ Maps everything to Rackspace-like attributes for return"""
+ power_mapping = {
+ power_state.NOSTATE: 'build',
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.PAUSED: 'suspended',
@@ -60,7 +60,7 @@ def _entity_detail(inst):
}
inst_dict = {}
- mapped_keys = dict(status='state', imageId='image_id',
+ mapped_keys = dict(status='state', imageId='image_id',
flavorId='instance_type', name='server_name', id='id')
for k, v in mapped_keys.iteritems():
@@ -83,7 +83,7 @@ class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "server": [ "id", "imageId", "name", "flavorId", "hostId",
+ "server": [ "id", "imageId", "name", "flavorId", "hostId",
"status", "progress", "progress" ]
}
}
@@ -155,7 +155,7 @@ class Controller(wsgi.Controller):
user_id = req.environ['nova.context']['user']['id']
inst_dict = self._deserialize(req.body, req)
-
+
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
@@ -163,12 +163,12 @@ class Controller(wsgi.Controller):
if not instance or instance.user_id != user_id:
return faults.Fault(exc.HTTPNotFound())
- self.db_driver.instance_update(None, int(id),
+ self.db_driver.instance_update(None, int(id),
_filter_params(inst_dict['server']))
return faults.Fault(exc.HTTPNoContent())
def action(self, req, id):
- """ multi-purpose method used to reboot, rebuild, and
+ """ multi-purpose method used to reboot, rebuild, and
resize a server """
user_id = req.environ['nova.context']['user']['id']
input_dict = self._deserialize(req.body, req)
@@ -195,12 +195,11 @@ class Controller(wsgi.Controller):
if v['flavorid'] == flavor_id][0]
image_id = env['server']['imageId']
-
img_service = utils.import_object(FLAGS.image_service)
image = img_service.show(image_id)
- if not image:
+ if not image:
raise Exception, "Image not found"
inst['server_name'] = env['server']['name']
@@ -236,15 +235,14 @@ class Controller(wsgi.Controller):
ref = self.db_driver.instance_create(None, inst)
inst['id'] = ref.internal_id
-
# TODO(dietz): this isn't explicitly necessary, but the networking
# calls depend on an object with a project_id property, and therefore
# should be cleaned up later
api_context = context.APIRequestContext(user_id)
-
+
inst['mac_address'] = utils.generate_mac()
-
- #TODO(dietz) is this necessary?
+
+ #TODO(dietz) is this necessary?
inst['launch_index'] = 0
inst['hostname'] = str(ref.internal_id)
@@ -256,21 +254,20 @@ class Controller(wsgi.Controller):
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
- network_topic = self._get_network_topic(user_id)
+ network_topic = self._get_network_topic(None)
rpc.call(network_topic,
{"method": "setup_fixed_ip",
"args": {"context": None,
"address": address}})
return inst
- def _get_network_topic(self, user_id):
+ def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
- network_ref = self.db_driver.project_get_network(None,
- user_id)
+ network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"context": None,
- "project_id": user_id}})
+ "network_id": network_ref['id']}})
return self.db_driver.queue_get_for(None, FLAGS.network_topic, host)
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 49235c910..9c499c98d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -484,12 +484,6 @@ class AuthManager(object):
member_users)
if project_dict:
project = Project(**project_dict)
- try:
- self.network_manager.allocate_network(context,
- project.id)
- except:
- drv.delete_project(project.id)
- raise
return project
def modify_project(self, project, manager_user=None, description=None):
@@ -558,13 +552,6 @@ class AuthManager(object):
def delete_project(self, project, context=None):
"""Deletes a project"""
- try:
- network_ref = db.project_get_network(context,
- Project.safe_id(project))
- db.network_destroy(context, network_ref['id'])
- except:
- logging.exception('Could not destroy network for %s',
- project)
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 131fac406..c602d013d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -64,14 +64,19 @@ class ComputeManager(manager.Manager):
@defer.inlineCallbacks
@exception.wrap_exception
+ def refresh_security_group(self, context, security_group_id, **_kwargs):
+ yield self.driver.refresh_security_group(security_group_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
"""Launch a new instance with specified options."""
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref['internal_id'] in self.driver.list_instances():
+ if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
project_id = instance_ref['project_id']
- self.network_manager.setup_compute_network(context, project_id)
+ self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@@ -144,26 +149,10 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
- # TODO(vish): Move this into the driver layer
-
logging.debug("instance %s: getting console output", instance_id)
instance_ref = self.db.instance_get(context, instance_id)
- if FLAGS.connection_type == 'libvirt':
- fname = os.path.abspath(os.path.join(FLAGS.instances_path,
- instance_ref['internal_id'],
- 'console.log'))
- with open(fname, 'r') as f:
- output = f.read()
- else:
- output = 'FAKE CONSOLE OUTPUT'
-
- # TODO(termie): this stuff belongs in the API layer, no need to
- # munge the data we send to ourselves
- output = {"InstanceId": instance_id,
- "Timestamp": "2",
- "output": base64.b64encode(output)}
- return output
+ return self.driver.get_console_output(instance_ref)
@defer.inlineCallbacks
@exception.wrap_exception
diff --git a/nova/db/api.py b/nova/db/api.py
index 11815991e..7e6994b56 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -304,6 +304,11 @@ def instance_update(context, instance_id, values):
return IMPL.instance_update(context, instance_id, values)
+def instance_add_security_group(context, instance_id, security_group_id):
+ """Associate the given security group with the given instance"""
+ return IMPL.instance_add_security_group(context, instance_id, security_group_id)
+
+
###################
@@ -335,6 +340,11 @@ def key_pair_get_all_by_user(context, user_id):
####################
+def network_associate(context, project_id):
+ """Associate a free network to a project."""
+ return IMPL.network_associate(context, project_id)
+
+
def network_count(context):
"""Return the number of networks."""
return IMPL.network_count(context)
@@ -355,9 +365,12 @@ def network_count_reserved_ips(context, network_id):
return IMPL.network_count_reserved_ips(context, network_id)
-def network_create(context, values):
- """Create a network from the values dictionary."""
- return IMPL.network_create(context, values)
+def network_create_safe(context, values):
+ """Create a network from the values dict
+
+ The network is only returned if the create succeeds. If the create violates
+ constraints because the network already exists, no exception is raised."""
+ return IMPL.network_create_safe(context, values)
def network_create_fixed_ips(context, network_id, num_vpn_clients):
@@ -365,9 +378,14 @@ def network_create_fixed_ips(context, network_id, num_vpn_clients):
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
-def network_destroy(context, network_id):
- """Destroy the network or raise if it does not exist."""
- return IMPL.network_destroy(context, network_id)
+def network_disassociate(context, network_id):
+ """Disassociate the network from project or raise if it does not exist."""
+ return IMPL.network_disassociate(context, network_id)
+
+
+def network_disassociate_all(context):
+ """Disassociate all networks from projects."""
+ return IMPL.network_disassociate_all(context)
def network_get(context, network_id):
@@ -382,10 +400,15 @@ def network_get_associated_fixed_ips(context, network_id):
def network_get_by_bridge(context, bridge):
- """Get an network or raise if it does not exist."""
+ """Get a network by bridge or raise if it does not exist."""
return IMPL.network_get_by_bridge(context, bridge)
+def network_get_by_instance(context, instance_id):
+ """Get a network by instance id or raise if it does not exist."""
+ return IMPL.network_get_by_instance(context, instance_id)
+
+
def network_get_index(context, network_id):
"""Get non-conflicting index for network"""
return IMPL.network_get_index(context, network_id)
@@ -396,19 +419,6 @@ def network_get_vpn_ip(context, network_id):
return IMPL.network_get_vpn_ip(context, network_id)
-def network_index_count(context):
- """Return count of network indexes"""
- return IMPL.network_index_count(context)
-
-
-def network_index_create_safe(context, values):
- """Create a network index from the values dict
-
- The index is not returned. If the create violates the unique
- constraints because the index already exists, no exception is raised."""
- return IMPL.network_index_create_safe(context, values)
-
-
def network_set_cidr(context, network_id, cidr):
"""Set the Classless Inner Domain Routing for the network"""
return IMPL.network_set_cidr(context, network_id, cidr)
@@ -571,6 +581,71 @@ def volume_update(context, volume_id, values):
return IMPL.volume_update(context, volume_id, values)
+####################
+
+
+def security_group_get_all(context):
+ """Get all security groups"""
+ return IMPL.security_group_get_all(context)
+
+
+def security_group_get(context, security_group_id):
+ """Get security group by its internal id"""
+ return IMPL.security_group_get(context, security_group_id)
+
+
+def security_group_get_by_name(context, project_id, group_name):
+ """Returns a security group with the specified name from a project"""
+ return IMPL.security_group_get_by_name(context, project_id, group_name)
+
+
+def security_group_get_by_project(context, project_id):
+ """Get all security groups belonging to a project"""
+ return IMPL.security_group_get_by_project(context, project_id)
+
+
+def security_group_get_by_instance(context, instance_id):
+ """Get security groups to which the instance is assigned"""
+ return IMPL.security_group_get_by_instance(context, instance_id)
+
+
+def security_group_exists(context, project_id, group_name):
+ """Indicates if a group name exists in a project"""
+ return IMPL.security_group_exists(context, project_id, group_name)
+
+
+def security_group_create(context, values):
+ """Create a new security group"""
+ return IMPL.security_group_create(context, values)
+
+
+def security_group_destroy(context, security_group_id):
+ """Deletes a security group"""
+ return IMPL.security_group_destroy(context, security_group_id)
+
+
+def security_group_destroy_all(context):
+ """Deletes a security group"""
+ return IMPL.security_group_destroy_all(context)
+
+
+####################
+
+
+def security_group_rule_create(context, values):
+ """Create a new security group"""
+ return IMPL.security_group_rule_create(context, values)
+
+
+def security_group_rule_get_by_security_group(context, security_group_id):
+ """Get all rules for a a given security group"""
+ return IMPL.security_group_rule_get_by_security_group(context, security_group_id)
+
+def security_group_rule_destroy(context, security_group_rule_id):
+ """Deletes a security group rule"""
+ return IMPL.security_group_rule_destroy(context, security_group_rule_id)
+
+
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 1043f4bfb..6b979f0ae 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -29,8 +29,11 @@ from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import joinedload, joinedload_all
-from sqlalchemy.sql import exists, func
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.sql import exists
+from sqlalchemy.sql import func
+from sqlalchemy.orm.exc import NoResultFound
FLAGS = flags.FLAGS
@@ -571,11 +574,13 @@ def instance_get(context, instance_id, session=None):
if is_admin_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(id=instance_id
).filter_by(deleted=can_read_deleted(context)
).first()
elif is_user_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
).filter_by(id=instance_id
).filter_by(deleted=False
@@ -591,6 +596,7 @@ def instance_get_all(context):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -600,6 +606,7 @@ def instance_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(deleted=can_read_deleted(context)
).filter_by(user_id=user_id
).all()
@@ -612,6 +619,7 @@ def instance_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(project_id=project_id
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -624,12 +632,14 @@ def instance_get_all_by_reservation(context, reservation_id):
if is_admin_context(context):
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(reservation_id=reservation_id
).filter_by(deleted=can_read_deleted(context)
).all()
elif is_user_context(context):
return session.query(models.Instance
).options(joinedload_all('fixed_ip.floating_ips')
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
).filter_by(reservation_id=reservation_id
).filter_by(deleted=False
@@ -642,11 +652,13 @@ def instance_get_by_internal_id(context, internal_id):
if is_admin_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(internal_id=internal_id
).filter_by(deleted=can_read_deleted(context)
).first()
elif is_user_context(context):
result = session.query(models.Instance
+ ).options(joinedload('security_groups')
).filter_by(project_id=context.project.id
).filter_by(internal_id=internal_id
).filter_by(deleted=False
@@ -718,6 +730,18 @@ def instance_update(context, instance_id, values):
instance_ref.save(session=session)
+def instance_add_security_group(context, instance_id, security_group_id):
+ """Associate the given security group with the given instance"""
+ session = get_session()
+ with session.begin():
+ instance_ref = instance_get(context, instance_id, session=session)
+ security_group_ref = security_group_get(context,
+ security_group_id,
+ session=session)
+ instance_ref.security_groups += [security_group_ref]
+ instance_ref.save(session=session)
+
+
###################
@@ -781,6 +805,24 @@ def key_pair_get_all_by_user(context, user_id):
@require_admin_context
+def network_associate(context, project_id):
+ session = get_session()
+ with session.begin():
+ network_ref = session.query(models.Network
+ ).filter_by(deleted=False
+ ).filter_by(project_id=None
+ ).with_lockmode('update'
+ ).first()
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if not network_ref:
+ raise db.NoMoreNetworks()
+ network_ref['project_id'] = project_id
+ session.add(network_ref)
+ return network_ref
+
+
+@require_admin_context
def network_count(context):
session = get_session()
return session.query(models.Network
@@ -820,31 +862,26 @@ def network_count_reserved_ips(context, network_id):
@require_admin_context
-def network_create(context, values):
+def network_create_safe(context, values):
network_ref = models.Network()
for (key, value) in values.iteritems():
network_ref[key] = value
- network_ref.save()
- return network_ref
+ try:
+ network_ref.save()
+ return network_ref
+ except IntegrityError:
+ return None
@require_admin_context
-def network_destroy(context, network_id):
+def network_disassociate(context, network_id):
+ network_update(context, network_id, {'project_id': None})
+
+
+@require_admin_context
+def network_disassociate_all(context):
session = get_session()
- with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update networks set deleted=1 where id=:id',
- {'id': network_id})
- session.execute('update fixed_ips set deleted=1 where network_id=:id',
- {'id': network_id})
- session.execute('update floating_ips set deleted=1 '
- 'where fixed_ip_id in '
- '(select id from fixed_ips '
- 'where network_id=:id)',
- {'id': network_id})
- session.execute('update network_indexes set network_id=NULL '
- 'where network_id=:id',
- {'id': network_id})
+ session.execute('update networks set project_id=NULL')
@require_context
@@ -894,48 +931,21 @@ def network_get_by_bridge(context, bridge):
if not result:
raise exception.NotFound('No network for bridge %s' % bridge)
-
return result
@require_admin_context
-def network_get_index(context, network_id):
+def network_get_by_instance(_context, instance_id):
session = get_session()
- with session.begin():
- network_index = session.query(models.NetworkIndex
- ).filter_by(network_id=None
- ).filter_by(deleted=False
- ).with_lockmode('update'
- ).first()
-
- if not network_index:
- raise db.NoMoreNetworks()
-
- network_index['network'] = network_get(context,
- network_id,
- session=session)
- session.add(network_index)
-
- return network_index['index']
-
-
-@require_admin_context
-def network_index_count(context):
- session = get_session()
- return session.query(models.NetworkIndex
- ).filter_by(deleted=can_read_deleted(context)
- ).count()
-
-
-@require_admin_context
-def network_index_create_safe(context, values):
- network_index_ref = models.NetworkIndex()
- for (key, value) in values.iteritems():
- network_index_ref[key] = value
- try:
- network_index_ref.save()
- except IntegrityError:
- pass
+ rv = session.query(models.Network
+ ).filter_by(deleted=False
+ ).join(models.Network.fixed_ips
+ ).filter_by(instance_id=instance_id
+ ).filter_by(deleted=False
+ ).first()
+ if not rv:
+ raise exception.NotFound('No network for instance %s' % instance_id)
+ return rv
@require_admin_context
@@ -975,15 +985,22 @@ def network_update(context, network_id, values):
@require_context
def project_get_network(context, project_id):
session = get_session()
- result= session.query(models.Network
+ rv = session.query(models.Network
).filter_by(project_id=project_id
).filter_by(deleted=False
).first()
-
- if not result:
- raise exception.NotFound('No network for project: %s' % project_id)
-
- return result
+ if not rv:
+ try:
+ return network_associate(context, project_id)
+ except IntegrityError:
+ # NOTE(vish): We hit this if there is a race and two
+ # processes are attempting to allocate the
+ # network at the same time
+ rv = session.query(models.Network
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).first()
+ return rv
###################
@@ -1193,6 +1210,7 @@ def volume_get(context, volume_id, session=None):
@require_admin_context
def volume_get_all(context):
+ session = get_session()
return session.query(models.Volume
).filter_by(deleted=can_read_deleted(context)
).all()
@@ -1283,6 +1301,163 @@ def volume_update(context, volume_id, values):
###################
+@require_context
+def security_group_get_all(context):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(deleted=can_read_deleted(context)
+ ).options(joinedload_all('rules')
+ ).all()
+
+
+@require_context
+def security_group_get(context, security_group_id, session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroup
+ ).filter_by(deleted=can_read_deleted(context)
+ ).filter_by(id=security_group_id
+ ).options(joinedload_all('rules')
+ ).first()
+ else:
+ result = session.query(models.SecurityGroup
+ ).filter_by(deleted=False
+ ).filter_by(id=security_group_id
+ ).filter_by(project_id=context.project_id
+ ).options(joinedload_all('rules')
+ ).first()
+ if not result:
+ raise exception.NotFound("No security group with id %s" %
+ security_group_id)
+ return result
+
+
+@require_context
+def security_group_get_by_name(context, project_id, group_name):
+ session = get_session()
+ result = session.query(models.SecurityGroup
+ ).filter_by(project_id=project_id
+ ).filter_by(name=group_name
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).options(joinedload_all('instances')
+ ).first()
+ if not result:
+ raise exception.NotFound(
+ 'No security group named %s for project: %s' \
+ % (group_name, project_id))
+ return result
+
+
+@require_context
+def security_group_get_by_project(context, project_id):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(project_id=project_id
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).all()
+
+
+@require_context
+def security_group_get_by_instance(context, instance_id):
+ session = get_session()
+ return session.query(models.SecurityGroup
+ ).filter_by(deleted=False
+ ).options(joinedload_all('rules')
+ ).join(models.SecurityGroup.instances
+ ).filter_by(id=instance_id
+ ).filter_by(deleted=False
+ ).all()
+
+
+@require_context
+def security_group_exists(context, project_id, group_name):
+ try:
+ group = security_group_get_by_name(context, project_id, group_name)
+ return group != None
+ except exception.NotFound:
+ return False
+
+
+@require_context
+def security_group_create(context, values):
+ security_group_ref = models.SecurityGroup()
+ # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
+ # once save() is called. This will get cleaned up in next orm pass.
+ security_group_ref.rules
+ for (key, value) in values.iteritems():
+ security_group_ref[key] = value
+ security_group_ref.save()
+ return security_group_ref
+
+
+@require_context
+def security_group_destroy(context, security_group_id):
+ session = get_session()
+ with session.begin():
+ # TODO(vish): do we have to use sql here?
+ session.execute('update security_groups set deleted=1 where id=:id',
+ {'id': security_group_id})
+ session.execute('update security_group_rules set deleted=1 '
+ 'where group_id=:id',
+ {'id': security_group_id})
+
+@require_context
+def security_group_destroy_all(context, session=None):
+ if not session:
+ session = get_session()
+ with session.begin():
+ # TODO(vish): do we have to use sql here?
+ session.execute('update security_groups set deleted=1')
+ session.execute('update security_group_rules set deleted=1')
+
+
+###################
+
+
+@require_context
+def security_group_rule_get(context, security_group_rule_id, session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule
+ ).filter_by(deleted=can_read_deleted(context)
+ ).filter_by(id=security_group_rule_id
+ ).first()
+ else:
+ # TODO(vish): Join to group and check for project_id
+ result = session.query(models.SecurityGroupIngressRule
+ ).filter_by(deleted=False
+ ).filter_by(id=security_group_rule_id
+ ).first()
+ if not result:
+ raise exception.NotFound("No security group rule with id %s" %
+ security_group_rule_id)
+ return result
+
+
+@require_context
+def security_group_rule_create(context, values):
+ security_group_rule_ref = models.SecurityGroupIngressRule()
+ for (key, value) in values.iteritems():
+ security_group_rule_ref[key] = value
+ security_group_rule_ref.save()
+ return security_group_rule_ref
+
+@require_context
+def security_group_rule_destroy(context, security_group_rule_id):
+ session = get_session()
+ with session.begin():
+ security_group_rule = security_group_rule_get(context,
+ security_group_rule_id,
+ session=session)
+ security_group_rule.delete(session=session)
+
+
+###################
+
@require_admin_context
def user_get(context, id, session=None):
if not session:
@@ -1492,6 +1667,8 @@ def user_add_project_role(context, user_id, project_id, role):
###################
+
+@require_admin_context
def host_get_networks(context, host):
session = get_session()
with session.begin():
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index ebcb73413..eed8f0578 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -25,7 +25,7 @@ import datetime
# TODO(vish): clean up these imports
from sqlalchemy.orm import relationship, backref, exc, object_mapper
-from sqlalchemy import Column, Integer, String
+from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
@@ -169,7 +169,7 @@ class Instance(BASE, NovaBase):
@property
def name(self):
- return self.internal_id
+ return "instance-%d" % self.internal_id
image_id = Column(String(255))
kernel_id = Column(String(255))
@@ -187,7 +187,6 @@ class Instance(BASE, NovaBase):
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(Text)
- security_group = Column(String(255))
state = Column(Integer)
state_description = Column(String(255))
@@ -289,10 +288,66 @@ class ExportDevice(BASE, NovaBase):
'ExportDevice.deleted==False)')
+class SecurityGroupInstanceAssociation(BASE, NovaBase):
+ __tablename__ = 'security_group_instance_association'
+ id = Column(Integer, primary_key=True)
+ security_group_id = Column(Integer, ForeignKey('security_groups.id'))
+ instance_id = Column(Integer, ForeignKey('instances.id'))
+
+
+class SecurityGroup(BASE, NovaBase):
+ """Represents a security group"""
+ __tablename__ = 'security_groups'
+ id = Column(Integer, primary_key=True)
+
+ name = Column(String(255))
+ description = Column(String(255))
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+
+ instances = relationship(Instance,
+ secondary="security_group_instance_association",
+ primaryjoin="and_(SecurityGroup.id == SecurityGroupInstanceAssociation.security_group_id,"
+ "SecurityGroup.deleted == False)",
+ secondaryjoin="and_(SecurityGroupInstanceAssociation.instance_id == Instance.id,"
+ "Instance.deleted == False)",
+ backref='security_groups')
+
+ @property
+ def user(self):
+ return auth.manager.AuthManager().get_user(self.user_id)
+
+ @property
+ def project(self):
+ return auth.manager.AuthManager().get_project(self.project_id)
+
+
+class SecurityGroupIngressRule(BASE, NovaBase):
+ """Represents a rule in a security group"""
+ __tablename__ = 'security_group_rules'
+ id = Column(Integer, primary_key=True)
+
+ parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
+ parent_group = relationship("SecurityGroup", backref="rules",
+ foreign_keys=parent_group_id,
+ primaryjoin="and_(SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,"
+ "SecurityGroupIngressRule.deleted == False)")
+
+ protocol = Column(String(5)) # "tcp", "udp", or "icmp"
+ from_port = Column(Integer)
+ to_port = Column(Integer)
+ cidr = Column(String(255))
+
+ # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
+ # granting access for.
+ group_id = Column(Integer, ForeignKey('security_groups.id'))
+
+
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh"""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
+
name = Column(String(255))
user_id = Column(String(255))
@@ -308,10 +363,13 @@ class KeyPair(BASE, NovaBase):
class Network(BASE, NovaBase):
"""Represents a network"""
__tablename__ = 'networks'
+ __table_args__ = (schema.UniqueConstraint("vpn_public_address",
+ "vpn_public_port"),
+ {'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
injected = Column(Boolean, default=False)
- cidr = Column(String(255))
+ cidr = Column(String(255), unique=True)
netmask = Column(String(255))
bridge = Column(String(255))
gateway = Column(String(255))
@@ -324,28 +382,13 @@ class Network(BASE, NovaBase):
vpn_private_address = Column(String(255))
dhcp_start = Column(String(255))
- project_id = Column(String(255))
+ # NOTE(vish): The unique constraint below helps avoid a race condition
+ # when associating a network, but it also means that we
+ # can't associate two networks with one project.
+ project_id = Column(String(255), unique=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
-class NetworkIndex(BASE, NovaBase):
- """Represents a unique offset for a network
-
- Currently vlan number, vpn port, and fixed ip ranges are keyed off of
- this index. These may ultimately need to be converted to separate
- pools.
- """
- __tablename__ = 'network_indexes'
- id = Column(Integer, primary_key=True)
- index = Column(Integer, unique=True)
- network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
- network = relationship(Network,
- backref=backref('network_index', uselist=False),
- foreign_keys=network_id,
- primaryjoin='and_(NetworkIndex.network_id==Network.id,'
- 'NetworkIndex.deleted==False)')
-
-
class AuthToken(BASE, NovaBase):
"""Represents an authorization token for all API transactions. Fields
are a string representing the actual token and a user id for mapping
@@ -358,7 +401,6 @@ class AuthToken(BASE, NovaBase):
cdn_management_url = Column(String(255))
-
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance"""
@@ -461,9 +503,10 @@ class FloatingIp(BASE, NovaBase):
def register_models():
"""Register Models and create metadata"""
from sqlalchemy import create_engine
- models = (Service, Instance, Volume, ExportDevice,
- FixedIp, FloatingIp, Network, NetworkIndex,
- AuthToken, UserProjectAssociation, User, Project) # , Image, Host)
+ models = (Service, Instance, Volume, ExportDevice, FixedIp,
+ FloatingIp, Network, SecurityGroup,
+ SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
+ AuthToken, User, Project) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 69a205378..826754f6a 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -36,7 +36,8 @@ def get_session(autocommit=True, expire_on_commit=False):
if not _MAKER:
if not _ENGINE:
_ENGINE = create_engine(FLAGS.sql_connection, echo=False)
- _MAKER = sessionmaker(bind=_ENGINE,
- autocommit=autocommit,
- expire_on_commit=expire_on_commit)
- return _MAKER()
+ _MAKER = (sessionmaker(bind=_ENGINE,
+ autocommit=autocommit,
+ expire_on_commit=expire_on_commit))
+ session = _MAKER()
+ return session
diff --git a/nova/exception.py b/nova/exception.py
index b8894758f..f157fab2d 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -69,6 +69,9 @@ class NotEmpty(Error):
class Invalid(Error):
pass
+class InvalidInputException(Error):
+ pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 835973810..df5e61e6e 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -116,6 +116,7 @@ class Backend(object):
message = Message(backend=self, body=message_data,
content_type=content_type,
content_encoding=content_encoding)
+ message.result = True
logging.debug('Getting from %s: %s', queue, message)
return message
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 37f9c8253..c0be0e8cc 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -65,12 +65,12 @@ def init_host():
# SNAT rule for outbound traffic.
_confirm_rule("POSTROUTING", "-t nat -s %s "
"-j SNAT --to-source %s"
- % (FLAGS.private_range, FLAGS.routing_source_ip))
+ % (FLAGS.fixed_range, FLAGS.routing_source_ip))
_confirm_rule("POSTROUTING", "-t nat -s %s -j MASQUERADE" %
- FLAGS.private_range)
+ FLAGS.fixed_range)
_confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
- {'range': FLAGS.private_range})
+ {'range': FLAGS.fixed_range})
def bind_floating_ip(floating_ip):
"""Bind ip to public interface"""
@@ -180,14 +180,14 @@ def update_dhcp(context, network_id):
"""
network_ref = db.network_get(context, network_id)
- conffile = _dhcp_file(network_ref['vlan'], 'conf')
+ conffile = _dhcp_file(network_ref['bridge'], 'conf')
with open(conffile, 'w') as f:
f.write(get_dhcp_hosts(context, network_id))
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
- pid = _dnsmasq_pid_for(network_ref['vlan'])
+ pid = _dnsmasq_pid_for(network_ref['bridge'])
# if dnsmasq is already running, then tell it to reload
if pid:
@@ -250,11 +250,11 @@ def _dnsmasq_cmd(net):
' --strict-order',
' --bind-interfaces',
' --conf-file=',
- ' --pid-file=%s' % _dhcp_file(net['vlan'], 'pid'),
+ ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
' --listen-address=%s' % net['gateway'],
' --except-interface=lo',
' --dhcp-range=%s,static,120s' % net['dhcp_start'],
- ' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'),
+ ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
' --dhcp-script=%s' % FLAGS.dhcpbridge,
' --leasefile-ro']
return ''.join(cmd)
@@ -271,24 +271,25 @@ def _stop_dnsmasq(network):
logging.debug("Killing dnsmasq threw %s", exc)
-def _dhcp_file(vlan, kind):
- """Return path to a pid, leases or conf file for a vlan"""
+def _dhcp_file(bridge, kind):
+ """Return path to a pid, leases or conf file for a bridge"""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
+ return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path,
+ bridge,
+ kind))
- return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
-
-def _dnsmasq_pid_for(vlan):
- """Returns he pid for prior dnsmasq instance for a vlan
+def _dnsmasq_pid_for(bridge):
+ """Returns the pid for prior dnsmasq instance for a bridge
Returns None if no pid file exists
If machine has rebooted pid might be incorrect (caller should check)
"""
- pid_file = _dhcp_file(vlan, 'pid')
+ pid_file = _dhcp_file(bridge, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 9c1846dd9..2ea1c1aa0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -37,19 +37,10 @@ from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
-flags.DEFINE_list('flat_network_ips',
- ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
- 'Available ips for simple network')
-flags.DEFINE_string('flat_network_network', '192.168.0.0',
- 'Network for simple network')
-flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
- 'Netmask for simple network')
-flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
- 'Broadcast for simple network')
-flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
- 'Broadcast for simple network')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network')
+flags.DEFINE_string('flat_network_dhcp_start', '192.168.0.2',
+ 'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
@@ -57,8 +48,8 @@ flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
-flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block')
-flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block')
+flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block')
+flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_integer('cnt_vpn_clients', 5,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
@@ -91,13 +82,9 @@ class NetworkManager(manager.Manager):
for network in self.db.host_get_networks(None, self.host):
self._on_set_network_host(None, network['id'])
- def set_network_host(self, context, project_id):
- """Safely sets the host of the projects network"""
+ def set_network_host(self, context, network_id):
+ """Safely sets the host of the network"""
logging.debug("setting network host")
- network_ref = self.db.project_get_network(context, project_id)
- # TODO(vish): can we minimize db access by just getting the
- # id here instead of the ref?
- network_id = network_ref['id']
host = self.db.network_set_host(None,
network_id,
self.host)
@@ -108,7 +95,7 @@ class NetworkManager(manager.Manager):
"""Gets a fixed ip from the pool"""
raise NotImplementedError()
- def deallocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool"""
raise NotImplementedError()
@@ -117,10 +104,10 @@ class NetworkManager(manager.Manager):
raise NotImplementedError()
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a project"""
+ """Called when this host becomes the host for a network"""
raise NotImplementedError()
- def setup_compute_network(self, context, project_id):
+ def setup_compute_network(self, context, instance_id):
"""Sets up matching network for compute hosts"""
raise NotImplementedError()
@@ -150,6 +137,57 @@ class NetworkManager(manager.Manager):
"""Returns an floating ip to the pool"""
self.db.floating_ip_deallocate(context, floating_address)
+ def lease_fixed_ip(self, context, mac, address):
+ """Called by dhcp-bridge when ip is leased"""
+ logging.debug("Leasing IP %s", address)
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
+ instance_ref = fixed_ip_ref['instance']
+ if not instance_ref:
+ raise exception.Error("IP %s leased that isn't associated" %
+ address)
+ if instance_ref['mac_address'] != mac:
+ raise exception.Error("IP %s leased to bad mac %s vs %s" %
+ (address, instance_ref['mac_address'], mac))
+ self.db.fixed_ip_update(context,
+ fixed_ip_ref['address'],
+ {'leased': True})
+ if not fixed_ip_ref['allocated']:
+ logging.warn("IP %s leased that was already deallocated", address)
+
+ def release_fixed_ip(self, context, mac, address):
+ """Called by dhcp-bridge when ip is released"""
+ logging.debug("Releasing IP %s", address)
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
+ instance_ref = fixed_ip_ref['instance']
+ if not instance_ref:
+ raise exception.Error("IP %s released that isn't associated" %
+ address)
+ if instance_ref['mac_address'] != mac:
+ raise exception.Error("IP %s released from bad mac %s vs %s" %
+ (address, instance_ref['mac_address'], mac))
+ if not fixed_ip_ref['leased']:
+ logging.warn("IP %s released that was not leased", address)
+ self.db.fixed_ip_update(context,
+ fixed_ip_ref['str_id'],
+ {'leased': False})
+ if not fixed_ip_ref['allocated']:
+ self.db.fixed_ip_disassociate(context, address)
+ # NOTE(vish): dhcp server isn't updated until next setup, this
+ # means there will be stale entries in the conf file
+ # the code below will update the file if necessary
+ if FLAGS.update_dhcp_on_disassociate:
+ network_ref = self.db.fixed_ip_get_network(context, address)
+ self.driver.update_dhcp(context, network_ref['id'])
+
+ def get_network(self, context):
+ """Get the network for the current context"""
+ raise NotImplementedError()
+
+ def create_networks(self, context, num_networks, network_size,
+ *args, **kwargs):
+ """Create networks based on parameters"""
+ raise NotImplementedError()
+
@property
def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
"""Number of reserved ips at the bottom of the range"""
@@ -163,7 +201,7 @@ class NetworkManager(manager.Manager):
def _create_fixed_ips(self, context, network_id):
"""Create all fixed ips for network"""
network_ref = self.db.network_get(context, network_id)
- # NOTE(vish): should these be properties of the network as opposed
+ # NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
@@ -185,8 +223,13 @@ class FlatManager(NetworkManager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool"""
- network_ref = self.db.project_get_network(context, context.project.id)
- address = self.db.fixed_ip_associate_pool(context,
+ # TODO(vish): when this is called by compute, we can associate compute
+ # with a network, or a cluster of computes with a network
+ # and use that network here with a method like
+ # network_get_by_compute_host
+ network_ref = self.db.network_get_by_bridge(None,
+ FLAGS.flat_network_bridge)
+ address = self.db.fixed_ip_associate_pool(None,
network_ref['id'],
instance_id)
self.db.fixed_ip_update(context, address, {'allocated': True})
@@ -195,9 +238,9 @@ class FlatManager(NetworkManager):
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool"""
self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context, address)
+ self.db.fixed_ip_disassociate(None, address)
- def setup_compute_network(self, context, project_id):
+ def setup_compute_network(self, context, instance_id):
"""Network is created manually"""
pass
@@ -205,25 +248,66 @@ class FlatManager(NetworkManager):
"""Currently no setup"""
pass
+ def create_networks(self, context, cidr, num_networks, network_size,
+ *args, **kwargs):
+ """Create networks based on parameters"""
+ fixed_net = IPy.IP(cidr)
+ for index in range(num_networks):
+ start = index * network_size
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['dhcp_start'] = str(project_net[2])
+ network_ref = self.db.network_create_safe(context, net)
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
+
+ def get_network(self, context):
+ """Get the network for the current context"""
+ # NOTE(vish): To support multiple network hosts, this could randomly
+ # select from multiple networks instead of just
+ # returning the one. It could also potentially be done
+ # in the scheduler.
+ return self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a project"""
- # NOTE(vish): should there be two types of network objects
- # in the datastore?
+ """Called when this host becomes the host for a network"""
net = {}
net['injected'] = True
- net['network_str'] = FLAGS.flat_network_network
- net['netmask'] = FLAGS.flat_network_netmask
net['bridge'] = FLAGS.flat_network_bridge
- net['gateway'] = FLAGS.flat_network_gateway
- net['broadcast'] = FLAGS.flat_network_broadcast
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
- # NOTE(vish): Rignt now we are putting all of the fixed ips in
- # one large pool, but ultimately it may be better to
- # have each network manager have its own network that
- # it is responsible for and its own pool of ips.
- for address in FLAGS.flat_network_ips:
- self.db.fixed_ip_create(context, {'address': address})
+
+
+
+class FlatDHCPManager(NetworkManager):
+ """Flat networking with dhcp"""
+
+ def setup_fixed_ip(self, context, address):
+ """Setup dhcp for this network"""
+ network_ref = db.fixed_ip_get_by_address(context, address)
+ self.driver.update_dhcp(context, network_ref['id'])
+
+ def deallocate_fixed_ip(self, context, address, *args, **kwargs):
+ """Returns a fixed ip to the pool"""
+ self.db.fixed_ip_update(context, address, {'allocated': False})
+
+ def _on_set_network_host(self, context, network_id):
+ """Called when this host becomes the host for a network"""
+ super(FlatDHCPManager, self)._on_set_network_host(context, network_id)
+ network_ref = self.db.network_get(context, network_id)
+ self.db.network_update(context,
+ network_id,
+ {'dhcp_start': FLAGS.flat_network_dhcp_start})
+ self.driver.ensure_bridge(network_ref['bridge'],
+ FLAGS.bridge_dev,
+ network_ref)
class VlanManager(NetworkManager):
@@ -251,10 +335,13 @@ class VlanManager(NetworkManager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool"""
+ # TODO(vish): This should probably be getting project_id from
+ # the instance, but it is another trip to the db.
+ # Perhaps this method should take an instance_ref.
network_ref = self.db.project_get_network(context, context.project.id)
if kwargs.get('vpn', None):
address = network_ref['vpn_private_address']
- self.db.fixed_ip_associate(context, address, instance_id)
+ self.db.fixed_ip_associate(None, address, instance_id)
else:
address = self.db.fixed_ip_associate_pool(None,
network_ref['id'],
@@ -265,8 +352,6 @@ class VlanManager(NetworkManager):
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool"""
self.db.fixed_ip_update(context, address, {'allocated': False})
- fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
-
def setup_fixed_ip(self, context, address):
"""Sets forwarding rules and dhcp for fixed ip"""
@@ -278,80 +363,9 @@ class VlanManager(NetworkManager):
network_ref['vpn_private_address'])
self.driver.update_dhcp(context, network_ref['id'])
- def lease_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is leased"""
- logging.debug("Leasing IP %s", address)
- fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
- instance_ref = fixed_ip_ref['instance']
- if not instance_ref:
- raise exception.Error("IP %s leased that isn't associated" %
- address)
- if instance_ref['mac_address'] != mac:
- raise exception.Error("IP %s leased to bad mac %s vs %s" %
- (address, instance_ref['mac_address'], mac))
- self.db.fixed_ip_update(context,
- fixed_ip_ref['address'],
- {'leased': True})
- if not fixed_ip_ref['allocated']:
- logging.warn("IP %s leased that was already deallocated", address)
-
- def release_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is released"""
- logging.debug("Releasing IP %s", address)
- fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
- instance_ref = fixed_ip_ref['instance']
- if not instance_ref:
- raise exception.Error("IP %s released that isn't associated" %
- address)
- if instance_ref['mac_address'] != mac:
- raise exception.Error("IP %s released from bad mac %s vs %s" %
- (address, instance_ref['mac_address'], mac))
- if not fixed_ip_ref['leased']:
- logging.warn("IP %s released that was not leased", address)
- self.db.fixed_ip_update(context,
- fixed_ip_ref['str_id'],
- {'leased': False})
- if not fixed_ip_ref['allocated']:
- self.db.fixed_ip_disassociate(context, address)
- # NOTE(vish): dhcp server isn't updated until next setup, this
- # means there will stale entries in the conf file
- # the code below will update the file if necessary
- if FLAGS.update_dhcp_on_disassociate:
- network_ref = self.db.fixed_ip_get_network(context, address)
- self.driver.update_dhcp(context, network_ref['id'])
-
- def allocate_network(self, context, project_id):
- """Set up the network"""
- self._ensure_indexes(context)
- network_ref = db.network_create(context, {'project_id': project_id})
- network_id = network_ref['id']
- private_net = IPy.IP(FLAGS.private_range)
- index = db.network_get_index(context, network_id)
- vlan = FLAGS.vlan_start + index
- start = index * FLAGS.network_size
- significant_bits = 32 - int(math.log(FLAGS.network_size, 2))
- cidr = "%s/%s" % (private_net[start], significant_bits)
- project_net = IPy.IP(cidr)
-
- net = {}
- net['cidr'] = cidr
- # NOTE(vish): we could turn these into properties
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['vpn_private_address'] = str(project_net[2])
- net['dhcp_start'] = str(project_net[3])
- net['vlan'] = vlan
- net['bridge'] = 'br%s' % vlan
- net['vpn_public_address'] = FLAGS.vpn_ip
- net['vpn_public_port'] = FLAGS.vpn_start + index
- db.network_update(context, network_id, net)
- self._create_fixed_ips(context, network_id)
- return network_id
-
- def setup_compute_network(self, context, project_id):
+ def setup_compute_network(self, context, instance_id):
"""Sets up matching network for compute hosts"""
- network_ref = self.db.project_get_network(context, project_id)
+ network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'])
@@ -360,17 +374,42 @@ class VlanManager(NetworkManager):
# TODO(vish): Implement this
pass
- def _ensure_indexes(self, context):
- """Ensure the indexes for the network exist
-
- This could use a manage command instead of keying off of a flag"""
- if not self.db.network_index_count(context):
- for index in range(FLAGS.num_networks):
- self.db.network_index_create_safe(context, {'index': index})
+ def create_networks(self, context, cidr, num_networks, network_size,
+ vlan_start, vpn_start):
+ """Create networks based on parameters"""
+ fixed_net = IPy.IP(cidr)
+ for index in range(num_networks):
+ vlan = vlan_start + index
+ start = index * network_size
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['vpn_private_address'] = str(project_net[2])
+ net['dhcp_start'] = str(project_net[3])
+ net['vlan'] = vlan
+ net['bridge'] = 'br%s' % vlan
+            # NOTE(vish): This makes ports unique across the cloud, a more
+ # robust solution would be to make them unique per ip
+ net['vpn_public_port'] = vpn_start + index
+ network_ref = self.db.network_create_safe(context, net)
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
+
+ def get_network(self, context):
+ """Get the network for the current context"""
+ return self.db.project_get_network(None, context.project.id)
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a project"""
+ """Called when this host becomes the host for a network"""
network_ref = self.db.network_get(context, network_id)
+ net = {}
+ net['vpn_public_address'] = FLAGS.vpn_ip
+ db.network_update(context, network_id, net)
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'],
network_ref)
diff --git a/nova/process.py b/nova/process.py
index b3cad894b..13cb90e82 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -113,7 +113,7 @@ class BackRelayWithInput(protocol.ProcessProtocol):
if self.started_deferred:
self.started_deferred.callback(self)
if self.process_input:
- self.transport.write(self.process_input)
+ self.transport.write(str(self.process_input))
self.transport.closeStdin()
def get_process_output(executable, args=None, env=None, path=None,
diff --git a/nova/test.py b/nova/test.py
index 1f4b33272..f6485377d 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -24,6 +24,7 @@ and some black magic for inline callbacks.
import sys
import time
+import datetime
import mox
import stubout
@@ -31,9 +32,11 @@ from tornado import ioloop
from twisted.internet import defer
from twisted.trial import unittest
+from nova import db
from nova import fakerabbit
from nova import flags
from nova import rpc
+from nova.network import manager as network_manager
FLAGS = flags.FLAGS
@@ -57,6 +60,16 @@ class TrialTestCase(unittest.TestCase):
def setUp(self): # pylint: disable-msg=C0103
"""Run before each test method to initialize test environment"""
super(TrialTestCase, self).setUp()
+ # NOTE(vish): We need a better method for creating fixtures for tests
+ # now that we have some required db setup for the system
+ # to work properly.
+ self.start = datetime.datetime.utcnow()
+ if db.network_count(None) != 5:
+ network_manager.VlanManager().create_networks(None,
+ FLAGS.fixed_range,
+ 5, 16,
+ FLAGS.vlan_start,
+ FLAGS.vpn_start)
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
@@ -73,7 +86,9 @@ class TrialTestCase(unittest.TestCase):
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
-
+ # NOTE(vish): Clean up any ips associated during the test.
+ db.fixed_ip_disassociate_all_by_timeout(None, FLAGS.host, self.start)
+ db.network_disassociate_all(None)
rpc.Consumer.attach_to_twisted = self.originalAttach
for x in self.injected:
try:
@@ -83,6 +98,7 @@ class TrialTestCase(unittest.TestCase):
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
+ db.security_group_destroy_all(None)
super(TrialTestCase, self).tearDown()
@@ -138,7 +154,7 @@ class TrialTestCase(unittest.TestCase):
class BaseTestCase(TrialTestCase):
# TODO(jaypipes): Can this be moved into the TrialTestCase class?
"""Base test case class for all unit tests.
-
+
DEPRECATED: This is being removed once Tornado is gone, use TrialTestCase.
"""
def setUp(self): # pylint: disable-msg=C0103
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index c040cdad3..7ab27e000 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -91,6 +91,9 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
self.app = api.API()
+
+ def expect_http(self, host=None, is_secure=False):
+ """Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -100,9 +103,6 @@ class ApiEc2TestCase(test.BaseTestCase):
path='/services/Cloud')
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
-
- def expect_http(self, host=None, is_secure=False):
- """Returns a new EC2 connection"""
http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
@@ -138,3 +138,185 @@ class ApiEc2TestCase(test.BaseTestCase):
self.assertEquals(len(results), 1)
self.manager.delete_project(project)
self.manager.delete_user(user)
+
+ def test_get_all_security_groups(self):
+ """Test that we can retrieve security groups"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEquals(len(rv), 1)
+ self.assertEquals(rv[0].name, 'default')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ def test_create_delete_security_group(self):
+ """Test that we can create a security group"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEquals(len(rv), 2)
+ self.assertTrue(security_group_name in [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """
+        Test that we can add and remove CIDR-based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+        # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(int(group.rules[0].from_port), 80)
+ self.assertEquals(int(group.rules[0].to_port), 81)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """
+ Test that we can grant and revoke another security group access
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ project = self.manager.create_project('fake', 'fake', 'fake')
+
+ # At the moment, you need both of these to actually be netadmin
+ self.manager.add_role('fake', 'netadmin')
+ project.add_role('fake', 'netadmin')
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+        # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEquals(len(group.rules), 1)
+ self.assertEquals(len(group.rules[0].grants), 1)
+ self.assertEquals(str(group.rules[0].grants[0]),
+ '%s-%s' % (other_security_group_name, 'fake'))
+
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
+ return
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 8e5881edb..ff466135d 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -64,18 +64,17 @@ class CloudTestCase(test.TrialTestCase):
self.cloud = cloud.CloudController()
# set up a service
- self.compute = utils.import_class(FLAGS.compute_manager)()
+ self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.compute)
self.compute_consumer.attach_to_eventlet()
- self.network = utils.import_class(FLAGS.network_manager)()
+ self.network = utils.import_object(FLAGS.network_manager)
self.network_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.network_topic,
proxy=self.network)
self.network_consumer.attach_to_eventlet()
-
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 1e2bb113b..5a7f170f3 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -40,7 +40,8 @@ class ComputeTestCase(test.TrialTestCase):
def setUp(self): # pylint: disable-msg=C0103
logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
- self.flags(connection_type='fake')
+ self.flags(connection_type='fake',
+ network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 59b0a36e4..3afb4d19e 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -52,13 +52,14 @@ class NetworkTestCase(test.TrialTestCase):
self.context = context.APIRequestContext(project=None, user=self.user)
for i in range(5):
name = 'project%s' % i
- self.projects.append(self.manager.create_project(name,
- 'netuser',
- name))
+ project = self.manager.create_project(name, 'netuser', name)
+ self.projects.append(project)
# create the necessary network data for the project
- user_context = context.get_admin_context(user=self.user)
-
- self.network.set_network_host(user_context, self.projects[i].id)
+ user_context = context.APIRequestContext(project=self.projects[i],
+ user=self.user)
+ network_ref = self.network.get_network(user_context)
+ self.network.set_network_host(context.get_admin_context(),
+ network_ref['id'])
instance_ref = self._create_instance(0)
self.instance_id = instance_ref['id']
instance_ref = self._create_instance(1)
@@ -99,7 +100,7 @@ class NetworkTestCase(test.TrialTestCase):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
self.context.project = self.projects[0]
- pubnet = IPy.IP(flags.FLAGS.public_range)
+ pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
try:
db.floating_ip_get_by_address(None, address)
@@ -109,6 +110,7 @@ class NetworkTestCase(test.TrialTestCase):
float_addr = self.network.allocate_floating_ip(self.context,
self.projects[0].id)
fix_addr = self._create_address(0)
+ lease_ip(fix_addr)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(None, self.instance_id)
@@ -118,6 +120,7 @@ class NetworkTestCase(test.TrialTestCase):
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
+ release_ip(fix_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
@@ -190,8 +193,10 @@ class NetworkTestCase(test.TrialTestCase):
release_ip(address3)
for instance_id in instance_ids:
db.instance_destroy(None, instance_id)
- release_ip(first)
+ self.context.project = self.projects[0]
+ self.network.deallocate_fixed_ip(self.context, first)
self._deallocate_address(0, first)
+ release_ip(first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
@@ -207,10 +212,13 @@ class NetworkTestCase(test.TrialTestCase):
for i in range(networks_left):
project = self.manager.create_project('many%s' % i, self.user)
projects.append(project)
+ db.project_get_network(None, project.id)
+ project = self.manager.create_project('last', self.user)
+ projects.append(project)
self.assertRaises(db.NoMoreNetworks,
- self.manager.create_project,
- 'boom',
- self.user)
+ db.project_get_network,
+ None,
+ project.id)
for project in projects:
self.manager.delete_project(project)
@@ -223,7 +231,9 @@ class NetworkTestCase(test.TrialTestCase):
address2 = self._create_address(0)
self.assertEqual(address, address2)
+ lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address2)
+ release_ip(address)
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index eb2ee0406..872f1ab23 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -210,7 +210,7 @@ class S3APITestCase(test.TrialTestCase):
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
- FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver',
+ FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
self.auth_manager = manager.AuthManager()
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index 53a8be144..80100fc2f 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -75,6 +75,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
self.flags(connection_type='fake',
max_cores=4,
max_gigabytes=4,
+ network_manager='nova.network.manager.FlatManager',
volume_driver='nova.volume.driver.FakeAOEDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
index 730928f39..edcdba425 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/virt_unittest.py
@@ -14,53 +14,78 @@
# License for the specific language governing permissions and limitations
# under the License.
-from xml.etree.ElementTree import fromstring as parseXml
+from xml.etree.ElementTree import fromstring as xml_to_tree
+from xml.dom.minidom import parseString as xml_to_dom
+from nova import db
from nova import flags
from nova import test
+from nova import utils
+from nova.api import context
+from nova.api.ec2 import cloud
from nova.auth import manager
-# Needed to get FLAGS.instances_path defined:
-from nova.compute import manager as compute_manager
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
+flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TrialTestCase):
def setUp(self):
+ super(LibvirtConnTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
FLAGS.instances_path = ''
def test_get_uri_and_template(self):
- instance = { 'name' : 'i-cafebabe',
- 'id' : 'i-cafebabe',
+ ip = '10.11.12.13'
+
+ instance = { 'internal_id' : 1,
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2,
'project_id' : 'fake',
- 'ip_address' : '10.11.12.13',
'bridge' : 'br101',
'instance_type' : 'm1.small'}
+ instance_ref = db.instance_create(None, instance)
+ user_context = context.APIRequestContext(project=self.project,
+ user=self.user)
+ network_ref = self.network.get_network(user_context)
+ self.network.set_network_host(context.get_admin_context(),
+ network_ref['id'])
+
+ fixed_ip = { 'address' : ip,
+ 'network_id' : network_ref['id'] }
+
+ fixed_ip_ref = db.fixed_ip_create(None, fixed_ip)
+ db.fixed_ip_update(None, ip, { 'allocated' : True,
+ 'instance_id' : instance_ref['id'] })
+
type_uri_map = { 'qemu' : ('qemu:///system',
- [(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find('.').get('type'), 'qemu'),
+ [(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm' : ('qemu:///system',
- [(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find('.').get('type'), 'kvm'),
+ [(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'uml' : ('uml:///system',
- [(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find('.').get('type'), 'uml'),
+ [(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text, 'uml')]),
}
+ common_checks = [(lambda t: t.find('.').tag, 'domain'),
+ (lambda t: \
+ t.find('./devices/interface/filterref/parameter') \
+ .get('name'), 'IP'),
+ (lambda t: \
+ t.find('./devices/interface/filterref/parameter') \
+ .get('value'), '10.11.12.13')]
+
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
@@ -68,13 +93,18 @@ class LibvirtConnTestCase(test.TrialTestCase):
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
- xml = conn.to_xml(instance)
- tree = parseXml(xml)
+ xml = conn.to_xml(instance_ref)
+ tree = xml_to_tree(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed check %d' % (xml, i))
+ for i, (check, expected_result) in enumerate(common_checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
@@ -88,5 +118,141 @@ class LibvirtConnTestCase(test.TrialTestCase):
def tearDown(self):
+ super(LibvirtConnTestCase, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+
+class NWFilterTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NWFilterTestCase, self).setUp()
+
+ class Mock(object):
+ pass
+
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.APIRequestContext(self.user, self.project)
+
+ self.fake_libvirt_connection = Mock()
+
+ self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+
+ def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+
+
+ def test_cidr_rule_nwfilter_xml(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.create_security_group(self.context,
+ 'testgroup',
+ 'test group description')
+ cloud_controller.authorize_security_group_ingress(self.context,
+ 'testgroup',
+ from_port='80',
+ to_port='81',
+ ip_protocol='tcp',
+ cidr_ip='0.0.0.0/0')
+
+
+ security_group = db.security_group_get_by_name(self.context,
+ 'fake',
+ 'testgroup')
+
+ xml = self.fw.security_group_to_nwfilter_xml(security_group.id)
+
+ dom = xml_to_dom(xml)
+ self.assertEqual(dom.firstChild.tagName, 'filter')
+
+ rules = dom.getElementsByTagName('rule')
+ self.assertEqual(len(rules), 1)
+
+ # It's supposed to allow inbound traffic.
+ self.assertEqual(rules[0].getAttribute('action'), 'accept')
+ self.assertEqual(rules[0].getAttribute('direction'), 'in')
+
+ # Must be lower priority than the base filter (which blocks everything)
+ self.assertTrue(int(rules[0].getAttribute('priority')) < 1000)
+
+ ip_conditions = rules[0].getElementsByTagName('tcp')
+ self.assertEqual(len(ip_conditions), 1)
+ self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0')
+ self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
+ self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
+ self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')
+
+
+ self.teardown_security_group()
+
+ def teardown_security_group(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.delete_security_group(self.context, 'testgroup')
+
+
+ def setup_and_return_security_group(self):
+ cloud_controller = cloud.CloudController()
+ cloud_controller.create_security_group(self.context,
+ 'testgroup',
+ 'test group description')
+ cloud_controller.authorize_security_group_ingress(self.context,
+ 'testgroup',
+ from_port='80',
+ to_port='81',
+ ip_protocol='tcp',
+ cidr_ip='0.0.0.0/0')
+
+ return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
+
+ def test_creates_base_rule_first(self):
+ # These come pre-defined by libvirt
+ self.defined_filters = ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']
+
+ self.recursive_depends = {}
+ for f in self.defined_filters:
+ self.recursive_depends[f] = []
+
+ def _filterDefineXMLMock(xml):
+ dom = xml_to_dom(xml)
+ name = dom.firstChild.getAttribute('name')
+ self.recursive_depends[name] = []
+ for f in dom.getElementsByTagName('filterref'):
+ ref = f.getAttribute('filter')
+ self.assertTrue(ref in self.defined_filters,
+ ('%s referenced filter that does ' +
+ 'not yet exist: %s') % (name, ref))
+ dependencies = [ref] + self.recursive_depends[ref]
+ self.recursive_depends[name] += dependencies
+
+ self.defined_filters.append(name)
+ return True
+
+ self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
+
+ instance_ref = db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake'})
+ inst_id = instance_ref['id']
+
+ def _ensure_all_called(_):
+ instance_filter = 'nova-instance-%s' % instance_ref['name']
+ secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
+ for required in [secgroup_filter, 'allow-dhcp-server',
+ 'no-arp-spoofing', 'no-ip-spoofing',
+ 'no-mac-spoofing']:
+ self.assertTrue(required in self.recursive_depends[instance_filter],
+ "Instance's filter does not include %s" % required)
+
+ self.security_group = self.setup_and_return_security_group()
+
+ db.instance_add_security_group(self.context, inst_id, self.security_group.id)
+ instance = db.instance_get(self.context, inst_id)
+
+ d = self.fw.setup_nwfilters_for_instance(instance)
+ d.addCallback(_ensure_all_called)
+ d.addCallback(lambda _:self.teardown_security_group())
+
+ return d
diff --git a/nova/utils.py b/nova/utils.py
index b1699bda8..10b27ffec 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -128,8 +128,6 @@ def runthis(prompt, cmd, check_exit_code = True):
def generate_uid(topic, size=8):
if topic == "i":
# Instances have integer internal ids.
- #TODO(gundlach): We should make this more than 32 bits, but we need to
- #figure out how to make the DB happy with 64 bit integers.
return random.randint(0, 2**32-1)
else:
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 4ae6afcc4..dc6112f20 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -223,6 +223,8 @@ class FakeConnection(object):
"""
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
+ def get_console_output(self, instance):
+ return 'FAKE CONSOLE OUTPUT'
class FakeInstance(object):
def __init__(self):
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index 11df301f6..87b92b84a 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -10,7 +10,6 @@ auto eth0
iface eth0 inet static
address %(address)s
netmask %(netmask)s
- network %(network)s
broadcast %(broadcast)s
gateway %(gateway)s
dns-nameservers %(dns)s
diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template
index 17bd79b7c..2538b1ade 100644
--- a/nova/virt/libvirt.qemu.xml.template
+++ b/nova/virt/libvirt.qemu.xml.template
@@ -20,6 +20,10 @@
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
</interface>
<serial type="file">
<source path='%(basepath)s/console.log'/>
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template
index c039d6d90..bb8b47911 100644
--- a/nova/virt/libvirt.uml.xml.template
+++ b/nova/virt/libvirt.uml.xml.template
@@ -14,6 +14,10 @@
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
</interface>
<console type="file">
<source path='%(basepath)s/console.log'/>
diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template
new file mode 100644
index 000000000..9677902c6
--- /dev/null
+++ b/nova/virt/libvirt.xen.xml.template
@@ -0,0 +1,30 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>linux</type>
+ <kernel>%(basepath)s/kernel</kernel>
+ <initrd>%(basepath)s/ramdisk</initrd>
+ <root>/dev/xvda1</root>
+ <cmdline>ro</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='sda' />
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </console>
+ </devices>
+</domain>
+
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index d868e083c..ce97ef1eb 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -25,14 +25,17 @@ import logging
import os
import shutil
+import IPy
from twisted.internet import defer
from twisted.internet import task
+from twisted.internet import threads
from nova import db
from nova import exception
from nova import flags
from nova import process
from nova import utils
+#from nova.api import context
from nova.auth import manager
from nova.compute import disk
from nova.compute import instance_types
@@ -47,6 +50,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.qemu.xml.template'),
'Libvirt XML Template for QEmu/KVM')
+flags.DEFINE_string('libvirt_xen_xml_template',
+ utils.abspath('virt/libvirt.xen.xml.template'),
+ 'Libvirt XML Template for Xen')
flags.DEFINE_string('libvirt_uml_xml_template',
utils.abspath('virt/libvirt.uml.xml.template'),
'Libvirt XML Template for user-mode-linux')
@@ -55,11 +61,14 @@ flags.DEFINE_string('injected_network_template',
'Template file for injected network')
flags.DEFINE_string('libvirt_type',
'kvm',
- 'Libvirt domain type (valid options are: kvm, qemu, uml)')
+ 'Libvirt domain type (valid options are: kvm, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
+flags.DEFINE_bool('allow_project_net_traffic',
+ True,
+ 'Whether to allow in project network traffic')
def get_connection(read_only):
@@ -104,6 +113,9 @@ class LibvirtConnection(object):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
template_file = FLAGS.libvirt_uml_xml_template
+ elif FLAGS.libvirt_type == 'xen':
+ uri = FLAGS.libvirt_uri or 'xen:///'
+ template_file = FLAGS.libvirt_xen_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
template_file = FLAGS.libvirt_xml_template
@@ -134,7 +146,7 @@ class LibvirtConnection(object):
d.addCallback(lambda _: self._cleanup(instance))
# FIXME: What does this comment mean?
# TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
+ # WE'LL save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
timer = task.LoopingCall(f=None)
def _wait_for_shutdown():
@@ -214,6 +226,7 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
+ yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
@@ -242,6 +255,46 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield local_d
+ def _flush_xen_console(self, virsh_output):
+ logging.info('virsh said: %r' % (virsh_output,))
+ virsh_output = virsh_output[0].strip()
+
+ if virsh_output.startswith('/dev/'):
+ logging.info('cool, it\'s a device')
+ d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False)
+ d.addCallback(lambda r:r[0])
+ return d
+ else:
+ return ''
+
+ def _append_to_file(self, data, fpath):
+ logging.info('data: %r, fpath: %r' % (data, fpath))
+ fp = open(fpath, 'a+')
+ fp.write(data)
+ return fpath
+
+ def _dump_file(self, fpath):
+ fp = open(fpath, 'r+')
+ contents = fp.read()
+ logging.info('Contents: %r' % (contents,))
+ return contents
+
+ @exception.wrap_exception
+ def get_console_output(self, instance):
+ console_log = os.path.join(FLAGS.instances_path, instance['internal_id'], 'console.log')
+ logging.info('console_log: %s' % console_log)
+ logging.info('FLAGS.libvirt_type: %s' % FLAGS.libvirt_type)
+ if FLAGS.libvirt_type == 'xen':
+            # Xen is special: console output must be pulled from its tty device
+ d = process.simple_execute("virsh ttyconsole %s" % instance['name'])
+ d.addCallback(self._flush_xen_console)
+ d.addCallback(self._append_to_file, console_log)
+ else:
+ d = defer.succeed(console_log)
+ d.addCallback(self._dump_file)
+ return d
+
+
@defer.inlineCallbacks
def _create_image(self, inst, libvirt_xml):
# syntactic nicety
@@ -280,12 +333,11 @@ class LibvirtConnection(object):
key = str(inst['key_data'])
net = None
- network_ref = db.project_get_network(None, project.id)
+ network_ref = db.network_get_by_instance(None, inst['id'])
if network_ref['injected']:
address = db.instance_get_fixed_address(None, inst['id'])
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
- 'network': network_ref['network'],
'netmask': network_ref['netmask'],
'gateway': network_ref['gateway'],
'broadcast': network_ref['broadcast'],
@@ -314,9 +366,13 @@ class LibvirtConnection(object):
def to_xml(self, instance):
# TODO(termie): cache?
logging.debug('instance %s: starting toXML method', instance['name'])
- network = db.project_get_network(None, instance['project_id'])
+ network = db.project_get_network(None,
+ instance['project_id'])
# FIXME(vish): stick this in db
instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
+ ip_address = db.instance_get_fixed_address({}, instance['id'])
+ # Assume that the gateway also acts as the dhcp server.
+ dhcp_server = network['gateway']
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -324,7 +380,9 @@ class LibvirtConnection(object):
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
'bridge_name': network['bridge'],
- 'mac_address': instance['mac_address']}
+ 'mac_address': instance['mac_address'],
+ 'ip_address': ip_address,
+ 'dhcp_server': dhcp_server }
libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
@@ -438,3 +496,195 @@ class LibvirtConnection(object):
"""
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
+
+
+ def refresh_security_group(self, security_group_id):
+ fw = NWFilterFirewall(self._conn)
+ fw.ensure_security_group_filter(security_group_id)
+
+
+class NWFilterFirewall(object):
+ """
+ This class implements a network filtering mechanism versatile
+ enough for EC2 style Security Group filtering by leveraging
+ libvirt's nwfilter.
+
+ First, all instances get a filter ("nova-base-filter") applied.
+ This filter drops all incoming ipv4 and ipv6 connections.
+ Outgoing connections are never blocked.
+
+ Second, every security group maps to a nwfilter filter(*).
+ NWFilters can be updated at runtime and changes are applied
+ immediately, so changes to security groups can be applied at
+ runtime (as mandated by the spec).
+
+ Security group rules are named "nova-secgroup-<id>" where <id>
+ is the internal id of the security group. They're applied only on
+ hosts that have instances in the security group in question.
+
+ Updates to security groups are done by updating the data model
+ (in response to API calls) followed by a request sent to all
+ the nodes with instances in the security group to refresh the
+ security group.
+
+ Each instance has its own NWFilter, which references the above
+ mentioned security group NWFilters. This was done because
+ interfaces can only reference one filter while filters can
+ reference multiple other filters. This has the added benefit of
+ actually being able to add and remove security groups from an
+ instance at run time. This functionality is not exposed anywhere,
+ though.
+
+ Outstanding questions:
+
+ The name is unique, so would there be any good reason to sync
+ the uuid across the nodes (by assigning it from the datamodel)?
+
+
+ (*) This sentence brought to you by the redundancy department of
+ redundancy.
+ """
+
+ def __init__(self, get_connection):
+ self._conn = get_connection
+
+
+ nova_base_filter = '''<filter name='nova-base' chain='root'>
+ <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
+ <filterref filter='no-mac-spoofing'/>
+ <filterref filter='no-ip-spoofing'/>
+ <filterref filter='no-arp-spoofing'/>
+ <filterref filter='allow-dhcp-server'/>
+ <filterref filter='nova-allow-dhcp-server'/>
+ <filterref filter='nova-base-ipv4'/>
+ <filterref filter='nova-base-ipv6'/>
+ </filter>'''
+
+ nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in' priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def nova_base_ipv4_filter(self):
+ retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction,action,priority in [('out','accept', 399),
+ ('inout','drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+
+ def nova_base_ipv6_filter(self):
+ retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction,action,priority in [('out','accept',399),
+ ('inout','drop',400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s-ipv6 />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+
+ def nova_project_filter(self, project, net, mask):
+ retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
+ for protocol in ['tcp', 'udp', 'icmp']:
+ retval += """<rule action='accept' direction='in' priority='200'>
+ <%s srcipaddr='%s' srcipmask='%s' />
+ </rule>""" % (protocol, net, mask)
+ retval += '</filter>'
+ return retval
+
+
+ def _define_filter(self, xml):
+ if callable(xml):
+ xml = xml()
+ d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
+ return d
+
+
+ @staticmethod
+ def _get_net_and_mask(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.netmask())
+
+ @defer.inlineCallbacks
+ def setup_nwfilters_for_instance(self, instance):
+ """
+ Creates an NWFilter for the given instance. In the process,
+ it makes sure the filters for the security groups as well as
+ the base filter are all in place.
+ """
+
+ yield self._define_filter(self.nova_base_ipv4_filter)
+ yield self._define_filter(self.nova_base_ipv6_filter)
+ yield self._define_filter(self.nova_dhcp_filter)
+ yield self._define_filter(self.nova_base_filter)
+
+ nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" +
+ " <filterref filter='nova-base' />\n"
+ ) % instance['name']
+
+ if FLAGS.allow_project_net_traffic:
+ network_ref = db.project_get_network({}, instance['project_id'])
+ net, mask = self._get_net_and_mask(network_ref['cidr'])
+ project_filter = self.nova_project_filter(instance['project_id'],
+ net, mask)
+ yield self._define_filter(project_filter)
+
+ nwfilter_xml += (" <filterref filter='nova-project-%s' />\n"
+ ) % instance['project_id']
+
+ for security_group in instance.security_groups:
+ yield self.ensure_security_group_filter(security_group['id'])
+
+ nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
+ ) % security_group['id']
+ nwfilter_xml += "</filter>"
+
+ yield self._define_filter(nwfilter_xml)
+ return
+
+ def ensure_security_group_filter(self, security_group_id):
+ return self._define_filter(
+ self.security_group_to_nwfilter_xml(security_group_id))
+
+
+ def security_group_to_nwfilter_xml(self, security_group_id):
+ security_group = db.security_group_get({}, security_group_id)
+ rule_xml = ""
+ for rule in security_group.rules:
+ rule_xml += "<rule action='accept' direction='in' priority='300'>"
+ if rule.cidr:
+ net, mask = self._get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port))
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,)
+ return xml
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index 118e0b687..04e830b64 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -294,6 +294,9 @@ class XenAPIConnection(object):
'num_cpu': rec['VCPUs_max'],
'cpu_time': 0}
+ def get_console_output(self, instance):
+ return 'FAKE CONSOLE OUTPUT'
+
@utils.deferredToThread
def _lookup(self, i):
return self._lookup_blocking(i)
diff --git a/run_tests.py b/run_tests.py
index fa1e6f15b..0b27ec6cf 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -65,6 +65,7 @@ from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
+from nova.tests.virt_unittest import *
FLAGS = flags.FLAGS