summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Day <eday@oddments.org>2010-08-07 14:45:36 -0700
committerEric Day <eday@oddments.org>2010-08-07 14:45:36 -0700
commitff47d384a4be8627a32ee8e34dddc5bd005ac063 (patch)
tree4e2ca3073e85e69262d5b6a0be1e09919ded43bc
parentb77d261b0288f89d2b25a52e7ad7c8073e357cb1 (diff)
parentc0c2f5851ab70ee91f1a271a6c32a84315c1e831 (diff)
downloadnova-ff47d384a4be8627a32ee8e34dddc5bd005ac063.tar.gz
nova-ff47d384a4be8627a32ee8e34dddc5bd005ac063.tar.xz
nova-ff47d384a4be8627a32ee8e34dddc5bd005ac063.zip
Merged trunk.
-rwxr-xr-xbin/nova-dhcpbridge22
-rwxr-xr-xbin/nova-manage23
-rwxr-xr-xbin/nova-network6
-rw-r--r--nova/auth/fakeldap.py11
-rw-r--r--nova/auth/ldapdriver.py25
-rw-r--r--nova/auth/manager.py139
-rw-r--r--nova/compute/model.py45
-rw-r--r--nova/compute/service.py19
-rw-r--r--nova/datastore.py12
-rw-r--r--nova/endpoint/cloud.py176
-rw-r--r--nova/network/exception.py (renamed from nova/compute/exception.py)2
-rw-r--r--nova/network/linux_net.py (renamed from nova/compute/linux_net.py)0
-rw-r--r--nova/network/model.py (renamed from nova/compute/network.py)131
-rw-r--r--nova/network/service.py209
-rw-r--r--nova/network/vpn.py116
-rw-r--r--nova/objectstore/handler.py3
-rw-r--r--nova/objectstore/image.py8
-rw-r--r--nova/rpc.py4
-rw-r--r--nova/tests/auth_unittest.py24
-rw-r--r--nova/tests/model_unittest.py135
-rw-r--r--nova/tests/network_unittest.py114
-rw-r--r--nova/virt/images.py2
-rw-r--r--nova/virt/libvirt_conn.py17
23 files changed, 755 insertions, 488 deletions
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 0db241b5e..b3e7d456a 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -35,32 +35,34 @@ sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
from nova import flags
from nova import rpc
from nova import utils
-from nova.compute import linux_net
-from nova.compute import network
-
+from nova.network import linux_net
+from nova.network import model
+from nova.network import service
FLAGS = flags.FLAGS
def add_lease(mac, ip, hostname, interface):
if FLAGS.fake_rabbit:
- network.lease_ip(ip)
+ service.VlanNetworkService().lease_ip(ip)
else:
- rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
- "args" : {"address": ip}})
+ rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+ {"method": "lease_ip",
+ "args" : {"fixed_ip": ip}})
def old_lease(mac, ip, hostname, interface):
logging.debug("Adopted old lease or got a change of mac/hostname")
def del_lease(mac, ip, hostname, interface):
if FLAGS.fake_rabbit:
- network.release_ip(ip)
+ service.VlanNetworkService().release_ip(ip)
else:
- rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
- "args" : {"address": ip}})
+ rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+ {"method": "release_ip",
+ "args" : {"fixed_ip": ip}})
def init_leases(interface):
- net = network.get_network_by_interface(interface)
+ net = model.get_network_by_interface(interface)
res = ""
for host_name in net.hosts:
res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
diff --git a/bin/nova-manage b/bin/nova-manage
index b0f0029ed..7835c7a77 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -29,16 +29,12 @@ from nova import flags
from nova import utils
from nova.auth import manager
from nova.compute import model
-from nova.compute import network
from nova.cloudpipe import pipelib
from nova.endpoint import cloud
FLAGS = flags.FLAGS
-class NetworkCommands(object):
- def restart(self):
- network.restart_nets()
class VpnCommands(object):
def __init__(self):
@@ -170,6 +166,13 @@ class ProjectCommands(object):
arguments: name"""
self.manager.delete_project(name)
+ def environment(self, project_id, user_id, filename='novarc'):
+ """exports environment variables to a sourceable file
+ arguments: project_id user_id [filename='novarc']"""
+ rc = self.manager.get_environment_rc(project_id, user_id)
+ with open(filename, 'w') as f:
+ f.write(rc)
+
def list(self):
"""lists all projects
arguments: <none>"""
@@ -182,14 +185,11 @@ class ProjectCommands(object):
self.manager.remove_from_project(user, project)
def zip(self, project_id, user_id, filename='nova.zip'):
- """exports credentials for user to a zip file
+ """exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip]"""
- project = self.manager.get_project(project_id)
- if project:
- with open(filename, 'w') as f:
- f.write(project.get_credentials(user_id))
- else:
- print "Project %s doesn't exist" % project
+ zip = self.manager.get_credentials(project_id, user_id)
+ with open(filename, 'w') as f:
+ f.write(zip)
def usage(script_name):
@@ -197,7 +197,6 @@ def usage(script_name):
categories = [
- ('network', NetworkCommands),
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
diff --git a/bin/nova-network b/bin/nova-network
index 52d6cb70a..ba9063f56 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -21,12 +21,16 @@
Twistd daemon for the nova network nodes.
"""
+from nova import flags
from nova import twistd
+
from nova.network import service
+FLAGS = flags.FLAGS
+
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.NetworkService.create()
+ application = service.type_to_class(FLAGS.network_type).create()
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 4c32ed9d9..b420924af 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -28,6 +28,8 @@ import json
from nova import datastore
+SCOPE_BASE = 0
+SCOPE_ONELEVEL = 1 # not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
@@ -188,15 +190,18 @@ class FakeLDAP(object):
Args:
dn -- dn to search under
- scope -- only SCOPE_SUBTREE is supported
+ scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
query -- query to filter objects by
fields -- fields to return. Returns all fields if not specified
"""
- if scope != SCOPE_SUBTREE:
+ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
redis = datastore.Redis.instance()
- keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+ if scope == SCOPE_BASE:
+ keys = ["%s%s" % (self.__redis_prefix, dn)]
+ else:
+ keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
objects = []
for key in keys:
# get the attributes from redis
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 055e8332b..ec739e134 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -272,26 +272,30 @@ class LdapDriver(object):
"""Check if project exists"""
return self.get_project(name) != None
- def __find_object(self, dn, query = None):
+ def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
- objects = self.__find_objects(dn, query)
+ objects = self.__find_objects(dn, query, scope)
if len(objects) == 0:
return None
return objects[0]
- def __find_dns(self, dn, query=None):
+ def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
+ if scope is None: # one of the flags is 0!!
+ scope = self.ldap.SCOPE_SUBTREE
try:
- res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
return [dn for dn, attributes in res]
- def __find_objects(self, dn, query = None):
+ def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
+ if scope is None: # one of the flags is 0!!
+ scope = self.ldap.SCOPE_SUBTREE
try:
- res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the attributes
@@ -361,7 +365,8 @@ class LdapDriver(object):
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
- '(member=%s)' % self.__uid_to_dn(uid))
+ '(member=%s)' % self.__uid_to_dn(uid),
+ self.ldap.SCOPE_BASE)
return res != None
def __add_to_group(self, uid, group_dn):
@@ -391,7 +396,11 @@ class LdapDriver(object):
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
- self.__safe_remove_from_group(uid, group_dn)
+ # NOTE(vish): remove user from group and any sub_groups
+ sub_dns = self.__find_group_dns_with_member(
+ group_dn, uid)
+ for sub_dn in sub_dns:
+ self.__safe_remove_from_group(uid, sub_dn)
def __safe_remove_from_group(self, uid, group_dn):
"""Remove user from group, deleting group if user is last member"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index b9b1e23e0..d44ed52b2 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -36,6 +36,7 @@ from nova import objectstore # for flags
from nova import utils
from nova.auth import ldapdriver # for flags
from nova.auth import signer
+from nova.network import vpn
FLAGS = flags.FLAGS
@@ -50,13 +51,14 @@ flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
-flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
+flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
+ 'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
@@ -64,19 +66,11 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-flags.DEFINE_integer('vpn_start_port', 1000,
- 'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
- 'End port for the cloudpipe VPN servers')
-
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
-flags.DEFINE_string('vpn_ip', '127.0.0.1',
- 'Public IP for the cloudpipe VPN servers')
-
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
@@ -228,86 +222,6 @@ class Project(AuthBase):
self.member_ids)
-class NoMorePorts(exception.Error):
- pass
-
-
-class Vpn(datastore.BasicModel):
- """Manages vpn ips and ports for projects"""
- def __init__(self, project_id):
- self.project_id = project_id
- super(Vpn, self).__init__()
-
- @property
- def identifier(self):
- """Identifier used for key in redis"""
- return self.project_id
-
- @classmethod
- def create(cls, project_id):
- """Creates a vpn for project
-
- This method finds a free ip and port and stores the associated
- values in the datastore.
- """
- # TODO(vish): get list of vpn ips from redis
- port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
- vpn = cls(project_id)
- # save ip for project
- vpn['project'] = project_id
- vpn['ip'] = FLAGS.vpn_ip
- vpn['port'] = port
- vpn.save()
- return vpn
-
- @classmethod
- def find_free_port_for_ip(cls, ip):
- """Finds a free port for a given ip from the redis set"""
- # TODO(vish): these redis commands should be generalized and
- # placed into a base class. Conceptually, it is
- # similar to an association, but we are just
- # storing a set of values instead of keys that
- # should be turned into objects.
- redis = datastore.Redis.instance()
- key = 'ip:%s:ports' % ip
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- if (not redis.exists(key) and
- not redis.exists(cls._redis_association_name('ip', ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(key, i)
-
- port = redis.spop(key)
- if not port:
- raise NoMorePorts()
- return port
-
- @classmethod
- def num_ports_for_ip(cls, ip):
- """Calculates the number of free ports for a given ip"""
- return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
- @property
- def ip(self):
- """The ip assigned to the project"""
- return self['ip']
-
- @property
- def port(self):
- """The port assigned to the project"""
- return int(self['port'])
-
- def save(self):
- """Saves the association to the given ip"""
- self.associate_with('ip', self.ip)
- super(Vpn, self).save()
-
- def destroy(self):
- """Cleans up datastore and adds port back to pool"""
- self.unassociate_with('ip', self.ip)
- datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
- super(Vpn, self).destroy()
-
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
@@ -585,8 +499,6 @@ class AuthManager(object):
description,
member_users)
if project_dict:
- if FLAGS.use_vpn:
- Vpn.create(project_dict['id'])
return Project(**project_dict)
def add_to_project(self, user, project):
@@ -623,10 +535,10 @@ class AuthManager(object):
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
- vpn = Vpn.lookup(Project.safe_id(project))
- if not vpn:
- return None, None
- return (vpn.ip, vpn.port)
+ network_data = vpn.NetworkData.lookup(Project.safe_id(project))
+ if not network_data:
+ raise exception.NotFound('project network data has not been set')
+ return (network_data.ip, network_data.port)
def delete_project(self, project):
"""Deletes a project"""
@@ -757,25 +669,27 @@ class AuthManager(object):
rc = self.__generate_rc(user.access, user.secret, pid)
private_key, signed_cert = self._generate_x509_cert(user.id, pid)
- vpn = Vpn.lookup(pid)
- if not vpn:
- raise exception.Error("No vpn data allocated for project %s" %
- project.name)
- configfile = open(FLAGS.vpn_client_template,"r")
- s = string.Template(configfile.read())
- configfile.close()
- config = s.substitute(keyfile=FLAGS.credential_key_file,
- certfile=FLAGS.credential_cert_file,
- ip=vpn.ip,
- port=vpn.port)
-
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- zippy.writestr("nebula-client.conf", config)
+
+ network_data = vpn.NetworkData.lookup(pid)
+ if network_data:
+ configfile = open(FLAGS.vpn_client_template,"r")
+ s = string.Template(configfile.read())
+ configfile.close()
+ config = s.substitute(keyfile=FLAGS.credential_key_file,
+ certfile=FLAGS.credential_cert_file,
+ ip=network_data.ip,
+ port=network_data.port)
+ zippy.writestr(FLAGS.credential_vpn_file, config)
+ else:
+ logging.warn("No vpn data for project %s" %
+ pid)
+
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
@@ -784,6 +698,15 @@ class AuthManager(object):
shutil.rmtree(tmpdir)
return buffer
+ def get_environment_rc(self, user, project=None):
+ """Get environment rc (novarc) contents for user in project"""
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if project is None:
+ project = user.id
+ pid = Project.safe_id(project)
+ return self.__generate_rc(user.access, user.secret, pid)
+
def __generate_rc(self, access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 212830d3c..266a93b9a 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -41,9 +41,6 @@ True
"""
import datetime
-import logging
-import time
-import redis
import uuid
from nova import datastore
@@ -72,19 +69,22 @@ class InstanceDirectory(object):
for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
yield Instance(instance_id)
- def by_node(self, node_id):
+ @datastore.absorb_connection_error
+ def by_node(self, node):
"""returns a list of instances for a node"""
+ for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node):
+ yield Instance(instance_id)
- for instance in self.all:
- if instance['node_name'] == node_id:
- yield instance
-
- def by_ip(self, ip_address):
+ def by_ip(self, ip):
"""returns an instance object that is using the IP"""
- for instance in self.all:
- if instance['private_dns_name'] == ip_address:
- return instance
- return None
+ # NOTE(vish): The ip association should be just a single value, but
+ # to maintain consistency it is using the standard
+ # association and the ugly method for retrieving
+ # the first item in the set below.
+ result = datastore.Redis.instance().smembers('ip:%s:instances' % ip)
+ if not result:
+ return None
+ return Instance(list(result)[0])
def by_volume(self, volume_id):
"""returns the instance a volume is attached to"""
@@ -122,7 +122,8 @@ class Instance(datastore.BasicModel):
'instance_id': self.instance_id,
'node_name': 'unassigned',
'project_id': 'unassigned',
- 'user_id': 'unassigned'}
+ 'user_id': 'unassigned',
+ 'private_dns_name': 'unassigned'}
@property
def identifier(self):
@@ -148,19 +149,23 @@ class Instance(datastore.BasicModel):
"""Call into superclass to save object, then save associations"""
# NOTE(todd): doesn't track migration between projects/nodes,
# it just adds the first one
- should_update_project = self.is_new_record()
- should_update_node = self.is_new_record()
+ is_new = self.is_new_record()
+ node_set = (self.state['node_name'] != 'unassigned' and
+ self.initial_state.get('node_name', 'unassigned')
+ == 'unassigned')
success = super(Instance, self).save()
- if success and should_update_project:
+ if success and is_new:
self.associate_with("project", self.project)
- if success and should_update_node:
- self.associate_with("node", self['node_name'])
+ self.associate_with("ip", self.state['private_dns_name'])
+ if success and node_set:
+ self.associate_with("node", self.state['node_name'])
return True
def destroy(self):
"""Destroy associations, then destroy the object"""
self.unassociate_with("project", self.project)
- self.unassociate_with("node", self['node_name'])
+ self.unassociate_with("node", self.state['node_name'])
+ self.unassociate_with("ip", self.state['private_dns_name'])
return super(Instance, self).destroy()
class Host(datastore.BasicModel):
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 9b162edc7..820116453 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -39,9 +39,9 @@ from nova import service
from nova import utils
from nova.compute import disk
from nova.compute import model
-from nova.compute import network
from nova.compute import power_state
from nova.compute.instance_types import INSTANCE_TYPES
+from nova.network import service as network_service
from nova.objectstore import image # for image_path flag
from nova.virt import connection as virt_connection
from nova.volume import service as volume_service
@@ -117,12 +117,17 @@ class ComputeService(service.Service):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
inst = self.instdir.get(instance_id)
- if not FLAGS.simple_network:
- # TODO: Get the real security group of launch in here
- security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
- inst['project_id'],
- security_group).express()
+ # TODO: Get the real security group of launch in here
+ security_group = "default"
+ # NOTE(vish): passing network type allows us to express the
+ # network without making a call to network to find
+ # out which type of network to setup
+ network_service.setup_compute_network(
+ inst.get('network_type', 'vlan'),
+ inst['user_id'],
+ inst['project_id'],
+ security_group)
+
inst['node_name'] = FLAGS.node_name
inst.save()
# TODO(vish) check to make sure the availability zone matches
diff --git a/nova/datastore.py b/nova/datastore.py
index 9c2592334..51ef7a758 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -90,13 +90,15 @@ class BasicModel(object):
@absorb_connection_error
def __init__(self):
- self.initial_state = {}
- self.state = Redis.instance().hgetall(self.__redis_key)
- if self.state:
- self.initial_state = self.state
+ state = Redis.instance().hgetall(self.__redis_key)
+ if state:
+ self.initial_state = state
+ self.state = dict(self.initial_state)
else:
+ self.initial_state = {}
self.state = self.default_state()
+
def default_state(self):
"""You probably want to define this in your subclass"""
return {}
@@ -239,7 +241,7 @@ class BasicModel(object):
for key, val in self.state.iteritems():
Redis.instance().hset(self.__redis_key, key, val)
self.add_to_index()
- self.initial_state = self.state
+ self.initial_state = dict(self.state)
return True
@absorb_connection_error
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 0ee278f84..878d54a15 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -36,11 +36,11 @@ from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute import model
-from nova.compute import network
from nova.compute.instance_types import INSTANCE_TYPES
-from nova.compute import service as compute_service
from nova.endpoint import images
-from nova.volume import service as volume_service
+from nova.network import service as network_service
+from nova.network import model as network_model
+from nova.volume import service
FLAGS = flags.FLAGS
@@ -64,7 +64,6 @@ class CloudController(object):
"""
def __init__(self):
self.instdir = model.InstanceDirectory()
- self.network = network.PublicNetworkController()
self.setup()
@property
@@ -76,7 +75,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = volume_service.get_volume(volume_id)
+ volume = service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -222,7 +221,7 @@ class CloudController(object):
callback=_complete)
return d
- except users.UserError, e:
+ except manager.UserError as e:
raise
@rbac.allow('all')
@@ -308,7 +307,7 @@ class CloudController(object):
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
- address = self.network.get_host(public_ip)
+ address = network_model.PublicAddress.lookup(public_ip)
if address and (context.user.is_admin() or address['project_id'] == context.project.id):
return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -330,7 +329,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- volume = volume_service.get_volume(volume_id)
+ volume = service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -417,7 +416,7 @@ class CloudController(object):
'code': instance.get('state', 0),
'name': instance.get('state_description', 'pending')
}
- i['public_dns_name'] = self.network.get_public_ip_for_instance(
+ i['public_dns_name'] = network_model.get_public_ip_for_instance(
i['instance_id'])
i['private_dns_name'] = instance.get('private_dns_name', None)
if not i['public_dns_name']:
@@ -452,10 +451,10 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- for address in self.network.host_objs:
+ for address in network_model.PublicAddress.all():
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
- address['project_id'] == self.project.id):
+ address['project_id'] == context.project.id):
address_rv = {
'public_ip': address['address'],
'instance_id' : address.get('instance_id', 'free')
@@ -470,41 +469,63 @@ class CloudController(object):
return {'addressesSet': addresses}
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
- address = self.network.allocate_ip(
- context.user.id, context.project.id, 'public')
- return defer.succeed({'addressSet': [{'publicIp' : address}]})
+ network_topic = yield self._get_network_topic(context)
+ alloc_result = yield rpc.call(network_topic,
+ {"method": "allocate_elastic_ip",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id}})
+ public_ip = alloc_result['result']
+ defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def release_address(self, context, public_ip, **kwargs):
- self.network.deallocate_ip(public_ip)
- return defer.succeed({'releaseResponse': ["Address released."]})
+ # NOTE(vish): Should we make sure this works?
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "deallocate_elastic_ip",
+ "args": {"elastic_ip": public_ip}})
+ defer.returnValue({'releaseResponse': ["Address released."]})
@rbac.allow('netadmin')
- def associate_address(self, context, instance_id, **kwargs):
+ @defer.inlineCallbacks
+ def associate_address(self, context, instance_id, public_ip, **kwargs):
instance = self._get_instance(context, instance_id)
- self.network.associate_address(
- kwargs['public_ip'],
- instance['private_dns_name'],
- instance_id)
- return defer.succeed({'associateResponse': ["Address associated."]})
+ address = self._get_address(context, public_ip)
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "associate_elastic_ip",
+ "args": {"elastic_ip": address['address'],
+ "fixed_ip": instance['private_dns_name'],
+ "instance_id": instance['instance_id']}})
+ defer.returnValue({'associateResponse': ["Address associated."]})
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def disassociate_address(self, context, public_ip, **kwargs):
address = self._get_address(context, public_ip)
- self.network.disassociate_address(public_ip)
- # TODO - Strip the IP from the instance
- return defer.succeed({'disassociateResponse': ["Address disassociated."]})
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "disassociate_elastic_ip",
+ "args": {"elastic_ip": address['address']}})
+ defer.returnValue({'disassociateResponse': ["Address disassociated."]})
- def release_ip(self, context, private_ip, **kwargs):
- self.network.release_ip(private_ip)
- return defer.succeed({'releaseResponse': ["Address released."]})
-
- def lease_ip(self, context, private_ip, **kwargs):
- self.network.lease_ip(private_ip)
- return defer.succeed({'leaseResponse': ["Address leased."]})
+ @defer.inlineCallbacks
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ host = network_service.get_host_for_project(context.project.id)
+ if not host:
+ result = yield rpc.call(FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id}})
+ host = result['result']
+ defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
+ @defer.inlineCallbacks
def run_instances(self, context, **kwargs):
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
@@ -536,15 +557,20 @@ class CloudController(object):
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
+ network_topic = yield self._get_network_topic(context)
# TODO: Get the real security group of launch in here
security_group = "default"
- if FLAGS.simple_network:
- bridge_name = FLAGS.simple_network_bridge
- else:
- net = network.BridgedNetwork.get_network_for_project(
- context.user.id, context.project.id, security_group)
- bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
+ vpn = False
+ if image_id == FLAGS.vpn_image_id:
+ vpn = True
+ allocate_result = yield rpc.call(network_topic,
+ {"method": "allocate_fixed_ip",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id,
+ "security_group": security_group,
+ "vpn": vpn}})
+ allocate_data = allocate_result['result']
inst = self.instdir.new()
inst['image_id'] = image_id
inst['kernel_id'] = kernel_id
@@ -557,24 +583,11 @@ class CloudController(object):
inst['key_name'] = kwargs.get('key_name', '')
inst['user_id'] = context.user.id
inst['project_id'] = context.project.id
- inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
- inst['bridge_name'] = bridge_name
- if FLAGS.simple_network:
- address = network.allocate_simple_ip()
- else:
- if inst['image_id'] == FLAGS.vpn_image_id:
- address = network.allocate_vpn_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- else:
- address = network.allocate_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- inst['private_dns_name'] = str(address)
- # TODO: allocate expresses on the router node
+ inst['security_group'] = security_group
+ for (key, value) in allocate_data.iteritems():
+ inst[key] = value
+
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
@@ -582,40 +595,49 @@ class CloudController(object):
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make Network figure out the network name from ip.
- return defer.succeed(self._format_instances(
- context, reservation_id))
+ defer.returnValue(self._format_instances(context, reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
+ @defer.inlineCallbacks
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
+ network_topic = yield self._get_network_topic(context)
for i in instance_id:
logging.debug("Going to try and terminate %s" % i)
try:
instance = self._get_instance(context, i)
except exception.NotFound:
- logging.warning("Instance %s was not found during terminate" % i)
+ logging.warning("Instance %s was not found during terminate"
+ % i)
continue
- try:
- self.network.disassociate_address(
- instance.get('public_dns_name', 'bork'))
- except:
- pass
- if instance.get('private_dns_name', None):
- logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
- if FLAGS.simple_network:
- network.deallocate_simple_ip(instance.get('private_dns_name', None))
- else:
- try:
- self.network.deallocate_ip(instance.get('private_dns_name', None))
- except Exception, _err:
- pass
- if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
+ elastic_ip = network_model.get_public_ip_for_instance(i)
+ if elastic_ip:
+ logging.debug("Disassociating address %s" % elastic_ip)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(network_topic,
+ {"method": "disassociate_elastic_ip",
+ "args": {"elastic_ip": elastic_ip}})
+
+ fixed_ip = instance.get('private_dns_name', None)
+ if fixed_ip:
+ logging.debug("Deallocating address %s" % fixed_ip)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # actually removed. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(network_topic,
+ {"method": "deallocate_fixed_ip",
+ "args": {"fixed_ip": fixed_ip}})
+
+ if instance.get('node_name', 'unassigned') != 'unassigned':
+ # NOTE(joshua?): It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
- {"method": "terminate_instance",
- "args" : {"instance_id": i}})
+ {"method": "terminate_instance",
+ "args": {"instance_id": i}})
else:
instance.destroy()
- return defer.succeed(True)
+ defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
@@ -677,6 +699,8 @@ class CloudController(object):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
+ if not 'user_group' in kwargs:
+ raise exception.ApiError('user or group not specified')
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
diff --git a/nova/compute/exception.py b/nova/network/exception.py
index 13e4f0a51..5722e9672 100644
--- a/nova/compute/exception.py
+++ b/nova/network/exception.py
@@ -17,7 +17,7 @@
# under the License.
"""
-Exceptions for Compute Node errors, mostly network addressing.
+Exceptions for network errors.
"""
from nova.exception import Error
diff --git a/nova/compute/linux_net.py b/nova/network/linux_net.py
index 4a4b4c8a8..4a4b4c8a8 100644
--- a/nova/compute/linux_net.py
+++ b/nova/network/linux_net.py
diff --git a/nova/compute/network.py b/nova/network/model.py
index 62d892e58..daac035e4 100644
--- a/nova/compute/network.py
+++ b/nova/network/model.py
@@ -17,7 +17,7 @@
# under the License.
"""
-Classes for network control, including VLANs, DHCP, and IP allocation.
+Model Classes for network control, including VLANs, DHCP, and IP allocation.
"""
import IPy
@@ -26,12 +26,12 @@ import os
import time
from nova import datastore
-from nova import exception
+from nova import exception as nova_exception
from nova import flags
from nova import utils
from nova.auth import manager
-from nova.compute import exception as compute_exception
-from nova.compute import linux_net
+from nova.network import exception
+from nova.network import linux_net
FLAGS = flags.FLAGS
@@ -53,26 +53,6 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
-flags.DEFINE_boolean('simple_network', False,
- 'Use simple networking instead of vlans')
-flags.DEFINE_string('simple_network_bridge', 'br100',
- 'Bridge for simple network instances')
-flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
- 'Available ips for simple network')
-flags.DEFINE_string('simple_network_template',
- utils.abspath('compute/interfaces.template'),
- 'Template file for simple network')
-flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
- 'Netmask for simple network')
-flags.DEFINE_string('simple_network_network', '192.168.0.0',
- 'Network for simple network')
-flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
- 'Broadcast for simple network')
-flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
- 'Broadcast for simple network')
-flags.DEFINE_string('simple_network_dns', '8.8.4.4',
- 'Dns for simple network')
-
logging.getLogger().setLevel(logging.DEBUG)
@@ -156,7 +136,6 @@ class Vlan(datastore.BasicModel):
# CLEANUP:
# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
-# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al
# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
@@ -241,7 +220,7 @@ class BaseNetwork(datastore.BasicModel):
for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
address = str(self.network[idx])
if not address in self.hosts.keys():
- yield str(address)
+ yield address
@property
def num_static_ips(self):
@@ -253,7 +232,7 @@ class BaseNetwork(datastore.BasicModel):
self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
- raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+ raise exception.NoMoreAddresses("Project %s with network %s" %
(project_id, str(self.network)))
def lease_ip(self, ip_str):
@@ -261,7 +240,7 @@ class BaseNetwork(datastore.BasicModel):
def release_ip(self, ip_str):
if not ip_str in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
self.deexpress(address=ip_str)
self._rem_host(ip_str)
@@ -349,14 +328,14 @@ class DHCPNetwork(BridgedNetwork):
logging.debug("Not launching dnsmasq: no hosts.")
self.express_cloudpipe()
- def allocate_vpn_ip(self, mac):
+ def allocate_vpn_ip(self, user_id, project_id, mac):
address = str(self.network[2])
- self._add_host(self['user_id'], self['project_id'], address, mac)
+ self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
def express_cloudpipe(self):
- private_ip = self.network[2]
+ private_ip = str(self.network[2])
linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
% (private_ip, ))
linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
@@ -394,6 +373,7 @@ class PublicAddress(datastore.BasicModel):
addr.save()
return addr
+
DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
class PublicNetworkController(BaseNetwork):
override_type = 'network'
@@ -420,12 +400,6 @@ class PublicNetworkController(BaseNetwork):
for address in self.assigned:
yield PublicAddress(address)
- def get_public_ip_for_instance(self, instance_id):
- # FIXME: this should be a lookup - iteration won't scale
- for address_record in self.host_objs:
- if address_record.get('instance_id', 'available') == instance_id:
- return address_record['address']
-
def get_host(self, host):
if host in self.assigned:
return PublicAddress(host)
@@ -439,16 +413,20 @@ class PublicNetworkController(BaseNetwork):
PublicAddress(host).destroy()
datastore.Redis.instance().hdel(self._hosts_key, host)
+ def deallocate_ip(self, ip_str):
+ # NOTE(vish): cleanup is now done on release by the parent class
+ self.release_ip(ip_str)
+
def associate_address(self, public_ip, private_ip, instance_id):
if not public_ip in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
if addr.get('private_ip', None) == private_ip:
- raise compute_exception.AddressAlreadyAssociated()
+ raise exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') != 'available':
- raise compute_exception.AddressAlreadyAssociated()
+ raise exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
addr['instance_id'] = instance_id
addr.save()
@@ -456,10 +434,10 @@ class PublicNetworkController(BaseNetwork):
def disassociate_address(self, public_ip):
if not public_ip in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') == 'available':
- raise compute_exception.AddressNotAssociated()
+ raise exception.AddressNotAssociated()
self.deexpress(address=public_ip)
addr['private_ip'] = 'available'
addr['instance_id'] = 'available'
@@ -535,63 +513,42 @@ def get_vlan_for_project(project_id):
return vlan
else:
return Vlan.create(project_id, vnum)
- raise compute_exception.AddressNotAllocated("Out of VLANs")
+ raise exception.AddressNotAllocated("Out of VLANs")
+
+def get_project_network(project_id, security_group='default'):
+ """ get a project's private network, allocating one if needed """
+ project = manager.AuthManager().get_project(project_id)
+ if not project:
+ raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
+ manager_id = project.project_manager_id
+ return DHCPNetwork.get_network_for_project(manager_id,
+ project.id,
+ security_group)
-def get_network_by_interface(iface, security_group='default'):
- vlan = iface.rpartition("br")[2]
- return get_project_network(Vlan.dict_by_vlan().get(vlan), security_group)
def get_network_by_address(address):
+ # TODO(vish): This is completely the wrong way to do this, but
+ # I'm getting the network binary working before I
+ # tackle doing this the right way.
logging.debug("Get Network By Address: %s" % address)
for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
return net
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
-def allocate_simple_ip():
- redis = datastore.Redis.instance()
- if not redis.exists('ips') and not len(redis.keys('instances:*')):
- for address in FLAGS.simple_network_ips:
- redis.sadd('ips', address)
- address = redis.spop('ips')
- if not address:
- raise exception.NoMoreAddresses()
- return address
-def deallocate_simple_ip(address):
- datastore.Redis.instance().sadd('ips', address)
-
-
-def allocate_vpn_ip(user_id, project_id, mac):
- return get_project_network(project_id).allocate_vpn_ip(mac)
-
-def allocate_ip(user_id, project_id, mac):
- return get_project_network(project_id).allocate_ip(user_id, project_id, mac)
-
-def deallocate_ip(address):
- return get_network_by_address(address).deallocate_ip(address)
-
-def release_ip(address):
- return get_network_by_address(address).release_ip(address)
+def get_network_by_interface(iface, security_group='default'):
+ vlan = iface.rpartition("br")[2]
+ project_id = Vlan.dict_by_vlan().get(vlan)
+ return get_project_network(project_id, security_group)
-def lease_ip(address):
- return get_network_by_address(address).lease_ip(address)
-def get_project_network(project_id, security_group='default'):
- """ get a project's private network, allocating one if needed """
- # TODO(todd): It looks goofy to get a project from a UserManager.
- # Refactor to still use the LDAP backend, but not User specific.
- project = manager.AuthManager().get_project(project_id)
- if not project:
- raise exception.Error("Project %s doesn't exist, uhoh." %
- project_id)
- return DHCPNetwork.get_network_for_project(project.project_manager_id,
- project.id, security_group)
+def get_public_ip_for_instance(instance_id):
+ # FIXME: this should be a lookup - iteration won't scale
+ for address_record in PublicAddress.all():
+ if address_record.get('instance_id', 'available') == instance_id:
+ return address_record['address']
-def restart_nets():
- """ Ensure the network for each user is enabled"""
- for project in manager.AuthManager().get_projects():
- get_project_network(project.id).express()
diff --git a/nova/network/service.py b/nova/network/service.py
index 9d87e05e6..1a61f49d4 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -20,16 +20,211 @@
Network Nodes are responsible for allocating ips and setting up network
"""
-import logging
-
+from nova import datastore
from nova import flags
from nova import service
-
+from nova import utils
+from nova.auth import manager
+from nova.exception import NotFound
+from nova.network import exception
+from nova.network import model
+from nova.network import vpn
FLAGS = flags.FLAGS
-class NetworkService(service.Service):
- """Allocates ips and sets up networks"""
+flags.DEFINE_string('network_type',
+ 'flat',
+ 'Service Class for Networking')
+flags.DEFINE_string('flat_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('flat_network_ips',
+ ['192.168.0.2','192.168.0.3','192.168.0.4'],
+ 'Available ips for simple network')
+flags.DEFINE_string('flat_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
+ 'Gateway for simple network')
+flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('flat_network_dns', '8.8.4.4',
+ 'Dns for simple network')
+
+def type_to_class(network_type):
+ if network_type == 'flat':
+ return FlatNetworkService
+ elif network_type == 'vlan':
+ return VlanNetworkService
+ raise NotFound("Couldn't find %s network type" % network_type)
+
+
+def setup_compute_network(network_type, user_id, project_id, security_group):
+ srv = type_to_class(network_type)
+ srv.setup_compute_network(network_type, user_id, project_id, security_group)
+
+
+def get_host_for_project(project_id):
+ redis = datastore.Redis.instance()
+ return redis.get(_host_key(project_id))
+
+
+def _host_key(project_id):
+ return "network_host:%s" % project_id
+
+
+class BaseNetworkService(service.Service):
+ """Implements common network service functionality
+
+ This class must be subclassed.
+ """
+ def __init__(self, *args, **kwargs):
+ self.network = model.PublicNetworkController()
+
+ def set_network_host(self, user_id, project_id, *args, **kwargs):
+ """Safely sets the host of the projects network"""
+ redis = datastore.Redis.instance()
+ key = _host_key(project_id)
+ if redis.setnx(key, FLAGS.node_name):
+ self._on_set_network_host(user_id, project_id,
+ security_group='default',
+ *args, **kwargs)
+ return FLAGS.node_name
+ else:
+ return redis.get(key)
+
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ *args, **kwargs):
+ """Subclass implements getting fixed ip from the pool"""
+ raise NotImplementedError()
+
+ def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+ """Subclass implements return of ip to the pool"""
+ raise NotImplementedError()
+
+ def _on_set_network_host(self, user_id, project_id,
+ *args, **kwargs):
+ """Called when this host becomes the host for a project"""
+ pass
+
+ @classmethod
+ def setup_compute_network(self, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Sets up matching network for compute hosts"""
+ raise NotImplementedError()
+
+ def allocate_elastic_ip(self, user_id, project_id):
+ """Gets an elastic ip from the pool"""
+ # NOTE(vish): Replicating earlier decision to use 'public' as
+ # mac address name, although this should probably
+ # be done inside of the PublicNetworkController
+ return self.network.allocate_ip(user_id, project_id, 'public')
+
+ def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id):
+ """Associates an elastic ip to a fixed ip"""
+ self.network.associate_address(elastic_ip, fixed_ip, instance_id)
+
+ def disassociate_elastic_ip(self, elastic_ip):
+ """Disassociates an elastic ip"""
+ self.network.disassociate_address(elastic_ip)
+
+ def deallocate_elastic_ip(self, elastic_ip):
+ """Returns an elastic ip to the pool"""
+ self.network.deallocate_ip(elastic_ip)
+
+
+class FlatNetworkService(BaseNetworkService):
+ """Basic network where no vlans are used"""
+
+ @classmethod
+ def setup_compute_network(self, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Network is created manually"""
+ pass
+
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ *args, **kwargs):
+ """Gets a fixed ip from the pool
+
+ Flat network just grabs the next available ip from the pool
+ """
+ # NOTE(vish): Some automation could be done here. For example,
+ # creating the flat_network_bridge and setting up
+ # a gateway. This is all done manually atm
+ redis = datastore.Redis.instance()
+ if not redis.exists('ips') and not len(redis.keys('instances:*')):
+ for fixed_ip in FLAGS.flat_network_ips:
+ redis.sadd('ips', fixed_ip)
+ fixed_ip = redis.spop('ips')
+ if not fixed_ip:
+ raise exception.NoMoreAddresses()
+ return {'inject_network': True,
+ 'network_type': FLAGS.network_type,
+ 'mac_address': utils.generate_mac(),
+ 'private_dns_name': str(fixed_ip),
+ 'bridge_name': FLAGS.flat_network_bridge,
+ 'network_network': FLAGS.flat_network_network,
+ 'network_netmask': FLAGS.flat_network_netmask,
+ 'network_gateway': FLAGS.flat_network_gateway,
+ 'network_broadcast': FLAGS.flat_network_broadcast,
+ 'network_dns': FLAGS.flat_network_dns}
+
+ def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+ """Returns an ip to the pool"""
+ datastore.Redis.instance().sadd('ips', fixed_ip)
+
+class VlanNetworkService(BaseNetworkService):
+ """Vlan network with dhcp"""
+ # NOTE(vish): A lot of the interactions with network/model.py can be
+ # simplified and improved. Also there it may be useful
+ # to support vlans separately from dhcp, instead of having
+ # both of them together in this class.
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ vpn=False, *args, **kwargs):
+ """Gets a fixed ip from the pool """
+ mac = utils.generate_mac()
+ net = model.get_project_network(project_id)
+ if vpn:
+ fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
+ else:
+ fixed_ip = net.allocate_ip(user_id, project_id, mac)
+ return {'network_type': FLAGS.network_type,
+ 'bridge_name': net['bridge_name'],
+ 'mac_address': mac,
+ 'private_dns_name' : fixed_ip}
+
+ def deallocate_fixed_ip(self, fixed_ip,
+ *args, **kwargs):
+ """Returns an ip to the pool"""
+ return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
+
+ def lease_ip(self, address):
+ return model.get_network_by_address(address).lease_ip(address)
+
+ def release_ip(self, address):
+ return model.get_network_by_address(address).release_ip(address)
+
+ def restart_nets(self):
+ """Ensure the network for each user is enabled"""
+ for project in manager.AuthManager().get_projects():
+ model.get_project_network(project.id).express()
+
+ def _on_set_network_host(self, user_id, project_id,
+ *args, **kwargs):
+ """Called when this host becomes the host for a project"""
+ vpn.NetworkData.create(project_id)
- def __init__(self):
- logging.debug("Network node working")
+ @classmethod
+ def setup_compute_network(self, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Sets up matching network for compute hosts"""
+ # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
+ # we don't want to run dnsmasq on the client machines
+ net = model.BridgedNetwork.get_network_for_project(
+ user_id,
+ project_id,
+ security_group)
+ net.express()
diff --git a/nova/network/vpn.py b/nova/network/vpn.py
new file mode 100644
index 000000000..cec84287c
--- /dev/null
+++ b/nova/network/vpn.py
@@ -0,0 +1,116 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Network Data for projects"""
+
+from nova import datastore
+from nova import exception
+from nova import flags
+from nova import utils
+
+FLAGS = flags.FLAGS
+
+
+flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+ 'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_start_port', 1000,
+ 'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+ 'End port for the cloudpipe VPN servers')
+
+class NoMorePorts(exception.Error):
+ pass
+
+
+class NetworkData(datastore.BasicModel):
+ """Manages network host, and vpn ip and port for projects"""
+ def __init__(self, project_id):
+ self.project_id = project_id
+ super(NetworkData, self).__init__()
+
+ @property
+ def identifier(self):
+ """Identifier used for key in redis"""
+ return self.project_id
+
+ @classmethod
+ def create(cls, project_id):
+ """Creates a vpn for project
+
+ This method finds a free ip and port and stores the associated
+ values in the datastore.
+ """
+ # TODO(vish): will we ever need multiple ips per host?
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ network_data = cls(project_id)
+ # save ip for project
+ network_data['host'] = FLAGS.node_name
+ network_data['project'] = project_id
+ network_data['ip'] = FLAGS.vpn_ip
+ network_data['port'] = port
+ network_data.save()
+ return network_data
+
+ @classmethod
+ def find_free_port_for_ip(cls, ip):
+ """Finds a free port for a given ip from the redis set"""
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
+ redis = datastore.Redis.instance()
+ key = 'ip:%s:ports' % ip
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ if (not redis.exists(key) and
+ not redis.exists(cls._redis_association_name('ip', ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(key, i)
+
+ port = redis.spop(key)
+ if not port:
+ raise NoMorePorts()
+ return port
+
+ @classmethod
+ def num_ports_for_ip(cls, ip):
+ """Calculates the number of free ports for a given ip"""
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+ @property
+ def ip(self):
+ """The ip assigned to the project"""
+ return self['ip']
+
+ @property
+ def port(self):
+ """The port assigned to the project"""
+ return int(self['port'])
+
+ def save(self):
+ """Saves the association to the given ip"""
+ self.associate_with('ip', self.ip)
+ super(NetworkData, self).save()
+
+ def destroy(self):
+ """Cleans up datastore and adds port back to pool"""
+ self.unassociate_with('ip', self.ip)
+ datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+ super(NetworkData, self).destroy()
+
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b4d7e6179..f625a2aa1 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -266,7 +266,8 @@ class ImagesResource(Resource):
""" returns a json listing of all images
that a user has permissions to see """
- images = [i for i in image.Image.all() if i.is_authorized(request.context)]
+ images = [i for i in image.Image.all() \
+ if i.is_authorized(request.context, readonly=True)]
request.write(json.dumps([i.metadata for i in images]))
request.finish()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index bea2e9637..860298ba6 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -65,9 +65,13 @@ class Image(object):
except:
pass
- def is_authorized(self, context):
+ def is_authorized(self, context, readonly=False):
+ # NOTE(devcamcar): Public images can be read by anyone,
+ # but only modified by admin or owner.
try:
- return self.metadata['isPublic'] or context.user.is_admin() or self.metadata['imageOwnerId'] == context.project.id
+ return (self.metadata['isPublic'] and readonly) or \
+ context.user.is_admin() or \
+ self.metadata['imageOwnerId'] == context.project.id
except:
return False
diff --git a/nova/rpc.py b/nova/rpc.py
index ebf140d92..2a550c3ae 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -238,12 +238,12 @@ def send_message(topic, message, wait=True):
exchange=msg_id,
auto_delete=True,
exchange_type="direct",
- routing_key=msg_id,
- durable=False)
+ routing_key=msg_id)
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange,
+ durable=False,
exchange_type="topic",
routing_key=topic)
publisher.send(message)
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 2167c2385..f7e0625a3 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -135,10 +135,18 @@ class AuthTestCase(test.BaseTestCase):
self.manager.add_to_project('test2', 'testproj')
self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
- def test_208_can_remove_user_from_project(self):
+ def test_207_can_remove_user_from_project(self):
self.manager.remove_from_project('test2', 'testproj')
self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
+ def test_208_can_remove_add_user_with_role(self):
+ self.manager.add_to_project('test2', 'testproj')
+ self.manager.add_role('test2', 'developer', 'testproj')
+ self.manager.remove_from_project('test2', 'testproj')
+ self.assertFalse(self.manager.has_role('test2', 'developer', 'testproj'))
+ self.manager.add_to_project('test2', 'testproj')
+ self.manager.remove_from_project('test2', 'testproj')
+
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
@@ -179,20 +187,6 @@ class AuthTestCase(test.BaseTestCase):
self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- def test_212_vpn_ip_and_port_looks_valid(self):
- project = self.manager.get_project('testproj')
- self.assert_(project.vpn_ip)
- self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
- self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
-
- def test_213_too_many_vpns(self):
- vpns = []
- for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
- vpns.append(manager.Vpn.create("vpnuser%s" % i))
- self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
- for vpn in vpns:
- vpn.destroy()
-
def test_214_can_retrieve_project_by_user(self):
project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
self.assert_(len(self.manager.get_projects()) > 1)
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index 6825cfe2a..dc2441c24 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -19,9 +19,7 @@
from datetime import datetime, timedelta
import logging
import time
-from twisted.internet import defer
-from nova import exception
from nova import flags
from nova import test
from nova import utils
@@ -49,9 +47,9 @@ class ModelTestCase(test.TrialTestCase):
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny'
- inst['node_name'] = FLAGS.node_name
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst['private_dns_name'] = '10.0.0.1'
inst.save()
return inst
@@ -71,118 +69,126 @@ class ModelTestCase(test.TrialTestCase):
session_token.save()
return session_token
- @defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
- instance = yield self.create_instance()
- old = yield model.Instance(instance.identifier)
+ instance = self.create_instance()
+ old = model.Instance(instance.identifier)
self.assertFalse(old.is_new_record())
- @defer.inlineCallbacks
def test_delete_instance(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_instance()
- yield instance.destroy()
- newinst = yield model.Instance('i-test')
+ instance = self.create_instance()
+ instance.destroy()
+ newinst = model.Instance('i-test')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_instance_added_to_set(self):
- """create, then check that it is listed for the project"""
- instance = yield self.create_instance()
+ """create, then check that it is listed in global set"""
+ instance = self.create_instance()
found = False
for x in model.InstanceDirectory().all:
if x.identifier == 'i-test':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_instance_associates_project(self):
"""create, then check that it is listed for the project"""
- instance = yield self.create_instance()
+ instance = self.create_instance()
found = False
for x in model.InstanceDirectory().by_project(instance.project):
if x.identifier == 'i-test':
found = True
self.assert_(found)
- @defer.inlineCallbacks
+ def test_instance_associates_ip(self):
+ """create, then check that it is listed for the ip"""
+ instance = self.create_instance()
+ found = False
+ x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
+ self.assertEqual(x.identifier, 'i-test')
+
+ def test_instance_associates_node(self):
+ """create, then check that it is listed for the node_name"""
+ instance = self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().by_node(FLAGS.node_name):
+ if x.identifier == 'i-test':
+ found = True
+ self.assertFalse(found)
+ instance['node_name'] = 'test_node'
+ instance.save()
+ for x in model.InstanceDirectory().by_node('test_node'):
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+
def test_host_class_finds_hosts(self):
- host = yield self.create_host()
+ host = self.create_host()
self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
- @defer.inlineCallbacks
def test_host_class_doesnt_find_missing_hosts(self):
- rv = yield model.Host.lookup('woahnelly')
+ rv = model.Host.lookup('woahnelly')
self.assertEqual(None, rv)
- @defer.inlineCallbacks
def test_create_host(self):
"""store with create_host, then test that a load finds it"""
- host = yield self.create_host()
- old = yield model.Host(host.identifier)
+ host = self.create_host()
+ old = model.Host(host.identifier)
self.assertFalse(old.is_new_record())
- @defer.inlineCallbacks
def test_delete_host(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_host()
- yield instance.destroy()
- newinst = yield model.Host('testhost')
+ instance = self.create_host()
+ instance.destroy()
+ newinst = model.Host('testhost')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_host_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_host()
+ instance = self.create_host()
found = False
for x in model.Host.all():
if x.identifier == 'testhost':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_create_daemon_two_args(self):
"""create a daemon with two arguments"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_create_daemon_single_arg(self):
"""Create a daemon using the combined host:bin format"""
- d = yield model.Daemon("testhost:nova-testdaemon")
+ d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_equality_of_daemon_single_and_double_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
- d = yield model.Daemon("testhost:nova-testdaemon")
+ d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_equality_daemon_of_double_and_single_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_delete_daemon(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_daemon()
- yield instance.destroy()
- newinst = yield model.Daemon('testhost', 'nova-testdaemon')
+ instance = self.create_daemon()
+ instance.destroy()
+ newinst = model.Daemon('testhost', 'nova-testdaemon')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_daemon_heartbeat(self):
"""Create a daemon, sleep, heartbeat, check for update"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
ts = d['updated_at']
time.sleep(2)
d.heartbeat()
@@ -190,70 +196,62 @@ class ModelTestCase(test.TrialTestCase):
ts2 = d2['updated_at']
self.assert_(ts2 > ts)
- @defer.inlineCallbacks
def test_daemon_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_daemon()
+ instance = self.create_daemon()
found = False
for x in model.Daemon.all():
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_daemon_associates_host(self):
"""create, then check that it is listed for the host"""
- instance = yield self.create_daemon()
+ instance = self.create_daemon()
found = False
for x in model.Daemon.by_host('testhost'):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
- @defer.inlineCallbacks
def test_create_session_token(self):
"""create"""
- d = yield self.create_session_token()
+ d = self.create_session_token()
d = model.SessionToken(d.token)
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_delete_session_token(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_session_token()
- yield instance.destroy()
- newinst = yield model.SessionToken(instance.token)
+ instance = self.create_session_token()
+ instance.destroy()
+ newinst = model.SessionToken(instance.token)
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_session_token_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_session_token()
+ instance = self.create_session_token()
found = False
for x in model.SessionToken.all():
if x.identifier == instance.token:
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_session_token_associates_user(self):
"""create, then check that it is listed for the user"""
- instance = yield self.create_session_token()
+ instance = self.create_session_token()
found = False
for x in model.SessionToken.associated_to('user', 'testuser'):
if x.identifier == instance.identifier:
found = True
self.assertTrue(found)
- @defer.inlineCallbacks
def test_session_token_generation(self):
- instance = yield model.SessionToken.generate('username', 'TokenType')
+ instance = model.SessionToken.generate('username', 'TokenType')
self.assertFalse(instance.is_new_record())
- @defer.inlineCallbacks
def test_find_generated_session_token(self):
- instance = yield model.SessionToken.generate('username', 'TokenType')
- found = yield model.SessionToken.lookup(instance.identifier)
+ instance = model.SessionToken.generate('username', 'TokenType')
+ found = model.SessionToken.lookup(instance.identifier)
self.assert_(found)
def test_update_session_token_expiry(self):
@@ -264,34 +262,29 @@ class ModelTestCase(test.TrialTestCase):
expiry = utils.parse_isotime(instance['expiry'])
self.assert_(expiry > datetime.utcnow())
- @defer.inlineCallbacks
def test_session_token_lookup_when_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
instance.save()
inst = model.SessionToken.lookup(instance.identifier)
self.assertFalse(inst)
- @defer.inlineCallbacks
def test_session_token_lookup_when_not_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
inst = model.SessionToken.lookup(instance.identifier)
self.assert_(inst)
- @defer.inlineCallbacks
def test_session_token_is_expired_when_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
self.assert_(instance.is_expired())
- @defer.inlineCallbacks
def test_session_token_is_expired_when_not_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
self.assertFalse(instance.is_expired())
- @defer.inlineCallbacks
def test_session_token_ttl(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
now = datetime.utcnow()
delta = timedelta(hours=1)
instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f24eefb0d..879ee02a4 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -24,8 +24,10 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.compute import network
-from nova.compute.exception import NoMoreAddresses
+from nova.network import model
+from nova.network import service
+from nova.network import vpn
+from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
@@ -52,7 +54,8 @@ class NetworkTestCase(test.TrialTestCase):
self.projects.append(self.manager.create_project(name,
'netuser',
name))
- self.network = network.PublicNetworkController()
+ self.network = model.PublicNetworkController()
+ self.service = service.VlanNetworkService()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
@@ -66,16 +69,17 @@ class NetworkTestCase(test.TrialTestCase):
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
- def test_allocate_deallocate_ip(self):
- address = network.allocate_ip(
- self.user.id, self.projects[0].id, utils.generate_mac())
+ def test_allocate_deallocate_fixed_ip(self):
+ result = yield self.service.allocate_fixed_ip(
+ self.user.id, self.projects[0].id)
+ address = result['private_dns_name']
+ mac = result['mac_address']
logging.debug("Was allocated %s" % (address))
- net = network.get_project_network(self.projects[0].id, "default")
+ net = model.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
- mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
- rv = network.deallocate_ip(address)
+ rv = self.service.deallocate_fixed_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, self.projects[0].id))
@@ -84,15 +88,18 @@ class NetworkTestCase(test.TrialTestCase):
self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
- mac = utils.generate_mac()
- secondmac = utils.generate_mac()
hostname = "test-host"
- address = network.allocate_ip(
- self.user.id, self.projects[0].id, mac)
- secondaddress = network.allocate_ip(
- self.user, self.projects[1].id, secondmac)
- net = network.get_project_network(self.projects[0].id, "default")
- secondnet = network.get_project_network(self.projects[1].id, "default")
+ result = yield self.service.allocate_fixed_ip(
+ self.user.id, self.projects[0].id)
+ mac = result['mac_address']
+ address = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, self.projects[1].id)
+ secondmac = result['mac_address']
+ secondaddress = result['private_dns_name']
+
+ net = model.get_project_network(self.projects[0].id, "default")
+ secondnet = model.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
@@ -103,46 +110,64 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.issue_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
- rv = network.deallocate_ip(address)
+ rv = self.service.deallocate_fixed_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
- rv = network.deallocate_ip(secondaddress)
+ rv = self.service.deallocate_fixed_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
- secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
- utils.generate_mac())
+ result = yield self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
+ firstaddress = result['private_dns_name']
hostname = "toomany-hosts"
for i in range(1,5):
project_id = self.projects[i].id
- mac = utils.generate_mac()
- mac2 = utils.generate_mac()
- mac3 = utils.generate_mac()
- address = network.allocate_ip(
- self.user, project_id, mac)
- address2 = network.allocate_ip(
- self.user, project_id, mac2)
- address3 = network.allocate_ip(
- self.user, project_id, mac3)
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac = result['mac_address']
+ address = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac2 = result['mac_address']
+ address2 = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac3 = result['mac_address']
+ address3 = result['private_dns_name']
self.assertEqual(False, is_in_project(address, self.projects[0].id))
self.assertEqual(False, is_in_project(address2, self.projects[0].id))
self.assertEqual(False, is_in_project(address3, self.projects[0].id))
- rv = network.deallocate_ip(address)
- rv = network.deallocate_ip(address2)
- rv = network.deallocate_ip(address3)
- net = network.get_project_network(project_id, "default")
+ rv = self.service.deallocate_fixed_ip(address)
+ rv = self.service.deallocate_fixed_ip(address2)
+ rv = self.service.deallocate_fixed_ip(address3)
+ net = model.get_project_network(project_id, "default")
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
- net = network.get_project_network(self.projects[0].id, "default")
- rv = network.deallocate_ip(secondaddress)
- self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
+ net = model.get_project_network(self.projects[0].id, "default")
+ rv = self.service.deallocate_fixed_ip(firstaddress)
+ self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
+
+ def test_212_vpn_ip_and_port_looks_valid(self):
+ vpn.NetworkData.create(self.projects[0].id)
+ self.assert_(self.projects[0].vpn_ip)
+ self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
+ self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
+
+ def test_too_many_vpns(self):
+ vpns = []
+ for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
+ vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
+ self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
+ for network_datum in vpns:
+ network_datum.destroy()
def test_release_before_deallocate(self):
pass
@@ -169,7 +194,7 @@ class NetworkTestCase(test.TrialTestCase):
NUM_RESERVED_VPN_IPS)
usable addresses
"""
- net = network.get_project_network(self.projects[0].id, "default")
+ net = model.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
@@ -183,22 +208,23 @@ class NetworkTestCase(test.TrialTestCase):
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
- macs[i] = utils.generate_mac()
- addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
+ result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
+ macs[i] = result['mac_address']
+ addresses[i] = result['private_dns_name']
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
- self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
+ self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
for i in range(0, (num_available_ips - 1)):
- rv = network.deallocate_ip(addresses[i])
+ rv = self.service.deallocate_fixed_ip(addresses[i])
self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
- return address in network.get_project_network(project_id).list_addresses()
+ return address in model.get_project_network(project_id).list_addresses()
def _get_project_addresses(project_id):
project_addresses = []
- for addr in network.get_project_network(project_id).list_addresses():
+ for addr in model.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 872eb6d6a..48a87b514 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -65,7 +65,7 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
-def _fetch_local_image(image, path, _):
+def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index fe4967fbe..551ba6e54 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -46,6 +46,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
'Libvirt XML Template')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('compute/interfaces.template'),
+ 'Template file for injected network')
flags.DEFINE_string('libvirt_type',
'kvm',
@@ -205,14 +208,14 @@ class LibvirtConnection(object):
key = data['key_data']
net = None
- if FLAGS.simple_network:
- with open(FLAGS.simple_network_template) as f:
+ if data.get('inject_network', False):
+ with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': data['private_dns_name'],
- 'network': FLAGS.simple_network_network,
- 'netmask': FLAGS.simple_network_netmask,
- 'gateway': FLAGS.simple_network_gateway,
- 'broadcast': FLAGS.simple_network_broadcast,
- 'dns': FLAGS.simple_network_dns}
+ 'network': data['network_network'],
+ 'netmask': data['network_netmask'],
+ 'gateway': data['network_gateway'],
+ 'broadcast': data['network_broadcast'],
+ 'dns': data['network_dns']}
if key or net:
logging.info('Injecting data into image %s', data['image_id'])
yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)