author     Ewan Mellor <ewan.mellor@citrix.com>    2010-08-11 14:23:59 +0100
committer  Ewan Mellor <ewan.mellor@citrix.com>    2010-08-11 14:23:59 +0100
commit     57beed337d3c267b69eaf73f166fd00ea29f2498 (patch)
tree       24065950eb39fd4f7517d050f454faff3dd85544
parent     035f93aa7dc19656bf22de9b7ccfe12b28cde61b (diff)
parent     57f834ba7881c116adedfa3b3f1065bf0f0c072e (diff)
Merged with trunk.
-rwxr-xr-x  bin/nova-api | 6
-rwxr-xr-x  bin/nova-dhcpbridge | 56
-rwxr-xr-x  bin/nova-import-canonical-imagestore | 33
-rwxr-xr-x  bin/nova-instancemonitor | 18
-rwxr-xr-x  bin/nova-manage | 153
-rwxr-xr-x  bin/nova-network | 6
-rwxr-xr-x  bin/nova-objectstore | 8
-rwxr-xr-x  bin/nova-rsapi | 32
-rw-r--r--  nova/auth/fakeldap.py | 11
-rw-r--r--  nova/auth/ldapdriver.py | 60
-rw-r--r--  nova/auth/manager.py | 181
-rw-r--r--  nova/auth/signer.py | 3
-rw-r--r--  nova/compute/libvirt.xml.template | 3
-rw-r--r--  nova/compute/model.py | 45
-rw-r--r--  nova/compute/service.py | 19
-rw-r--r--  nova/datastore.py | 12
-rw-r--r--  nova/endpoint/cloud.py | 290
-rw-r--r--  nova/endpoint/images.py | 18
-rw-r--r--  nova/endpoint/rackspace.py | 243
-rw-r--r--  nova/flags.py | 141
-rw-r--r--  nova/network/exception.py (renamed from nova/compute/exception.py) | 2
-rw-r--r--  nova/network/linux_net.py (renamed from nova/compute/linux_net.py) | 0
-rw-r--r--  nova/network/model.py (renamed from nova/compute/network.py) | 163
-rw-r--r--  nova/network/service.py | 209
-rw-r--r--  nova/network/vpn.py | 116
-rw-r--r--  nova/objectstore/handler.py | 3
-rw-r--r--  nova/objectstore/image.py | 8
-rw-r--r--  nova/rpc.py | 4
-rw-r--r--  nova/server.py | 20
-rw-r--r--  nova/test.py | 98
-rw-r--r--  nova/tests/auth_unittest.py | 40
-rw-r--r--  nova/tests/declare_flags.py (renamed from nova/endpoint/wsgi.py) | 23
-rw-r--r--  nova/tests/flags_unittest.py | 87
-rw-r--r--  nova/tests/model_unittest.py | 135
-rw-r--r--  nova/tests/network_unittest.py | 114
-rw-r--r--  nova/tests/objectstore_unittest.py | 237
-rw-r--r--  nova/tests/runtime_flags.py (renamed from exercise_rsapi.py) | 34
-rw-r--r--  nova/tests/volume_unittest.py | 91
-rw-r--r--  nova/utils.py | 2
-rw-r--r--  nova/virt/images.py | 16
-rw-r--r--  nova/virt/libvirt_conn.py | 41
-rw-r--r--  nova/volume/service.py | 154
-rw-r--r--  nova/wsgi.py | 173
-rw-r--r--  pylintrc | 19
-rw-r--r--  run_tests.py | 4
-rwxr-xr-x  run_tests.sh | 9
-rw-r--r--  tools/install_venv.py | 62
-rw-r--r--  tools/pip-requires | 3
48 files changed, 1991 insertions, 1214 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 1f2009c30..13baf22a7 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -29,8 +29,6 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
-from nova.auth import manager
-from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api
from nova.endpoint import cloud
@@ -39,10 +37,10 @@ FLAGS = flags.FLAGS
def main(_argv):
+ """Load the controllers and start the tornado I/O loop."""
controllers = {
'Cloud': cloud.CloudController(),
- 'Admin': admin.AdminController()
- }
+ 'Admin': admin.AdminController()}
_app = api.APIServerApplication(controllers)
conn = rpc.Connection.instance()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 0db241b5e..ed1af206a 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -18,8 +18,6 @@
# under the License.
"""
-nova-dhcpbridge
-
Handle lease database updates from DHCP servers.
"""
@@ -35,39 +33,50 @@ sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
from nova import flags
from nova import rpc
from nova import utils
-from nova.compute import linux_net
-from nova.compute import network
-
+from nova.network import linux_net
+from nova.network import model
+from nova.network import service
FLAGS = flags.FLAGS
-def add_lease(mac, ip, hostname, interface):
+def add_lease(_mac, ip, _hostname, _interface):
+ """Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- network.lease_ip(ip)
+ service.VlanNetworkService().lease_ip(ip)
else:
- rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
- "args" : {"address": ip}})
+ rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+ {"method": "lease_ip",
+ "args": {"fixed_ip": ip}})
+
-def old_lease(mac, ip, hostname, interface):
+def old_lease(_mac, _ip, _hostname, _interface):
+ """Do nothing, just an old lease update."""
logging.debug("Adopted old lease or got a change of mac/hostname")
-def del_lease(mac, ip, hostname, interface):
+
+def del_lease(_mac, ip, _hostname, _interface):
+ """Remove the leased IP from the databases."""
if FLAGS.fake_rabbit:
- network.release_ip(ip)
+ service.VlanNetworkService().release_ip(ip)
else:
- rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
- "args" : {"address": ip}})
+ rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+ {"method": "release_ip",
+ "args": {"fixed_ip": ip}})
+
def init_leases(interface):
- net = network.get_network_by_interface(interface)
+ """Get the list of hosts for an interface."""
+ net = model.get_network_by_interface(interface)
res = ""
for host_name in net.hosts:
- res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
+ res += "%s\n" % linux_net.hostDHCP(net, host_name,
+ net.hosts[host_name])
return res
def main():
+ """Parse environment and arguments and call the approproate action."""
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
@@ -77,18 +86,19 @@ def main():
FLAGS.redis_db = 8
FLAGS.network_size = 32
FLAGS.connection_type = 'fake'
- FLAGS.fake_network=True
- FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
+ FLAGS.fake_network = True
+ FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
action = argv[1]
- if action in ['add','del','old']:
+ if action in ['add', 'del', 'old']:
mac = argv[2]
ip = argv[3]
hostname = argv[4]
- logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
- globals()[action+'_lease'](mac, ip, hostname, interface)
+ logging.debug("Called %s for mac %s with ip %s and "
+ "hostname %s on interface %s",
+ action, mac, ip, hostname, interface)
+ globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
- exit(0)
if __name__ == "__main__":
- sys.exit(main())
+ main()
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
index 2e79f09b7..5165109b2 100755
--- a/bin/nova-import-canonical-imagestore
+++ b/bin/nova-import-canonical-imagestore
@@ -37,20 +37,17 @@ FLAGS = flags.FLAGS
api_url = 'https://imagestore.canonical.com/api/dashboard'
-image_cache = None
-def images():
- global image_cache
- if not image_cache:
- try:
- images = json.load(urllib2.urlopen(api_url))['images']
- image_cache = [i for i in images if i['title'].find('amd64') > -1]
- except Exception:
- print 'unable to download canonical image list'
- sys.exit(1)
- return image_cache
-
-# FIXME(ja): add checksum/signature checks
+
+def get_images():
+ """Get a list of the images from the imagestore URL."""
+ images = json.load(urllib2.urlopen(api_url))['images']
+ images = [img for img in images if img['title'].find('amd64') > -1]
+ return images
+
+
def download(img):
+ """Download an image to the local filesystem."""
+ # FIXME(ja): add checksum/signature checks
tempdir = tempfile.mkdtemp(prefix='cis-')
kernel_id = None
@@ -79,20 +76,22 @@ def download(img):
shutil.rmtree(tempdir)
+
def main():
+ """Main entry point."""
utils.default_flagfile()
argv = FLAGS(sys.argv)
+ images = get_images()
if len(argv) == 2:
- for img in images():
+ for img in images:
if argv[1] == 'all' or argv[1] == img['title']:
download(img)
else:
print 'usage: %s (title|all)'
print 'available images:'
- for image in images():
- print image['title']
+ for img in images:
+ print img['title']
if __name__ == '__main__':
main()
-
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index b195089b7..911fb6f42 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -22,7 +22,6 @@
"""
import logging
-from twisted.internet import task
from twisted.application import service
from nova import twistd
@@ -30,7 +29,11 @@ from nova.compute import monitor
logging.getLogger('boto').setLevel(logging.WARN)
-def main():
+
+if __name__ == '__main__':
+ twistd.serve(__file__)
+
+if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
m = monitor.InstanceMonitor()
@@ -38,14 +41,3 @@ def main():
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-instancemonitor')
m.setServiceParent(application)
- return application
-
-if __name__ == '__main__':
- twistd.serve(__file__)
-
-if __name__ == '__builtin__':
- application = main()
-
-
-
-
diff --git a/bin/nova-manage b/bin/nova-manage
index b0f0029ed..6af092922 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -29,24 +29,23 @@ from nova import flags
from nova import utils
from nova.auth import manager
from nova.compute import model
-from nova.compute import network
from nova.cloudpipe import pipelib
from nova.endpoint import cloud
FLAGS = flags.FLAGS
-class NetworkCommands(object):
- def restart(self):
- network.restart_nets()
class VpnCommands(object):
+ """Class for managing VPNs."""
+
def __init__(self):
self.manager = manager.AuthManager()
self.instdir = model.InstanceDirectory()
self.pipe = pipelib.CloudPipe(cloud.CloudController())
def list(self):
+ """Print a listing of the VPNs for all projects."""
print "%-12s\t" % 'project',
print "%-12s\t" % 'ip:port',
print "%s" % 'state'
@@ -54,9 +53,10 @@ class VpnCommands(object):
print "%-12s\t" % project.name,
print "%s:%s\t" % (project.vpn_ip, project.vpn_port),
- vpn = self.__vpn_for(project.id)
+ vpn = self._vpn_for(project.id)
if vpn:
- out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'])
+ command = "ping -c1 -w1 %s > /dev/null; echo $?"
+ out, _err = utils.execute(command % vpn['private_dns_name'])
if out.strip() == '0':
net = 'up'
else:
@@ -70,25 +70,32 @@ class VpnCommands(object):
else:
print None
- def __vpn_for(self, project_id):
+ def _vpn_for(self, project_id):
+ """Get the VPN instance for a project ID."""
for instance in self.instdir.all:
- if (instance.state.has_key('image_id')
+ if ('image_id' in instance.state
and instance['image_id'] == FLAGS.vpn_image_id
- and not instance['state_description'] in ['shutting_down', 'shutdown']
+ and not instance['state_description'] in
+ ['shutting_down', 'shutdown']
and instance['project_id'] == project_id):
return instance
def spawn(self):
+ """Run all VPNs."""
for p in reversed(self.manager.get_projects()):
- if not self.__vpn_for(p.id):
- print 'spawning %s' % p.id
- self.pipe.launch_vpn_instance(p.id)
- time.sleep(10)
+ if not self._vpn_for(p.id):
+ print 'spawning %s' % p.id
+ self.pipe.launch_vpn_instance(p.id)
+ time.sleep(10)
def run(self, project_id):
+ """Start the VPN for a given project."""
self.pipe.launch_vpn_instance(project_id)
+
class RoleCommands(object):
+ """Class for managing roles."""
+
def __init__(self):
self.manager = manager.AuthManager()
@@ -111,25 +118,24 @@ class RoleCommands(object):
arguments: user, role [project]"""
self.manager.remove_role(user, role, project)
+
class UserCommands(object):
+ """Class for managing users."""
+
def __init__(self):
self.manager = manager.AuthManager()
- def __print_export(self, user):
- print 'export EC2_ACCESS_KEY=%s' % user.access
- print 'export EC2_SECRET_KEY=%s' % user.secret
-
def admin(self, name, access=None, secret=None):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, True)
- self.__print_export(user)
+ print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, False)
- self.__print_export(user)
+ print_export(user)
def delete(self, name):
"""deletes an existing user
@@ -141,7 +147,7 @@ class UserCommands(object):
arguments: name"""
user = self.manager.get_user(name)
if user:
- self.__print_export(user)
+ print_export(user)
else:
print "User %s doesn't exist" % name
@@ -151,53 +157,61 @@ class UserCommands(object):
for user in self.manager.get_users():
print user.name
+
+def print_export(user):
+ """Print export variables to use with API."""
+ print 'export EC2_ACCESS_KEY=%s' % user.access
+ print 'export EC2_SECRET_KEY=%s' % user.secret
+
+
class ProjectCommands(object):
+ """Class for managing projects."""
+
def __init__(self):
self.manager = manager.AuthManager()
def add(self, project, user):
- """adds user to project
+ """Adds user to project
arguments: project user"""
self.manager.add_to_project(user, project)
def create(self, name, project_manager, description=None):
- """creates a new project
+ """Creates a new project
arguments: name project_manager [description]"""
- user = self.manager.create_project(name, project_manager, description)
+ self.manager.create_project(name, project_manager, description)
def delete(self, name):
- """deletes an existing project
+ """Deletes an existing project
arguments: name"""
self.manager.delete_project(name)
+ def environment(self, project_id, user_id, filename='novarc'):
+ """Exports environment variables to an sourcable file
+ arguments: project_id user_id [filename='novarc]"""
+ rc = self.manager.get_environment_rc(project_id, user_id)
+ with open(filename, 'w') as f:
+ f.write(rc)
+
def list(self):
- """lists all projects
+ """Lists all projects
arguments: <none>"""
for project in self.manager.get_projects():
print project.name
def remove(self, project, user):
- """removes user from project
+ """Removes user from project
arguments: project user"""
self.manager.remove_from_project(user, project)
- def zip(self, project_id, user_id, filename='nova.zip'):
- """exports credentials for user to a zip file
+ def zipfile(self, project_id, user_id, filename='nova.zip'):
+ """Exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip]"""
- project = self.manager.get_project(project_id)
- if project:
- with open(filename, 'w') as f:
- f.write(project.get_credentials(user_id))
- else:
- print "Project %s doesn't exist" % project
-
-
-def usage(script_name):
- print script_name + " category action [<args>]"
+ zip_file = self.manager.get_credentials(project_id, user_id)
+ with open(filename, 'w') as f:
+ f.write(zip_file)
categories = [
- ('network', NetworkCommands),
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
@@ -206,62 +220,61 @@ categories = [
def lazy_match(name, key_value_tuples):
- """finds all objects that have a key that case insensitively contains [name]
- key_value_tuples is a list of tuples of the form (key, value)
+ """Finds all objects that have a key that case insensitively contains
+ [name] key_value_tuples is a list of tuples of the form (key, value)
returns a list of tuples of the form (key, value)"""
- return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0]
+ result = []
+ for (k, v) in key_value_tuples:
+ if k.lower().find(name.lower()) == 0:
+ result.append((k, v))
+ if len(result) == 0:
+ print "%s does not match any options:" % name
+ for k, _v in key_value_tuples:
+ print "\t%s" % k
+ sys.exit(2)
+ if len(result) > 1:
+ print "%s matched multiple options:" % name
+ for k, _v in result:
+ print "\t%s" % k
+ sys.exit(2)
+ return result
def methods_of(obj):
- """get all callable methods of an object that don't start with underscore
+ """Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)"""
- return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')]
+ result = []
+ for i in dir(obj):
+ if callable(getattr(obj, i)) and not i.startswith('_'):
+ result.append((i, getattr(obj, i)))
+ return result
-if __name__ == '__main__':
+def main():
+ """Parse options and call the appropriate class/method."""
utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:
- usage(script_name)
+ print script_name + " category action [<args>]"
print "Available categories:"
- for k, v in categories:
+ for k, _ in categories:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
matches = lazy_match(category, categories)
- if len(matches) == 0:
- print "%s does not match any categories:" % category
- for k, v in categories:
- print "\t%s" % k
- sys.exit(2)
- if len(matches) > 1:
- print "%s matched multiple categories:" % category
- for k, v in matches:
- print "\t%s" % k
- sys.exit(2)
# instantiate the command group object
category, fn = matches[0]
command_object = fn()
actions = methods_of(command_object)
if len(argv) < 1:
- usage(script_name)
+ print script_name + " category action [<args>]"
print "Available actions for %s category:" % category
- for k, v in actions:
+ for k, _v in actions:
print "\t%s" % k
sys.exit(2)
action = argv.pop(0)
matches = lazy_match(action, actions)
- if len(matches) == 0:
- print "%s does not match any actions" % action
- for k, v in actions:
- print "\t%s" % k
- sys.exit(2)
- if len(matches) > 1:
- print "%s matched multiple actions:" % action
- for k, v in matches:
- print "\t%s" % k
- sys.exit(2)
action, fn = matches[0]
# call the action with the remaining arguments
try:
@@ -272,3 +285,5 @@ if __name__ == '__main__':
print "%s %s: %s" % (category, action, fn.__doc__)
sys.exit(2)
+if __name__ == '__main__':
+ main()
diff --git a/bin/nova-network b/bin/nova-network
index 52d6cb70a..ba9063f56 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -21,12 +21,16 @@
Twistd daemon for the nova network nodes.
"""
+from nova import flags
from nova import twistd
+
from nova.network import service
+FLAGS = flags.FLAGS
+
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.NetworkService.create()
+ application = service.type_to_class(FLAGS.network_type).create()
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index c0fa815c0..02f2bcb48 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -30,15 +30,9 @@ from nova.objectstore import handler
FLAGS = flags.FLAGS
-def main():
- app = handler.get_application()
- print app
- return app
-
-# NOTE(soren): Stolen from nova-compute
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
utils.default_flagfile()
- application = main()
+ application = handler.get_application()
diff --git a/bin/nova-rsapi b/bin/nova-rsapi
index 306a1fc60..026880d5a 100755
--- a/bin/nova-rsapi
+++ b/bin/nova-rsapi
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -17,42 +18,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- WSGI daemon for the main API endpoint.
+ Daemon for the Rackspace API endpoint.
"""
-import logging
-from tornado import ioloop
-from wsgiref import simple_server
-
from nova import flags
-from nova import rpc
-from nova import server
from nova import utils
-from nova.auth import manager
+from nova import wsgi
from nova.endpoint import rackspace
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
-def main(_argv):
- user_manager = manager.AuthManager()
- api_instance = rackspace.Api(user_manager)
- conn = rpc.Connection.instance()
- rpc_consumer = rpc.AdapterConsumer(connection=conn,
- topic=FLAGS.cloud_topic,
- proxy=api_instance)
-
-# TODO: fire rpc response listener (without attach to tornado)
-# io_inst = ioloop.IOLoop.instance()
-# _injected = consumer.attach_to_tornado(io_inst)
-
- http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler)
- http_server.set_app(api_instance.handler)
- logging.debug('Started HTTP server on port %i' % FLAGS.cc_port)
- while True:
- http_server.handle_request()
-# io_inst.start()
-
if __name__ == '__main__':
utils.default_flagfile()
- server.serve('nova-rsapi', main)
+ wsgi.run_server(rackspace.API(), FLAGS.cc_port)
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 4c32ed9d9..b420924af 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -28,6 +28,8 @@ import json
from nova import datastore
+SCOPE_BASE = 0
+SCOPE_ONELEVEL = 1 # not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
@@ -188,15 +190,18 @@ class FakeLDAP(object):
Args:
dn -- dn to search under
- scope -- only SCOPE_SUBTREE is supported
+ scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
query -- query to filter objects by
fields -- fields to return. Returns all fields if not specified
"""
- if scope != SCOPE_SUBTREE:
+ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
redis = datastore.Redis.instance()
- keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+ if scope == SCOPE_BASE:
+ keys = ["%s%s" % (self.__redis_prefix, dn)]
+ else:
+ keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
objects = []
for key in keys:
# get the attributes from redis
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 055e8332b..453fa196c 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -182,7 +182,8 @@ class LdapDriver(object):
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist" % member_uid)
+ "because user %s doesn't exist"
+ % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -236,6 +237,26 @@ class LdapDriver(object):
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
+ def get_user_roles(self, uid, project_id=None):
+ """Retrieve list of roles for user (or user and project)"""
+ if project_id is None:
+ # NOTE(vish): This is unneccesarily slow, but since we can't
+ # guarantee that the global roles are located
+ # together in the ldap tree, we're doing this version.
+ roles = []
+ for role in FLAGS.allowed_roles:
+ role_dn = self.__role_to_dn(role)
+ if self.__is_in_group(uid, role_dn):
+ roles.append(role)
+ return roles
+ else:
+ project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ roles = self.__find_objects(project_dn,
+ '(&(&(objectclass=groupOfNames)'
+ '(!(objectclass=novaProject)))'
+ '(member=%s))' % self.__uid_to_dn(uid))
+ return [role['cn'][0] for role in roles]
+
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
@@ -253,45 +274,49 @@ class LdapDriver(object):
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_user_subtree))
- def delete_project(self, name):
+ def delete_project(self, project_id):
"""Delete a project"""
- project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
+ project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
- def __user_exists(self, name):
+ def __user_exists(self, uid):
"""Check if user exists"""
- return self.get_user(name) != None
+ return self.get_user(uid) != None
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
return self.get_user(uid) != None
return self.get_key_pair(uid, key_name) != None
- def __project_exists(self, name):
+ def __project_exists(self, project_id):
"""Check if project exists"""
- return self.get_project(name) != None
+ return self.get_project(project_id) != None
- def __find_object(self, dn, query = None):
+ def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
- objects = self.__find_objects(dn, query)
+ objects = self.__find_objects(dn, query, scope)
if len(objects) == 0:
return None
return objects[0]
- def __find_dns(self, dn, query=None):
+ def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
+ if scope is None: # one of the flags is 0!!
+ scope = self.ldap.SCOPE_SUBTREE
try:
- res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
return [dn for dn, attributes in res]
- def __find_objects(self, dn, query = None):
+ def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
+ if scope is None: # one of the flags is 0!!
+ scope = self.ldap.SCOPE_SUBTREE
try:
- res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the attributes
@@ -361,7 +386,8 @@ class LdapDriver(object):
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
- '(member=%s)' % self.__uid_to_dn(uid))
+ '(member=%s)' % self.__uid_to_dn(uid),
+ self.ldap.SCOPE_BASE)
return res != None
def __add_to_group(self, uid, group_dn):
@@ -391,7 +417,11 @@ class LdapDriver(object):
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
- self.__safe_remove_from_group(uid, group_dn)
+ # NOTE(vish): remove user from group and any sub_groups
+ sub_dns = self.__find_group_dns_with_member(
+ group_dn, uid)
+ for sub_dn in sub_dns:
+ self.__safe_remove_from_group(uid, sub_dn)
def __safe_remove_from_group(self, uid, group_dn):
"""Remove user from group, deleting group if user is last member"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 2da53a736..064fd78bc 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -24,22 +24,24 @@ import logging
import os
import shutil
import string
-import sys
import tempfile
import uuid
import zipfile
from nova import crypto
-from nova import datastore
from nova import exception
from nova import flags
-from nova import objectstore # for flags
from nova import utils
-from nova.auth import ldapdriver # for flags
from nova.auth import signer
+from nova.network import vpn
+
FLAGS = flags.FLAGS
+flags.DEFINE_list('allowed_roles',
+ ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
+ 'Allowed roles for project')
+
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
@@ -51,13 +53,14 @@ flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
-flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
+flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
+ 'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
@@ -65,19 +68,11 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-flags.DEFINE_integer('vpn_start_port', 1000,
- 'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
- 'End port for the cloudpipe VPN servers')
-
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
-flags.DEFINE_string('vpn_ip', '127.0.0.1',
- 'Public IP for the cloudpipe VPN servers')
-
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
@@ -106,6 +101,7 @@ class AuthBase(object):
class User(AuthBase):
"""Object representing a user"""
def __init__(self, id, name, access, secret, admin):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.access = access
@@ -166,6 +162,7 @@ class KeyPair(AuthBase):
fingerprint is stored. The user's private key is not saved.
"""
def __init__(self, id, name, owner_id, public_key, fingerprint):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.owner_id = owner_id
@@ -183,6 +180,7 @@ class KeyPair(AuthBase):
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
@@ -229,86 +227,6 @@ class Project(AuthBase):
self.member_ids)
-class NoMorePorts(exception.Error):
- pass
-
-
-class Vpn(datastore.BasicModel):
- """Manages vpn ips and ports for projects"""
- def __init__(self, project_id):
- self.project_id = project_id
- super(Vpn, self).__init__()
-
- @property
- def identifier(self):
- """Identifier used for key in redis"""
- return self.project_id
-
- @classmethod
- def create(cls, project_id):
- """Creates a vpn for project
-
- This method finds a free ip and port and stores the associated
- values in the datastore.
- """
- # TODO(vish): get list of vpn ips from redis
- port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
- vpn = cls(project_id)
- # save ip for project
- vpn['project'] = project_id
- vpn['ip'] = FLAGS.vpn_ip
- vpn['port'] = port
- vpn.save()
- return vpn
-
- @classmethod
- def find_free_port_for_ip(cls, ip):
- """Finds a free port for a given ip from the redis set"""
- # TODO(vish): these redis commands should be generalized and
- # placed into a base class. Conceptually, it is
- # similar to an association, but we are just
- # storing a set of values instead of keys that
- # should be turned into objects.
- redis = datastore.Redis.instance()
- key = 'ip:%s:ports' % ip
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- if (not redis.exists(key) and
- not redis.exists(cls._redis_association_name('ip', ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(key, i)
-
- port = redis.spop(key)
- if not port:
- raise NoMorePorts()
- return port
-
- @classmethod
- def num_ports_for_ip(cls, ip):
- """Calculates the number of free ports for a given ip"""
- return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
- @property
- def ip(self):
- """The ip assigned to the project"""
- return self['ip']
-
- @property
- def port(self):
- """The port assigned to the project"""
- return int(self['port'])
-
- def save(self):
- """Saves the association to the given ip"""
- self.associate_with('ip', self.ip)
- super(Vpn, self).save()
-
- def destroy(self):
- """Cleans up datastore and adds port back to pool"""
- self.unassociate_with('ip', self.ip)
- datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
- super(Vpn, self).destroy()
-
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
@@ -321,12 +239,11 @@ class AuthManager(object):
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
- _instance=None
+ _instance = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance:
- cls._instance = super(AuthManager, cls).__new__(
- cls, *args, **kwargs)
+ cls._instance = super(AuthManager, cls).__new__(cls)
return cls._instance
def __init__(self, driver=None, *args, **kwargs):
@@ -336,7 +253,7 @@ class AuthManager(object):
reset the driver if it is not set or a new driver is specified.
"""
if driver or not getattr(self, 'driver', None):
- self.driver = utils.import_class(driver or FLAGS.auth_driver)
+ self.driver = utils.import_class(driver or FLAGS.auth_driver)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
@@ -419,6 +336,12 @@ class AuthManager(object):
raise exception.NotAuthorized('Signature does not match')
return (user, project)
+ def get_access_key(self, user, project):
+ """Get an access key that includes user and project"""
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ return "%s:%s" % (user.access, Project.safe_id(project))
+
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass rbac
@@ -513,6 +436,10 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to add local role.
"""
+ if role not in FLAGS.allowed_roles:
+ raise exception.NotFound("The %s role can not be found" % role)
+ if project is not None and role in FLAGS.global_roles:
+ raise exception.NotFound("The %s role is global only" % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -536,6 +463,19 @@ class AuthManager(object):
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ def get_roles(self, project_roles=True):
+ """Get list of allowed roles"""
+ if project_roles:
+ return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
+ else:
+ return FLAGS.allowed_roles
+
+ def get_user_roles(self, user, project=None):
+ """Get user global or per-project roles"""
+ with self.driver() as drv:
+ return drv.get_user_roles(User.safe_id(user),
+ Project.safe_id(project))
+
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
@@ -581,8 +521,6 @@ class AuthManager(object):
description,
member_users)
if project_dict:
- if FLAGS.use_vpn:
- Vpn.create(project_dict['id'])
return Project(**project_dict)
def add_to_project(self, user, project):
@@ -619,10 +557,10 @@ class AuthManager(object):
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
- vpn = Vpn.lookup(Project.safe_id(project))
- if not vpn:
- return None, None
- return (vpn.ip, vpn.port)
+ network_data = vpn.NetworkData.lookup(Project.safe_id(project))
+ if not network_data:
+ raise exception.NotFound('project network data has not been set')
+ return (network_data.ip, network_data.port)
def delete_project(self, project):
"""Deletes a project"""
@@ -753,25 +691,27 @@ class AuthManager(object):
rc = self.__generate_rc(user.access, user.secret, pid)
private_key, signed_cert = self._generate_x509_cert(user.id, pid)
- vpn = Vpn.lookup(pid)
- if not vpn:
- raise exception.Error("No vpn data allocated for project %s" %
- project.name)
- configfile = open(FLAGS.vpn_client_template,"r")
- s = string.Template(configfile.read())
- configfile.close()
- config = s.substitute(keyfile=FLAGS.credential_key_file,
- certfile=FLAGS.credential_cert_file,
- ip=vpn.ip,
- port=vpn.port)
-
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- zippy.writestr("nebula-client.conf", config)
+
+ network_data = vpn.NetworkData.lookup(pid)
+ if network_data:
+ configfile = open(FLAGS.vpn_client_template,"r")
+ s = string.Template(configfile.read())
+ configfile.close()
+ config = s.substitute(keyfile=FLAGS.credential_key_file,
+ certfile=FLAGS.credential_cert_file,
+ ip=network_data.ip,
+ port=network_data.port)
+ zippy.writestr(FLAGS.credential_vpn_file, config)
+ else:
+ logging.warn("No vpn data for project %s" %
+ pid)
+
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
@@ -780,6 +720,15 @@ class AuthManager(object):
shutil.rmtree(tmpdir)
return buffer
+ def get_environment_rc(self, user, project=None):
+ """Get credential zip for user in project"""
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if project is None:
+ project = user.id
+ pid = Project.safe_id(project)
+ return self.__generate_rc(user.access, user.secret, pid)
+
def __generate_rc(self, access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 3b9bc8f2c..634f22f0d 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,7 +48,8 @@ import hashlib
import hmac
import logging
import urllib
-import boto
+import boto # NOTE(vish): for new boto
+import boto.utils # NOTE(vish): for old boto
from nova.exception import Error
diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template
index a763e8a4d..307f9d03a 100644
--- a/nova/compute/libvirt.xml.template
+++ b/nova/compute/libvirt.xml.template
@@ -1,4 +1,4 @@
-<domain type='kvm'>
+<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>hvm</type>
@@ -12,7 +12,6 @@
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
- <emulator>/usr/bin/kvm</emulator>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='vda' bus='virtio'/>
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 212830d3c..266a93b9a 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -41,9 +41,6 @@ True
"""
import datetime
-import logging
-import time
-import redis
import uuid
from nova import datastore
@@ -72,19 +69,22 @@ class InstanceDirectory(object):
for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
yield Instance(instance_id)
- def by_node(self, node_id):
+ @datastore.absorb_connection_error
+ def by_node(self, node):
"""returns a list of instances for a node"""
+ for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node):
+ yield Instance(instance_id)
- for instance in self.all:
- if instance['node_name'] == node_id:
- yield instance
-
- def by_ip(self, ip_address):
+ def by_ip(self, ip):
"""returns an instance object that is using the IP"""
- for instance in self.all:
- if instance['private_dns_name'] == ip_address:
- return instance
- return None
+ # NOTE(vish): The ip association should be just a single value, but
+ # to maintain consistency it is using the standard
+ # association and the ugly method for retrieving
+ # the first item in the set below.
+ result = datastore.Redis.instance().smembers('ip:%s:instances' % ip)
+ if not result:
+ return None
+ return Instance(list(result)[0])
def by_volume(self, volume_id):
"""returns the instance a volume is attached to"""
@@ -122,7 +122,8 @@ class Instance(datastore.BasicModel):
'instance_id': self.instance_id,
'node_name': 'unassigned',
'project_id': 'unassigned',
- 'user_id': 'unassigned'}
+ 'user_id': 'unassigned',
+ 'private_dns_name': 'unassigned'}
@property
def identifier(self):
@@ -148,19 +149,23 @@ class Instance(datastore.BasicModel):
"""Call into superclass to save object, then save associations"""
# NOTE(todd): doesn't track migration between projects/nodes,
# it just adds the first one
- should_update_project = self.is_new_record()
- should_update_node = self.is_new_record()
+ is_new = self.is_new_record()
+ node_set = (self.state['node_name'] != 'unassigned' and
+ self.initial_state.get('node_name', 'unassigned')
+ == 'unassigned')
success = super(Instance, self).save()
- if success and should_update_project:
+ if success and is_new:
self.associate_with("project", self.project)
- if success and should_update_node:
- self.associate_with("node", self['node_name'])
+ self.associate_with("ip", self.state['private_dns_name'])
+ if success and node_set:
+ self.associate_with("node", self.state['node_name'])
return True
def destroy(self):
"""Destroy associations, then destroy the object"""
self.unassociate_with("project", self.project)
- self.unassociate_with("node", self['node_name'])
+ self.unassociate_with("node", self.state['node_name'])
+ self.unassociate_with("ip", self.state['private_dns_name'])
return super(Instance, self).destroy()
class Host(datastore.BasicModel):
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 9b162edc7..820116453 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -39,9 +39,9 @@ from nova import service
from nova import utils
from nova.compute import disk
from nova.compute import model
-from nova.compute import network
from nova.compute import power_state
from nova.compute.instance_types import INSTANCE_TYPES
+from nova.network import service as network_service
from nova.objectstore import image # for image_path flag
from nova.virt import connection as virt_connection
from nova.volume import service as volume_service
@@ -117,12 +117,17 @@ class ComputeService(service.Service):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
inst = self.instdir.get(instance_id)
- if not FLAGS.simple_network:
- # TODO: Get the real security group of launch in here
- security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
- inst['project_id'],
- security_group).express()
+ # TODO: Get the real security group of launch in here
+ security_group = "default"
+ # NOTE(vish): passing network type allows us to express the
+ # network without making a call to network to find
+ # out which type of network to setup
+ network_service.setup_compute_network(
+ inst.get('network_type', 'vlan'),
+ inst['user_id'],
+ inst['project_id'],
+ security_group)
+
inst['node_name'] = FLAGS.node_name
inst.save()
# TODO(vish) check to make sure the availability zone matches
diff --git a/nova/datastore.py b/nova/datastore.py
index f6c11d2c9..9bda0c858 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -90,13 +90,15 @@ class BasicModel(object):
@absorb_connection_error
def __init__(self):
- self.initial_state = {}
- self.state = Redis.instance().hgetall(self.__redis_key)
- if self.state:
- self.initial_state = self.state
+ state = Redis.instance().hgetall(self.__redis_key)
+ if state:
+ self.initial_state = state
+ self.state = dict(self.initial_state)
else:
+ self.initial_state = {}
self.state = self.default_state()
+
def default_state(self):
"""You probably want to define this in your subclass"""
return {}
@@ -242,7 +244,7 @@ class BasicModel(object):
for key, val in self.state.iteritems():
Redis.instance().hset(self.__redis_key, key, val)
self.add_to_index()
- self.initial_state = self.state
+ self.initial_state = dict(self.state)
return True
@absorb_connection_error
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 67fc04502..ad9188ff3 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -36,17 +36,18 @@ from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute import model
-from nova.compute import network
from nova.compute.instance_types import INSTANCE_TYPES
-from nova.compute import service as compute_service
from nova.endpoint import images
-from nova.volume import service as volume_service
+from nova.network import service as network_service
+from nova.network import model as network_model
+from nova.volume import service
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
+
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
try:
@@ -64,7 +65,6 @@ class CloudController(object):
"""
def __init__(self):
self.instdir = model.InstanceDirectory()
- self.network = network.PublicNetworkController()
self.setup()
@property
@@ -76,7 +76,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = volume_service.get_volume(volume_id)
+ volume = service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -103,15 +103,16 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
- line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ line = '%s slots=%d' % (instance['private_dns_name'],
+ INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
result[instance['key_name']] = [line]
return result
- def get_metadata(self, ip):
- i = self.get_instance_by_ip(ip)
+ def get_metadata(self, ipaddress):
+ i = self.get_instance_by_ip(ipaddress)
if i is None:
return None
mpi = self._get_mpi_data(i['project_id'])
@@ -148,7 +149,7 @@ class CloudController(object):
},
'public-hostname': i.get('dns_name', ''),
'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP
- 'public-keys' : keys,
+ 'public-keys': keys,
'ramdisk-id': i.get('ramdisk_id', ''),
'reservation-id': i['reservation_id'],
'security-groups': i.get('groups', ''),
@@ -204,26 +205,22 @@ class CloudController(object):
'keyFingerprint': key_pair.fingerprint,
})
- return { 'keypairsSet': result }
+ return {'keypairsSet': result}
@rbac.allow('all')
def create_key_pair(self, context, key_name, **kwargs):
- try:
- d = defer.Deferred()
- p = context.handler.application.settings.get('pool')
- def _complete(kwargs):
- if 'exception' in kwargs:
- d.errback(kwargs['exception'])
- return
- d.callback({'keyName': key_name,
- 'keyFingerprint': kwargs['fingerprint'],
- 'keyMaterial': kwargs['private_key']})
- p.apply_async(_gen_key, [context.user.id, key_name],
- callback=_complete)
- return d
-
- except users.UserError, e:
- raise
+ dcall = defer.Deferred()
+ pool = context.handler.application.settings.get('pool')
+ def _complete(kwargs):
+ if 'exception' in kwargs:
+ dcall.errback(kwargs['exception'])
+ return
+ dcall.callback({'keyName': key_name,
+ 'keyFingerprint': kwargs['fingerprint'],
+ 'keyMaterial': kwargs['private_key']})
+ pool.apply_async(_gen_key, [context.user.id, key_name],
+ callback=_complete)
+ return dcall
@rbac.allow('all')
def delete_key_pair(self, context, key_name, **kwargs):
@@ -233,7 +230,7 @@ class CloudController(object):
@rbac.allow('all')
def describe_security_groups(self, context, group_names, **kwargs):
- groups = { 'securityGroupSet': [] }
+ groups = {'securityGroupSet': []}
# Stubbed for now to unblock other things.
return groups
@@ -252,7 +249,7 @@ class CloudController(object):
instance = self._get_instance(context, instance_id[0])
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "get_console_output",
- "args" : {"instance_id": instance_id[0]}})
+ "args": {"instance_id": instance_id[0]}})
def _get_user_id(self, context):
if context and context.user:
@@ -286,30 +283,29 @@ class CloudController(object):
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': volume['delete_on_termination'],
- 'device' : volume['mountpoint'],
- 'instanceId' : volume['instance_id'],
- 'status' : 'attached',
- 'volume_id' : volume['volume_id']}]
+ 'device': volume['mountpoint'],
+ 'instanceId': volume['instance_id'],
+ 'status': 'attached',
+ 'volume_id': volume['volume_id']}]
else:
v['attachmentSet'] = [{}]
return v
@rbac.allow('projectmanager', 'sysadmin')
+ @defer.inlineCallbacks
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell service to create it
- res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
- "args" : {"size": size,
+ result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
+ "args": {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
- def _format_result(result):
- volume = self._get_volume(context, result['result'])
- return {'volumeSet': [self.format_volume(context, volume)]}
- res.addCallback(_format_result)
- return res
+ # NOTE(vish): rpc returned value is in the result key in the dictionary
+ volume = self._get_volume(context, result['result'])
+ defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
- address = self.network.get_host(public_ip)
+ address = network_model.PublicAddress.lookup(public_ip)
if address and (context.user.is_admin() or address['project_id'] == context.project.id):
return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -331,7 +327,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- volume = volume_service.get_volume(volume_id)
+ volume = service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -350,15 +346,15 @@ class CloudController(object):
compute_node = instance['node_name']
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
- "args" : {"volume_id": volume_id,
- "instance_id" : instance_id,
- "mountpoint" : device}})
- return defer.succeed({'attachTime' : volume['attach_time'],
- 'device' : volume['mountpoint'],
- 'instanceId' : instance_id,
- 'requestId' : context.request_id,
- 'status' : volume['attach_status'],
- 'volumeId' : volume_id})
+ "args": {"volume_id": volume_id,
+ "instance_id": instance_id,
+ "mountpoint": device}})
+ return defer.succeed({'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
+ 'requestId': context.request_id,
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id})
@rbac.allow('projectmanager', 'sysadmin')
@@ -374,18 +370,18 @@ class CloudController(object):
instance = self._get_instance(context, instance_id)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "detach_volume",
- "args" : {"instance_id": instance_id,
+ "args": {"instance_id": instance_id,
"volume_id": volume_id}})
except exception.NotFound:
# If the instance doesn't exist anymore,
# then we need to call detach blind
volume.finish_detach()
- return defer.succeed({'attachTime' : volume['attach_time'],
- 'device' : volume['mountpoint'],
- 'instanceId' : instance_id,
- 'requestId' : context.request_id,
- 'status' : volume['attach_status'],
- 'volumeId' : volume_id})
+ return defer.succeed({'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
+ 'requestId': context.request_id,
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -418,7 +414,7 @@ class CloudController(object):
'code': instance.get('state', 0),
'name': instance.get('state_description', 'pending')
}
- i['public_dns_name'] = self.network.get_public_ip_for_instance(
+ i['public_dns_name'] = network_model.get_public_ip_for_instance(
i['instance_id'])
i['private_dns_name'] = instance.get('private_dns_name', None)
if not i['public_dns_name']:
@@ -427,7 +423,8 @@ class CloudController(object):
i['key_name'] = instance.get('key_name', None)
if context.user.is_admin():
i['key_name'] = '%s (%s, %s)' % (i['key_name'],
- instance.get('project_id', None), instance.get('node_name',''))
+ instance.get('project_id', None),
+ instance.get('node_name', ''))
i['product_codes_set'] = self._convert_to_set(
instance.get('product_codes', None), 'product_code')
i['instance_type'] = instance.get('instance_type', None)
@@ -444,7 +441,7 @@ class CloudController(object):
reservations[res_id] = r
reservations[res_id]['instances_set'].append(i)
- instance_response = {'reservationSet' : list(reservations.values()) }
+ instance_response = {'reservationSet': list(reservations.values())}
return instance_response
@rbac.allow('all')
@@ -453,13 +450,13 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- for address in self.network.host_objs:
+ for address in network_model.PublicAddress.all():
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
- address['project_id'] == self.project.id):
+ address['project_id'] == context.project.id):
address_rv = {
'public_ip': address['address'],
- 'instance_id' : address.get('instance_id', 'free')
+ 'instance_id': address.get('instance_id', 'free')
}
if context.user.is_admin():
address_rv['instance_id'] = "%s (%s, %s)" % (
@@ -471,41 +468,63 @@ class CloudController(object):
return {'addressesSet': addresses}
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
- address = self.network.allocate_ip(
- context.user.id, context.project.id, 'public')
- return defer.succeed({'addressSet': [{'publicIp' : address}]})
+ network_topic = yield self._get_network_topic(context)
+ alloc_result = yield rpc.call(network_topic,
+ {"method": "allocate_elastic_ip",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id}})
+ public_ip = alloc_result['result']
+ defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def release_address(self, context, public_ip, **kwargs):
- self.network.deallocate_ip(public_ip)
- return defer.succeed({'releaseResponse': ["Address released."]})
+ # NOTE(vish): Should we make sure this works?
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "deallocate_elastic_ip",
+ "args": {"elastic_ip": public_ip}})
+ defer.returnValue({'releaseResponse': ["Address released."]})
@rbac.allow('netadmin')
- def associate_address(self, context, instance_id, **kwargs):
+ @defer.inlineCallbacks
+ def associate_address(self, context, instance_id, public_ip, **kwargs):
instance = self._get_instance(context, instance_id)
- self.network.associate_address(
- kwargs['public_ip'],
- instance['private_dns_name'],
- instance_id)
- return defer.succeed({'associateResponse': ["Address associated."]})
+ address = self._get_address(context, public_ip)
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "associate_elastic_ip",
+ "args": {"elastic_ip": address['address'],
+ "fixed_ip": instance['private_dns_name'],
+ "instance_id": instance['instance_id']}})
+ defer.returnValue({'associateResponse': ["Address associated."]})
@rbac.allow('netadmin')
+ @defer.inlineCallbacks
def disassociate_address(self, context, public_ip, **kwargs):
address = self._get_address(context, public_ip)
- self.network.disassociate_address(public_ip)
- # TODO - Strip the IP from the instance
- return defer.succeed({'disassociateResponse': ["Address disassociated."]})
-
- def release_ip(self, context, private_ip, **kwargs):
- self.network.release_ip(private_ip)
- return defer.succeed({'releaseResponse': ["Address released."]})
-
- def lease_ip(self, context, private_ip, **kwargs):
- self.network.lease_ip(private_ip)
- return defer.succeed({'leaseResponse': ["Address leased."]})
+ network_topic = yield self._get_network_topic(context)
+ rpc.cast(network_topic,
+ {"method": "disassociate_elastic_ip",
+ "args": {"elastic_ip": address['address']}})
+ defer.returnValue({'disassociateResponse': ["Address disassociated."]})
+
+ @defer.inlineCallbacks
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ host = network_service.get_host_for_project(context.project.id)
+ if not host:
+ result = yield rpc.call(FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id}})
+ host = result['result']
+ defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
+ @defer.inlineCallbacks
def run_instances(self, context, **kwargs):
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
@@ -537,15 +556,20 @@ class CloudController(object):
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
+ network_topic = yield self._get_network_topic(context)
# TODO: Get the real security group of launch in here
security_group = "default"
- if FLAGS.simple_network:
- bridge_name = FLAGS.simple_network_bridge
- else:
- net = network.BridgedNetwork.get_network_for_project(
- context.user.id, context.project.id, security_group)
- bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
+ vpn = False
+ if image_id == FLAGS.vpn_image_id:
+ vpn = True
+ allocate_result = yield rpc.call(network_topic,
+ {"method": "allocate_fixed_ip",
+ "args": {"user_id": context.user.id,
+ "project_id": context.project.id,
+ "security_group": security_group,
+ "vpn": vpn}})
+ allocate_data = allocate_result['result']
inst = self.instdir.new()
inst['image_id'] = image_id
inst['kernel_id'] = kernel_id
@@ -558,65 +582,61 @@ class CloudController(object):
inst['key_name'] = kwargs.get('key_name', '')
inst['user_id'] = context.user.id
inst['project_id'] = context.project.id
- inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
- inst['bridge_name'] = bridge_name
- if FLAGS.simple_network:
- address = network.allocate_simple_ip()
- else:
- if inst['image_id'] == FLAGS.vpn_image_id:
- address = network.allocate_vpn_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- else:
- address = network.allocate_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- inst['private_dns_name'] = str(address)
- # TODO: allocate expresses on the router node
+ inst['security_group'] = security_group
+ for (key, value) in allocate_data.iteritems():
+ inst[key] = value
+
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
- "args": {"instance_id" : inst.instance_id}})
+ "args": {"instance_id": inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make Network figure out the network name from ip.
- return defer.succeed(self._format_instances(
- context, reservation_id))
+ defer.returnValue(self._format_instances(context, reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
+ @defer.inlineCallbacks
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
+ network_topic = yield self._get_network_topic(context)
for i in instance_id:
logging.debug("Going to try and terminate %s" % i)
try:
instance = self._get_instance(context, i)
except exception.NotFound:
- logging.warning("Instance %s was not found during terminate" % i)
+ logging.warning("Instance %s was not found during terminate"
+ % i)
continue
- try:
- self.network.disassociate_address(
- instance.get('public_dns_name', 'bork'))
- except:
- pass
- if instance.get('private_dns_name', None):
- logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
- if FLAGS.simple_network:
- network.deallocate_simple_ip(instance.get('private_dns_name', None))
- else:
- try:
- self.network.deallocate_ip(instance.get('private_dns_name', None))
- except Exception, _err:
- pass
- if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
+ elastic_ip = network_model.get_public_ip_for_instance(i)
+ if elastic_ip:
+ logging.debug("Disassociating address %s" % elastic_ip)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(network_topic,
+ {"method": "disassociate_elastic_ip",
+ "args": {"elastic_ip": elastic_ip}})
+
+ fixed_ip = instance.get('private_dns_name', None)
+ if fixed_ip:
+ logging.debug("Deallocating address %s" % fixed_ip)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # actually removed. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(network_topic,
+ {"method": "deallocate_fixed_ip",
+ "args": {"fixed_ip": fixed_ip}})
+
+ if instance.get('node_name', 'unassigned') != 'unassigned':
+ # NOTE(joshua?): It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
- {"method": "terminate_instance",
- "args" : {"instance_id": i}})
+ {"method": "terminate_instance",
+ "args": {"instance_id": i}})
else:
instance.destroy()
- return defer.succeed(True)
+ defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
@@ -625,7 +645,7 @@ class CloudController(object):
instance = self._get_instance(context, i)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "reboot_instance",
- "args" : {"instance_id": i}})
+ "args": {"instance_id": i}})
return defer.succeed(True)
@rbac.allow('projectmanager', 'sysadmin')
@@ -635,7 +655,7 @@ class CloudController(object):
volume_node = volume['node_name']
rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
- "args" : {"volume_id": volume_id}})
+ "args": {"volume_id": volume_id}})
return defer.succeed(True)
@rbac.allow('all')
@@ -668,9 +688,9 @@ class CloudController(object):
image = images.list(context, image_id)[0]
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
- result = { 'image_id': image_id, 'launchPermission': [] }
+ result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
- result['launchPermission'].append({ 'group': 'all' })
+ result['launchPermission'].append({'group': 'all'})
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
@@ -678,6 +698,8 @@ class CloudController(object):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
+        if 'user_group' not in kwargs:
+ raise exception.ApiError('user or group not specified')
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
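
The cloud controller above no longer touches the network model directly: it resolves a per-project topic via _get_network_topic and then fires one-way casts (allocate_fixed_ip, deallocate_fixed_ip, disassociate_elastic_ip) at whichever host owns the project's network. A minimal sketch of that cast pattern; the topic suffix and the fixed ip below are placeholders, not values from a real deployment:

    # Sketch only; 'host-1' and '10.0.0.5' are placeholders.
    from nova import rpc

    network_topic = 'network.host-1'   # i.e. '%s.%s' % (FLAGS.network_topic, host)
    rpc.cast(network_topic,
             {"method": "deallocate_fixed_ip",
              "args": {"fixed_ip": "10.0.0.5"}})
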
diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py
index 32f7cc228..fe7cb5d11 100644
--- a/nova/endpoint/images.py
+++ b/nova/endpoint/images.py
@@ -27,6 +27,7 @@ import urllib
from nova import flags
from nova import utils
+from nova.auth import manager
FLAGS = flags.FLAGS
@@ -75,13 +76,16 @@ def deregister(context, image_id):
query_args=qs({'image_id': image_id}))
def conn(context):
- return boto.s3.connection.S3Connection (
- aws_access_key_id=str('%s:%s' % (context.user.access, context.project.name)),
- aws_secret_access_key=str(context.user.secret),
- is_secure=False,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- port=FLAGS.s3_port,
- host=FLAGS.s3_host)
+ access = manager.AuthManager().get_access_key(context.user,
+ context.project)
+ secret = str(context.user.secret)
+ calling = boto.s3.connection.OrdinaryCallingFormat()
+ return boto.s3.connection.S3Connection(aws_access_key_id=access,
+ aws_secret_access_key=secret,
+ is_secure=False,
+ calling_format=calling,
+ port=FLAGS.s3_port,
+ host=FLAGS.s3_host)
def qs(params):
diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py
index de05ba2da..75b828e91 100644
--- a/nova/endpoint/rackspace.py
+++ b/nova/endpoint/rackspace.py
@@ -17,153 +17,95 @@
# under the License.
"""
-Rackspace API
+Rackspace API Endpoint
"""
-import base64
import json
-import logging
-import multiprocessing
-import os
import time
-import tornado.web
-from twisted.internet import defer
-from nova import datastore
-from nova import exception
+import webob.dec
+import webob.exc
+
from nova import flags
from nova import rpc
from nova import utils
+from nova import wsgi
from nova.auth import manager
-from nova.compute import model
-from nova.compute import network
-from nova.endpoint import images
-from nova.endpoint import wsgi
+from nova.compute import model as compute
+from nova.network import model as network
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
-# TODO(todd): subclass Exception so we can bubble meaningful errors
-
-
-class Api(object):
-
- def __init__(self, rpc_mechanism):
- self.controllers = {
- "v1.0": RackspaceAuthenticationApi(),
- "servers": RackspaceCloudServerApi()
- }
- self.rpc_mechanism = rpc_mechanism
-
- def handler(self, environ, responder):
- environ['nova.context'] = self.build_context(environ)
- controller, path = wsgi.Util.route(
- environ['PATH_INFO'],
- self.controllers
- )
- if not controller:
- # TODO(todd): Exception (404)
- raise Exception("Missing Controller")
- rv = controller.process(path, environ)
- if type(rv) is tuple:
- responder(rv[0], rv[1])
- rv = rv[2]
- else:
- responder("200 OK", [])
- return rv
-
- def build_context(self, env):
- rv = {}
- if env.has_key("HTTP_X_AUTH_TOKEN"):
- rv['user'] = manager.AuthManager().get_user_from_access_key(
- env['HTTP_X_AUTH_TOKEN']
- )
- if rv['user']:
- rv['project'] = manager.AuthManager().get_project(
- rv['user'].name
- )
- return rv
-
-
-class RackspaceApiEndpoint(object):
- def process(self, path, env):
- if not self.check_authentication(env):
- # TODO(todd): Exception (Unauthorized)
- raise Exception("Unable to authenticate")
-
- if len(path) == 0:
- return self.index(env)
-
- action = path.pop(0)
- if hasattr(self, action):
- method = getattr(self, action)
- return method(path, env)
- else:
- # TODO(todd): Exception (404)
- raise Exception("Missing method %s" % path[0])
-
- def check_authentication(self, env):
- if hasattr(self, "process_without_authentication") \
- and getattr(self, "process_without_authentication"):
- return True
- if not env['nova.context']['user']:
- return False
- return True
-
-
-class RackspaceAuthenticationApi(RackspaceApiEndpoint):
+class API(wsgi.Middleware):
+ """Entry point for all requests."""
def __init__(self):
- self.process_without_authentication = True
-
- # TODO(todd): make a actual session with a unique token
- # just pass the auth key back through for now
- def index(self, env):
- response = '204 No Content'
- headers = [
- ('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']),
- ('X-Storage-Url', 'http://%s' % env['HTTP_HOST']),
- ('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']),
- ('X-Auth-Token', env['HTTP_X_AUTH_KEY'])
- ]
- body = ""
- return (response, headers, body)
-
-
-class RackspaceCloudServerApi(RackspaceApiEndpoint):
+ super(API, self).__init__(Router(webob.exc.HTTPNotFound()))
+
+ def __call__(self, environ, start_response):
+ context = {}
+ if "HTTP_X_AUTH_TOKEN" in environ:
+ context['user'] = manager.AuthManager().get_user_from_access_key(
+ environ['HTTP_X_AUTH_TOKEN'])
+ if context['user']:
+ context['project'] = manager.AuthManager().get_project(
+ context['user'].name)
+ if "user" not in context:
+ return webob.exc.HTTPForbidden()(environ, start_response)
+ environ['nova.context'] = context
+ return self.application(environ, start_response)
+
+
+class Router(wsgi.Router):
+ """Route requests to the next WSGI application."""
+
+ def _build_map(self):
+ """Build routing map for authentication and cloud."""
+ self._connect("/v1.0", controller=AuthenticationAPI())
+ cloud = CloudServerAPI()
+ self._connect("/servers", controller=cloud.launch_server,
+ conditions={"method": ["POST"]})
+ self._connect("/servers/{server_id}", controller=cloud.delete_server,
+ conditions={'method': ["DELETE"]})
+ self._connect("/servers", controller=cloud)
+
+
+class AuthenticationAPI(wsgi.Application):
+ """Handle all authorization requests through WSGI applications."""
+
+ @webob.dec.wsgify
+ def __call__(self, req): # pylint: disable-msg=W0221
+        # TODO(todd): make an actual session with a unique token
+ # just pass the auth key back through for now
+ res = webob.Response()
+ res.status = '204 No Content'
+ res.headers.add('X-Server-Management-Url', req.host_url)
+ res.headers.add('X-Storage-Url', req.host_url)
+        res.headers.add('X-CDN-Management-Url', req.host_url)
+ res.headers.add('X-Auth-Token', req.headers['X-Auth-Key'])
+ return res
+
+
+class CloudServerAPI(wsgi.Application):
+ """Handle all server requests through WSGI applications."""
def __init__(self):
- self.instdir = model.InstanceDirectory()
+ super(CloudServerAPI, self).__init__()
+ self.instdir = compute.InstanceDirectory()
self.network = network.PublicNetworkController()
- def index(self, env):
- if env['REQUEST_METHOD'] == 'GET':
- return self.detail(env)
- elif env['REQUEST_METHOD'] == 'POST':
- return self.launch_server(env)
-
- def detail(self, args, env):
- value = {
- "servers":
- []
- }
+ @webob.dec.wsgify
+ def __call__(self, req): # pylint: disable-msg=W0221
+ value = {"servers": []}
for inst in self.instdir.all:
value["servers"].append(self.instance_details(inst))
-
return json.dumps(value)
- ##
- ##
-
- def launch_server(self, env):
- data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH'])))
- inst = self.build_server_instance(data, env['nova.context'])
- self.schedule_launch_of_instance(inst)
- return json.dumps({"server": self.instance_details(inst)})
-
- def instance_details(self, inst):
+ def instance_details(self, inst): # pylint: disable-msg=R0201
+ """Build the data structure to represent details for an instance."""
return {
"id": inst.get("instance_id", None),
"imageId": inst.get("image_id", None),
@@ -171,11 +113,9 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
"hostId": inst.get("node_name", None),
"status": inst.get("state", "pending"),
"addresses": {
- "public": [self.network.get_public_ip_for_instance(
- inst.get("instance_id", None)
- )],
- "private": [inst.get("private_dns_name", None)]
- },
+ "public": [network.get_public_ip_for_instance(
+ inst.get("instance_id", None))],
+ "private": [inst.get("private_dns_name", None)]},
# implemented only by Rackspace, not AWS
"name": inst.get("name", "Not-Specified"),
@@ -184,11 +124,22 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
"progress": "Not-Supported",
"metadata": {
"Server Label": "Not-Supported",
- "Image Version": "Not-Supported"
- }
- }
+ "Image Version": "Not-Supported"}}
+
+ @webob.dec.wsgify
+ def launch_server(self, req):
+ """Launch a new instance."""
+ data = json.loads(req.body)
+ inst = self.build_server_instance(data, req.environ['nova.context'])
+ rpc.cast(
+ FLAGS.compute_topic, {
+ "method": "run_instance",
+ "args": {"instance_id": inst.instance_id}})
+
+ return json.dumps({"server": self.instance_details(inst)})
def build_server_instance(self, env, context):
+ """Build instance data structure and save it to the data store."""
reservation = utils.generate_uid('r')
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
inst = self.instdir.new()
@@ -200,27 +151,33 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
inst['reservation_id'] = reservation
inst['launch_time'] = ltime
inst['mac_address'] = utils.generate_mac()
- address = network.allocate_ip(
+ address = self.network.allocate_ip(
inst['user_id'],
inst['project_id'],
- mac=inst['mac_address']
- )
+ mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
inst['user_id'],
inst['project_id'],
- 'default' # security group
- )['bridge_name']
+ 'default')['bridge_name']
# key_data, key_name, ami_launch_index
# TODO(todd): key data or root password
inst.save()
return inst
- def schedule_launch_of_instance(self, inst):
- rpc.cast(
- FLAGS.compute_topic,
- {
- "method": "run_instance",
- "args": {"instance_id": inst.instance_id}
- }
- )
+ @webob.dec.wsgify
+ @wsgi.route_args
+ def delete_server(self, req, route_args): # pylint: disable-msg=R0201
+ """Delete an instance."""
+ owner_hostname = None
+ instance = compute.Instance.lookup(route_args['server_id'])
+ if instance:
+ owner_hostname = instance["node_name"]
+ if not owner_hostname:
+ return webob.exc.HTTPNotFound("Did not find image, or it was "
+ "not in a running state.")
+ rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname)
+ rpc.cast(rpc_transport,
+ {"method": "reboot_instance",
+ "args": {"instance_id": route_args['server_id']}})
+        return webob.Response(status="202 Accepted")
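
The rewritten endpoint above drops the hand-rolled process()/route() plumbing in favour of webob-based WSGI applications. A minimal, illustrative-only application in the same style: webob.dec.wsgify hands the callable a parsed Request and, as CloudServerAPI relies on, turns a returned string into the response body.

    import json

    import webob.dec


    class PingAPI(object):
        """Illustrative only; not part of the endpoint above."""

        @webob.dec.wsgify
        def __call__(self, req):
            # Echo the request path back as a JSON body.
            return json.dumps({"path": req.path_info})
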
diff --git a/nova/flags.py b/nova/flags.py
index f35f5fa10..b3bdd088f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -21,16 +21,145 @@ Package-level global flags are defined here, the rest are defined
where they're used.
"""
+import getopt
import socket
+import sys
+import gflags
-from gflags import *
-# This keeps pylint from barfing on the imports
-FLAGS = FLAGS
-DEFINE_string = DEFINE_string
-DEFINE_integer = DEFINE_integer
-DEFINE_bool = DEFINE_bool
+class FlagValues(gflags.FlagValues):
+ """Extension of gflags.FlagValues that allows undefined and runtime flags.
+
+ Unknown flags will be ignored when parsing the command line, but the
+ command line will be kept so that it can be replayed if new flags are
+ defined after the initial parsing.
+
+ """
+
+ def __init__(self):
+ gflags.FlagValues.__init__(self)
+ self.__dict__['__dirty'] = []
+ self.__dict__['__was_already_parsed'] = False
+ self.__dict__['__stored_argv'] = []
+
+ def __call__(self, argv):
+ # We're doing some hacky stuff here so that we don't have to copy
+ # out all the code of the original verbatim and then tweak a few lines.
+ # We're hijacking the output of getopt so we can still return the
+ # leftover args at the end
+ sneaky_unparsed_args = {"value": None}
+ original_argv = list(argv)
+
+ if self.IsGnuGetOpt():
+ orig_getopt = getattr(getopt, 'gnu_getopt')
+ orig_name = 'gnu_getopt'
+ else:
+ orig_getopt = getattr(getopt, 'getopt')
+ orig_name = 'getopt'
+
+ def _sneaky(*args, **kw):
+ optlist, unparsed_args = orig_getopt(*args, **kw)
+ sneaky_unparsed_args['value'] = unparsed_args
+ return optlist, unparsed_args
+
+ try:
+ setattr(getopt, orig_name, _sneaky)
+ args = gflags.FlagValues.__call__(self, argv)
+ except gflags.UnrecognizedFlagError:
+            # Undefined args were found; for now we don't care, so just
+ # act like everything went well
+ # (these three lines are copied pretty much verbatim from the end
+ # of the __call__ function we are wrapping)
+ unparsed_args = sneaky_unparsed_args['value']
+ if unparsed_args:
+ if self.IsGnuGetOpt():
+                    args = argv[:1] + unparsed_args
+ else:
+ args = argv[:1] + original_argv[-len(unparsed_args):]
+ else:
+ args = argv[:1]
+ finally:
+ setattr(getopt, orig_name, orig_getopt)
+
+ # Store the arguments for later, we'll need them for new flags
+ # added at runtime
+ self.__dict__['__stored_argv'] = original_argv
+ self.__dict__['__was_already_parsed'] = True
+ self.ClearDirty()
+ return args
+
+ def SetDirty(self, name):
+ """Mark a flag as dirty so that accessing it will case a reparse."""
+ self.__dict__['__dirty'].append(name)
+
+ def IsDirty(self, name):
+ return name in self.__dict__['__dirty']
+
+ def ClearDirty(self):
+        self.__dict__['__dirty'] = []
+
+ def WasAlreadyParsed(self):
+ return self.__dict__['__was_already_parsed']
+
+ def ParseNewFlags(self):
+ if '__stored_argv' not in self.__dict__:
+ return
+ new_flags = FlagValues()
+ for k in self.__dict__['__dirty']:
+ new_flags[k] = gflags.FlagValues.__getitem__(self, k)
+
+ new_flags(self.__dict__['__stored_argv'])
+ for k in self.__dict__['__dirty']:
+ setattr(self, k, getattr(new_flags, k))
+ self.ClearDirty()
+
+ def __setitem__(self, name, flag):
+ gflags.FlagValues.__setitem__(self, name, flag)
+ if self.WasAlreadyParsed():
+ self.SetDirty(name)
+
+ def __getitem__(self, name):
+ if self.IsDirty(name):
+ self.ParseNewFlags()
+ return gflags.FlagValues.__getitem__(self, name)
+
+ def __getattr__(self, name):
+ if self.IsDirty(name):
+ self.ParseNewFlags()
+ return gflags.FlagValues.__getattr__(self, name)
+
+
+FLAGS = FlagValues()
+
+
+def _wrapper(func):
+ def _wrapped(*args, **kw):
+ kw.setdefault('flag_values', FLAGS)
+ func(*args, **kw)
+ _wrapped.func_name = func.func_name
+ return _wrapped
+
+
+DEFINE_string = _wrapper(gflags.DEFINE_string)
+DEFINE_integer = _wrapper(gflags.DEFINE_integer)
+DEFINE_bool = _wrapper(gflags.DEFINE_bool)
+DEFINE_boolean = _wrapper(gflags.DEFINE_boolean)
+DEFINE_float = _wrapper(gflags.DEFINE_float)
+DEFINE_enum = _wrapper(gflags.DEFINE_enum)
+DEFINE_list = _wrapper(gflags.DEFINE_list)
+DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
+DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
+DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
+
+
+def DECLARE(name, module_string, flag_values=FLAGS):
+ if module_string not in sys.modules:
+ __import__(module_string, globals(), locals())
+ if name not in flag_values:
+ raise gflags.UnrecognizedFlag(
+ "%s not defined by %s" % (name, module_string))
+
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
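
The FlagValues subclass above exists so that flags defined after the command line has been parsed still pick up their values: unknown options are tolerated on the first pass, the original argv is stored, and a flag defined later is marked dirty and resolved by replaying that argv on first access (the same behaviour flags_unittest exercises with runtime_answer). A small sketch, with an invented flag name:

    from nova import flags

    FLAGS = flags.FLAGS

    # '--late_answer' is not defined yet; parsing tolerates it and keeps argv.
    FLAGS(['prog', '--late_answer=7', 'extra_arg'])

    flags.DEFINE_integer('late_answer', 0, 'flag defined after parsing')
    print FLAGS.late_answer    # 7; the stored argv is replayed on first access
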
diff --git a/nova/compute/exception.py b/nova/network/exception.py
index 13e4f0a51..5722e9672 100644
--- a/nova/compute/exception.py
+++ b/nova/network/exception.py
@@ -17,7 +17,7 @@
# under the License.
"""
-Exceptions for Compute Node errors, mostly network addressing.
+Exceptions for network errors.
"""
from nova.exception import Error
diff --git a/nova/compute/linux_net.py b/nova/network/linux_net.py
index 4a4b4c8a8..4a4b4c8a8 100644
--- a/nova/compute/linux_net.py
+++ b/nova/network/linux_net.py
diff --git a/nova/compute/network.py b/nova/network/model.py
index 62d892e58..eada776c7 100644
--- a/nova/compute/network.py
+++ b/nova/network/model.py
@@ -17,7 +17,7 @@
# under the License.
"""
-Classes for network control, including VLANs, DHCP, and IP allocation.
+Model Classes for network control, including VLANs, DHCP, and IP allocation.
"""
import IPy
@@ -26,12 +26,12 @@ import os
import time
from nova import datastore
-from nova import exception
+from nova import exception as nova_exception
from nova import flags
from nova import utils
from nova.auth import manager
-from nova.compute import exception as compute_exception
-from nova.compute import linux_net
+from nova.network import exception
+from nova.network import linux_net
FLAGS = flags.FLAGS
@@ -53,26 +53,6 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
-flags.DEFINE_boolean('simple_network', False,
- 'Use simple networking instead of vlans')
-flags.DEFINE_string('simple_network_bridge', 'br100',
- 'Bridge for simple network instances')
-flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
- 'Available ips for simple network')
-flags.DEFINE_string('simple_network_template',
- utils.abspath('compute/interfaces.template'),
- 'Template file for simple network')
-flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
- 'Netmask for simple network')
-flags.DEFINE_string('simple_network_network', '192.168.0.0',
- 'Network for simple network')
-flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
- 'Broadcast for simple network')
-flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
- 'Broadcast for simple network')
-flags.DEFINE_string('simple_network_dns', '8.8.4.4',
- 'Dns for simple network')
-
logging.getLogger().setLevel(logging.DEBUG)
@@ -117,11 +97,11 @@ class Vlan(datastore.BasicModel):
def dict_by_vlan(cls):
"""a hash of vlan:project"""
set_name = cls._redis_set_name(cls.__name__)
- rv = {}
- h = datastore.Redis.instance().hgetall(set_name)
- for v in h.keys():
- rv[h[v]] = v
- return rv
+ retvals = {}
+ hashset = datastore.Redis.instance().hgetall(set_name)
+ for val in hashset.keys():
+ retvals[hashset[val]] = val
+ return retvals
@classmethod
@datastore.absorb_connection_error
@@ -156,8 +136,8 @@ class Vlan(datastore.BasicModel):
# CLEANUP:
# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
-# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al
-# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
+# TODO(ja): does vlanpool "keeper" need to know the min/max -
+# shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
class BaseNetwork(datastore.BasicModel):
@@ -238,10 +218,12 @@ class BaseNetwork(datastore.BasicModel):
def available(self):
# the .2 address is always CloudPipe
# and the top <n> are for vpn clients
- for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
+ num_ips = self.num_static_ips
+ num_clients = FLAGS.cnt_vpn_clients
+ for idx in range(num_ips, len(self.network)-(1 + num_clients)):
address = str(self.network[idx])
if not address in self.hosts.keys():
- yield str(address)
+ yield address
@property
def num_static_ips(self):
@@ -253,7 +235,7 @@ class BaseNetwork(datastore.BasicModel):
self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
- raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+ raise exception.NoMoreAddresses("Project %s with network %s" %
(project_id, str(self.network)))
def lease_ip(self, ip_str):
@@ -261,7 +243,7 @@ class BaseNetwork(datastore.BasicModel):
def release_ip(self, ip_str):
if not ip_str in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
self.deexpress(address=ip_str)
self._rem_host(ip_str)
@@ -349,18 +331,19 @@ class DHCPNetwork(BridgedNetwork):
logging.debug("Not launching dnsmasq: no hosts.")
self.express_cloudpipe()
- def allocate_vpn_ip(self, mac):
+ def allocate_vpn_ip(self, user_id, project_id, mac):
address = str(self.network[2])
- self._add_host(self['user_id'], self['project_id'], address, mac)
+ self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
def express_cloudpipe(self):
- private_ip = self.network[2]
+ private_ip = str(self.network[2])
linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
% (private_ip, ))
- linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
- % (self.project.vpn_ip, self.project.vpn_port, private_ip))
+ linux_net.confirm_rule(
+ "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
+ % (self.project.vpn_ip, self.project.vpn_port, private_ip))
def deexpress(self, address=None):
# if this is the last address, stop dns
@@ -394,13 +377,15 @@ class PublicAddress(datastore.BasicModel):
addr.save()
return addr
-DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
+
+DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
class PublicNetworkController(BaseNetwork):
override_type = 'network'
def __init__(self, *args, **kwargs):
network_id = "public:default"
- super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
+ super(PublicNetworkController, self).__init__(network_id,
+ FLAGS.public_range)
self['user_id'] = "public"
self['project_id'] = "public"
self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -420,12 +405,6 @@ class PublicNetworkController(BaseNetwork):
for address in self.assigned:
yield PublicAddress(address)
- def get_public_ip_for_instance(self, instance_id):
- # FIXME: this should be a lookup - iteration won't scale
- for address_record in self.host_objs:
- if address_record.get('instance_id', 'available') == instance_id:
- return address_record['address']
-
def get_host(self, host):
if host in self.assigned:
return PublicAddress(host)
@@ -439,16 +418,20 @@ class PublicNetworkController(BaseNetwork):
PublicAddress(host).destroy()
datastore.Redis.instance().hdel(self._hosts_key, host)
+ def deallocate_ip(self, ip_str):
+ # NOTE(vish): cleanup is now done on release by the parent class
+ self.release_ip(ip_str)
+
def associate_address(self, public_ip, private_ip, instance_id):
if not public_ip in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
if addr.get('private_ip', None) == private_ip:
- raise compute_exception.AddressAlreadyAssociated()
+ raise exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') != 'available':
- raise compute_exception.AddressAlreadyAssociated()
+ raise exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
addr['instance_id'] = instance_id
addr.save()
@@ -456,10 +439,10 @@ class PublicNetworkController(BaseNetwork):
def disassociate_address(self, public_ip):
if not public_ip in self.assigned:
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') == 'available':
- raise compute_exception.AddressNotAssociated()
+ raise exception.AddressNotAssociated()
self.deexpress(address=public_ip)
addr['private_ip'] = 'available'
addr['instance_id'] = 'available'
@@ -483,8 +466,9 @@ class PublicNetworkController(BaseNetwork):
linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
% (private_ip))
for (protocol, port) in DEFAULT_PORTS:
- linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
- % (private_ip, protocol, port))
+ linux_net.confirm_rule(
+ "FORWARD -d %s -p %s --dport %s -j ACCEPT"
+ % (private_ip, protocol, port))
def deexpress(self, address=None):
addr = self.get_host(address)
@@ -535,63 +519,42 @@ def get_vlan_for_project(project_id):
return vlan
else:
return Vlan.create(project_id, vnum)
- raise compute_exception.AddressNotAllocated("Out of VLANs")
+ raise exception.AddressNotAllocated("Out of VLANs")
+
+def get_project_network(project_id, security_group='default'):
+ """ get a project's private network, allocating one if needed """
+ project = manager.AuthManager().get_project(project_id)
+ if not project:
+ raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
+ manager_id = project.project_manager_id
+ return DHCPNetwork.get_network_for_project(manager_id,
+ project.id,
+ security_group)
-def get_network_by_interface(iface, security_group='default'):
- vlan = iface.rpartition("br")[2]
- return get_project_network(Vlan.dict_by_vlan().get(vlan), security_group)
def get_network_by_address(address):
+ # TODO(vish): This is completely the wrong way to do this, but
+ # I'm getting the network binary working before I
+ # tackle doing this the right way.
logging.debug("Get Network By Address: %s" % address)
for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
return net
- raise compute_exception.AddressNotAllocated()
+ raise exception.AddressNotAllocated()
-def allocate_simple_ip():
- redis = datastore.Redis.instance()
- if not redis.exists('ips') and not len(redis.keys('instances:*')):
- for address in FLAGS.simple_network_ips:
- redis.sadd('ips', address)
- address = redis.spop('ips')
- if not address:
- raise exception.NoMoreAddresses()
- return address
-def deallocate_simple_ip(address):
- datastore.Redis.instance().sadd('ips', address)
-
-
-def allocate_vpn_ip(user_id, project_id, mac):
- return get_project_network(project_id).allocate_vpn_ip(mac)
-
-def allocate_ip(user_id, project_id, mac):
- return get_project_network(project_id).allocate_ip(user_id, project_id, mac)
-
-def deallocate_ip(address):
- return get_network_by_address(address).deallocate_ip(address)
-
-def release_ip(address):
- return get_network_by_address(address).release_ip(address)
+def get_network_by_interface(iface, security_group='default'):
+ vlan = iface.rpartition("br")[2]
+ project_id = Vlan.dict_by_vlan().get(vlan)
+ return get_project_network(project_id, security_group)
-def lease_ip(address):
- return get_network_by_address(address).lease_ip(address)
-def get_project_network(project_id, security_group='default'):
- """ get a project's private network, allocating one if needed """
- # TODO(todd): It looks goofy to get a project from a UserManager.
- # Refactor to still use the LDAP backend, but not User specific.
- project = manager.AuthManager().get_project(project_id)
- if not project:
- raise exception.Error("Project %s doesn't exist, uhoh." %
- project_id)
- return DHCPNetwork.get_network_for_project(project.project_manager_id,
- project.id, security_group)
+def get_public_ip_for_instance(instance_id):
+ # FIXME: this should be a lookup - iteration won't scale
+ for address_record in PublicAddress.all():
+ if address_record.get('instance_id', 'available') == instance_id:
+ return address_record['address']
-def restart_nets():
- """ Ensure the network for each user is enabled"""
- for project in manager.AuthManager().get_projects():
- get_project_network(project.id).express()
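
get_public_ip_for_instance above still walks every PublicAddress record, and the FIXME notes that a real lookup is needed. One possible shape for that lookup, sketched purely as an idea (the 'public_ip_by_instance' hash key is invented here and is not part of the model):

    from nova import datastore


    def index_public_ip(instance_id, address):
        """Hypothetical reverse index, written when an address is associated."""
        datastore.Redis.instance().hset('public_ip_by_instance',
                                        instance_id, address)


    def lookup_public_ip(instance_id):
        """O(1) replacement for the iteration above."""
        return datastore.Redis.instance().hget('public_ip_by_instance',
                                               instance_id)
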
diff --git a/nova/network/service.py b/nova/network/service.py
index 9d87e05e6..1a61f49d4 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -20,16 +20,211 @@
Network Nodes are responsible for allocating ips and setting up network
"""
-import logging
-
+from nova import datastore
from nova import flags
from nova import service
-
+from nova import utils
+from nova.auth import manager
+from nova.exception import NotFound
+from nova.network import exception
+from nova.network import model
+from nova.network import vpn
FLAGS = flags.FLAGS
-class NetworkService(service.Service):
- """Allocates ips and sets up networks"""
+flags.DEFINE_string('network_type',
+ 'flat',
+ 'Service Class for Networking')
+flags.DEFINE_string('flat_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('flat_network_ips',
+                  ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
+ 'Available ips for simple network')
+flags.DEFINE_string('flat_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
+                    'Gateway for simple network')
+flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('flat_network_dns', '8.8.4.4',
+ 'Dns for simple network')
+
+def type_to_class(network_type):
+ if network_type == 'flat':
+ return FlatNetworkService
+ elif network_type == 'vlan':
+ return VlanNetworkService
+ raise NotFound("Couldn't find %s network type" % network_type)
+
+
+def setup_compute_network(network_type, user_id, project_id, security_group):
+ srv = type_to_class(network_type)
+    srv.setup_compute_network(user_id, project_id, security_group)
+
+
+def get_host_for_project(project_id):
+ redis = datastore.Redis.instance()
+ return redis.get(_host_key(project_id))
+
+
+def _host_key(project_id):
+ return "network_host:%s" % project_id
+
+
+class BaseNetworkService(service.Service):
+ """Implements common network service functionality
+
+ This class must be subclassed.
+ """
+ def __init__(self, *args, **kwargs):
+ self.network = model.PublicNetworkController()
+
+ def set_network_host(self, user_id, project_id, *args, **kwargs):
+ """Safely sets the host of the projects network"""
+ redis = datastore.Redis.instance()
+ key = _host_key(project_id)
+ if redis.setnx(key, FLAGS.node_name):
+ self._on_set_network_host(user_id, project_id,
+ security_group='default',
+ *args, **kwargs)
+ return FLAGS.node_name
+ else:
+ return redis.get(key)
+
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ *args, **kwargs):
+ """Subclass implements getting fixed ip from the pool"""
+ raise NotImplementedError()
+
+ def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+ """Subclass implements return of ip to the pool"""
+ raise NotImplementedError()
+
+ def _on_set_network_host(self, user_id, project_id,
+ *args, **kwargs):
+ """Called when this host becomes the host for a project"""
+ pass
+
+ @classmethod
+    def setup_compute_network(cls, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Sets up matching network for compute hosts"""
+ raise NotImplementedError()
+
+ def allocate_elastic_ip(self, user_id, project_id):
+ """Gets a elastic ip from the pool"""
+ # NOTE(vish): Replicating earlier decision to use 'public' as
+ # mac address name, although this should probably
+ # be done inside of the PublicNetworkController
+ return self.network.allocate_ip(user_id, project_id, 'public')
+
+ def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id):
+ """Associates an elastic ip to a fixed ip"""
+ self.network.associate_address(elastic_ip, fixed_ip, instance_id)
+
+ def disassociate_elastic_ip(self, elastic_ip):
+ """Disassociates a elastic ip"""
+ self.network.disassociate_address(elastic_ip)
+
+ def deallocate_elastic_ip(self, elastic_ip):
+ """Returns a elastic ip to the pool"""
+ self.network.deallocate_ip(elastic_ip)
+
+
+class FlatNetworkService(BaseNetworkService):
+ """Basic network where no vlans are used"""
+
+ @classmethod
+    def setup_compute_network(cls, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Network is created manually"""
+ pass
+
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ *args, **kwargs):
+ """Gets a fixed ip from the pool
+
+ Flat network just grabs the next available ip from the pool
+ """
+ # NOTE(vish): Some automation could be done here. For example,
+ # creating the flat_network_bridge and setting up
+ # a gateway. This is all done manually atm
+ redis = datastore.Redis.instance()
+ if not redis.exists('ips') and not len(redis.keys('instances:*')):
+ for fixed_ip in FLAGS.flat_network_ips:
+ redis.sadd('ips', fixed_ip)
+ fixed_ip = redis.spop('ips')
+ if not fixed_ip:
+ raise exception.NoMoreAddresses()
+ return {'inject_network': True,
+ 'network_type': FLAGS.network_type,
+ 'mac_address': utils.generate_mac(),
+ 'private_dns_name': str(fixed_ip),
+ 'bridge_name': FLAGS.flat_network_bridge,
+ 'network_network': FLAGS.flat_network_network,
+ 'network_netmask': FLAGS.flat_network_netmask,
+ 'network_gateway': FLAGS.flat_network_gateway,
+ 'network_broadcast': FLAGS.flat_network_broadcast,
+ 'network_dns': FLAGS.flat_network_dns}
+
+ def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+ """Returns an ip to the pool"""
+ datastore.Redis.instance().sadd('ips', fixed_ip)
+
+class VlanNetworkService(BaseNetworkService):
+ """Vlan network with dhcp"""
+ # NOTE(vish): A lot of the interactions with network/model.py can be
+    #             simplified and improved.  It may also be useful there
+ # to support vlans separately from dhcp, instead of having
+ # both of them together in this class.
+ def allocate_fixed_ip(self, user_id, project_id,
+ security_group='default',
+ vpn=False, *args, **kwargs):
+ """Gets a fixed ip from the pool """
+ mac = utils.generate_mac()
+ net = model.get_project_network(project_id)
+ if vpn:
+ fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
+ else:
+ fixed_ip = net.allocate_ip(user_id, project_id, mac)
+ return {'network_type': FLAGS.network_type,
+ 'bridge_name': net['bridge_name'],
+ 'mac_address': mac,
+                'private_dns_name': fixed_ip}
+
+ def deallocate_fixed_ip(self, fixed_ip,
+ *args, **kwargs):
+ """Returns an ip to the pool"""
+ return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
+
+ def lease_ip(self, address):
+ return model.get_network_by_address(address).lease_ip(address)
+
+ def release_ip(self, address):
+ return model.get_network_by_address(address).release_ip(address)
+
+ def restart_nets(self):
+ """Ensure the network for each user is enabled"""
+ for project in manager.AuthManager().get_projects():
+ model.get_project_network(project.id).express()
+
+ def _on_set_network_host(self, user_id, project_id,
+ *args, **kwargs):
+ """Called when this host becomes the host for a project"""
+ vpn.NetworkData.create(project_id)
- def __init__(self):
- logging.debug("Network node working")
+ @classmethod
+    def setup_compute_network(cls, user_id, project_id, security_group,
+ *args, **kwargs):
+ """Sets up matching network for compute hosts"""
+ # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
+ # we don't want to run dnsmasq on the client machines
+ net = model.BridgedNetwork.get_network_for_project(
+ user_id,
+ project_id,
+ security_group)
+ net.express()
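
Whatever dict allocate_fixed_ip returns is copied key-for-key onto the instance record by run_instances in cloud.py, so fields such as mac_address, private_dns_name and bridge_name come straight from the network service. A rough local-usage sketch, assuming a reachable Redis and default flags (the user and project ids are placeholders); over RPC the same dict arrives as result['result']:

    from nova.network import service

    cls = service.type_to_class('flat')        # or FLAGS.network_type
    net_service = cls()
    info = net_service.allocate_fixed_ip('user1', 'project1')
    print info['private_dns_name'], info['bridge_name']
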
diff --git a/nova/network/vpn.py b/nova/network/vpn.py
new file mode 100644
index 000000000..cec84287c
--- /dev/null
+++ b/nova/network/vpn.py
@@ -0,0 +1,116 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Network Data for projects"""
+
+from nova import datastore
+from nova import exception
+from nova import flags
+from nova import utils
+
+FLAGS = flags.FLAGS
+
+
+flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+ 'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_start_port', 1000,
+ 'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+ 'End port for the cloudpipe VPN servers')
+
+class NoMorePorts(exception.Error):
+ pass
+
+
+class NetworkData(datastore.BasicModel):
+ """Manages network host, and vpn ip and port for projects"""
+ def __init__(self, project_id):
+ self.project_id = project_id
+ super(NetworkData, self).__init__()
+
+ @property
+ def identifier(self):
+ """Identifier used for key in redis"""
+ return self.project_id
+
+ @classmethod
+ def create(cls, project_id):
+ """Creates a vpn for project
+
+ This method finds a free ip and port and stores the associated
+ values in the datastore.
+ """
+        # TODO(vish): will we ever need multiple ips per host?
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ network_data = cls(project_id)
+ # save ip for project
+ network_data['host'] = FLAGS.node_name
+ network_data['project'] = project_id
+ network_data['ip'] = FLAGS.vpn_ip
+ network_data['port'] = port
+ network_data.save()
+ return network_data
+
+ @classmethod
+ def find_free_port_for_ip(cls, ip):
+ """Finds a free port for a given ip from the redis set"""
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
+ redis = datastore.Redis.instance()
+ key = 'ip:%s:ports' % ip
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ if (not redis.exists(key) and
+ not redis.exists(cls._redis_association_name('ip', ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(key, i)
+
+ port = redis.spop(key)
+ if not port:
+ raise NoMorePorts()
+ return port
+
+ @classmethod
+ def num_ports_for_ip(cls, ip):
+ """Calculates the number of free ports for a given ip"""
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+ @property
+ def ip(self):
+ """The ip assigned to the project"""
+ return self['ip']
+
+ @property
+ def port(self):
+ """The port assigned to the project"""
+ return int(self['port'])
+
+ def save(self):
+ """Saves the association to the given ip"""
+ self.associate_with('ip', self.ip)
+ super(NetworkData, self).save()
+
+ def destroy(self):
+ """Cleans up datastore and adds port back to pool"""
+ self.unassociate_with('ip', self.ip)
+ datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+ super(NetworkData, self).destroy()
+
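
NetworkData hands out one (ip, port) pair per project by seeding a per-ip Redis set from vpn_start_port..vpn_end_port and popping a member from it. A hedged usage sketch (assumes Redis is running and that FLAGS.vpn_ip and FLAGS.node_name are set; the project id is a placeholder):

    from nova.network import vpn

    net_data = vpn.NetworkData.create('project1')
    print net_data.ip, net_data.port                      # e.g. the vpn ip and 1000

    print vpn.NetworkData.num_ports_for_ip(net_data.ip)   # ports left in the pool
    net_data.destroy()                                    # returns the port to the set
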
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b4d7e6179..f625a2aa1 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -266,7 +266,8 @@ class ImagesResource(Resource):
""" returns a json listing of all images
that a user has permissions to see """
- images = [i for i in image.Image.all() if i.is_authorized(request.context)]
+        images = [i for i in image.Image.all()
+ if i.is_authorized(request.context, readonly=True)]
request.write(json.dumps([i.metadata for i in images]))
request.finish()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index bea2e9637..860298ba6 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -65,9 +65,13 @@ class Image(object):
except:
pass
- def is_authorized(self, context):
+ def is_authorized(self, context, readonly=False):
+ # NOTE(devcamcar): Public images can be read by anyone,
+ # but only modified by admin or owner.
try:
- return self.metadata['isPublic'] or context.user.is_admin() or self.metadata['imageOwnerId'] == context.project.id
+ return (self.metadata['isPublic'] and readonly) or \
+ context.user.is_admin() or \
+ self.metadata['imageOwnerId'] == context.project.id
except:
return False
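
The readonly argument added above makes the rule explicit: public images are readable by anyone, while modification still requires an admin or the owning project. Restated as a standalone predicate (a sketch, not the class method itself):

    def may_access(is_public, is_admin, owner_id, project_id, readonly=False):
        # Public images can be read by anyone, but only modified by an
        # admin or the owning project (mirrors Image.is_authorized above).
        return (is_public and readonly) or is_admin or owner_id == project_id
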
diff --git a/nova/rpc.py b/nova/rpc.py
index ebf140d92..2a550c3ae 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -238,12 +238,12 @@ def send_message(topic, message, wait=True):
exchange=msg_id,
auto_delete=True,
exchange_type="direct",
- routing_key=msg_id,
- durable=False)
+ routing_key=msg_id)
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange,
+ durable=False,
exchange_type="topic",
routing_key=topic)
publisher.send(message)
diff --git a/nova/server.py b/nova/server.py
index 7a1901a2f..96550f078 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -52,13 +52,8 @@ def stop(pidfile):
"""
# Get the pid from the pidfile
try:
- pf = file(pidfile,'r')
- pid = int(pf.read().strip())
- pf.close()
+        pid = int(open(pidfile, 'r').read().strip())
except IOError:
- pid = None
-
- if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
return # not an error in a restart
@@ -79,14 +74,15 @@ def stop(pidfile):
def serve(name, main):
+ """Controller for server"""
argv = FLAGS(sys.argv)
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
- logging.debug("Full set of FLAGS: \n\n\n" )
+ logging.debug("Full set of FLAGS: \n\n\n")
for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) ))
+ logging.debug("%s : %s", flag, FLAGS.get(flag, None))
action = 'start'
if len(argv) > 1:
@@ -102,7 +98,11 @@ def serve(name, main):
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
+ daemonize(argv, name, main)
+
+def daemonize(args, name, main):
+ """Does the work of daemonizing the process"""
logging.getLogger('amqplib').setLevel(logging.WARN)
if FLAGS.daemonize:
logger = logging.getLogger()
@@ -115,7 +115,7 @@ def serve(name, main):
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
- logfile = logging.handlers.FileHandler(FLAGS.logfile)
+ logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
stdin, stdout, stderr = None, None, None
@@ -137,4 +137,4 @@ def serve(name, main):
stdout=stdout,
stderr=stderr
):
- main(argv)
+ main(args)
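
serve() above parses the flags, picks the start/stop/restart action from argv, and hands off to daemonize(), which configures logging and the daemon context before calling main(). A minimal sketch of an entry point built on it, assuming the pidfile/daemonize/logfile flags defined by this module (the service body is a placeholder):

    from nova import server


    def main(argv):
        # Placeholder: a real binary would start its service loop here.
        print 'started with args: %s' % (argv,)


    if __name__ == '__main__':
        server.serve('nova-example', main)
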
diff --git a/nova/test.py b/nova/test.py
index 6fbcab5e4..c7e08734f 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,15 +22,14 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
-import logging
import mox
import stubout
+import sys
import time
-import unittest
+
from tornado import ioloop
from twisted.internet import defer
-from twisted.python import failure
-from twisted.trial import unittest as trial_unittest
+from twisted.trial import unittest
from nova import fakerabbit
from nova import flags
@@ -41,20 +40,21 @@ flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
-def skip_if_fake(f):
+def skip_if_fake(func):
+ """Decorator that skips a test if running in fake mode"""
def _skipper(*args, **kw):
+ """Wrapped skipper function"""
if FLAGS.fake_tests:
- raise trial_unittest.SkipTest('Test cannot be run in fake mode')
+ raise unittest.SkipTest('Test cannot be run in fake mode')
else:
- return f(*args, **kw)
-
- _skipper.func_name = f.func_name
+ return func(*args, **kw)
return _skipper
-class TrialTestCase(trial_unittest.TestCase):
-
- def setUp(self):
+class TrialTestCase(unittest.TestCase):
+ """Test case base class for all unit tests"""
+ def setUp(self): # pylint: disable-msg=C0103
+ """Run before each test method to initialize test environment"""
super(TrialTestCase, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
@@ -63,7 +63,8 @@ class TrialTestCase(trial_unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
- def tearDown(self):
+ def tearDown(self): # pylint: disable-msg=C0103
+ """Runs after each test method to finalize/tear down test environment"""
super(TrialTestCase, self).tearDown()
self.reset_flags()
self.mox.UnsetStubs()
@@ -75,6 +76,7 @@ class TrialTestCase(trial_unittest.TestCase):
fakerabbit.reset_all()
def flags(self, **kw):
+ """Override flag variables for a test"""
for k, v in kw.iteritems():
if k in self.flag_overrides:
self.reset_flags()
@@ -84,13 +86,17 @@ class TrialTestCase(trial_unittest.TestCase):
setattr(FLAGS, k, v)
def reset_flags(self):
+ """Resets all flag variables for the test. Runs after each test"""
for k, v in self.flag_overrides.iteritems():
setattr(FLAGS, k, v)
class BaseTestCase(TrialTestCase):
- def setUp(self):
+ # TODO(jaypipes): Can this be moved into the TrialTestCase class?
+ """Base test case class for all unit tests."""
+ def setUp(self): # pylint: disable-msg=C0103
+ """Run before each test method to initialize test environment"""
super(BaseTestCase, self).setUp()
# TODO(termie): we could possibly keep a more global registry of
# the injected listeners... this is fine for now though
@@ -98,33 +104,27 @@ class BaseTestCase(TrialTestCase):
self.ioloop = ioloop.IOLoop.instance()
self._waiting = None
- self._doneWaiting = False
- self._timedOut = False
- self.set_up()
-
- def set_up(self):
- pass
-
- def tear_down(self):
- pass
+ self._done_waiting = False
+ self._timed_out = False
- def tearDown(self):
+    def tearDown(self):  # pylint: disable-msg=C0103
+ """Runs after each test method to finalize/tear down test environment"""
super(BaseTestCase, self).tearDown()
for x in self.injected:
x.stop()
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
- self.tear_down()
- def _waitForTest(self, timeout=60):
+ def _wait_for_test(self, timeout=60):
""" Push the ioloop along to wait for our test to complete. """
self._waiting = self.ioloop.add_timeout(time.time() + timeout,
self._timeout)
def _wait():
- if self._timedOut:
+ """Wrapped wait function. Called on timeout."""
+ if self._timed_out:
self.fail('test timed out')
self._done()
- if self._doneWaiting:
+ if self._done_waiting:
self.ioloop.stop()
return
# we can use add_callback here but this uses less cpu when testing
@@ -134,15 +134,18 @@ class BaseTestCase(TrialTestCase):
self.ioloop.start()
def _done(self):
+ """Callback used for cleaning up deferred test methods."""
if self._waiting:
try:
self.ioloop.remove_timeout(self._waiting)
- except Exception:
+ except Exception: # pylint: disable-msg=W0703
+ # TODO(jaypipes): This produces a pylint warning. Should
+ # we really be catching Exception and then passing here?
pass
self._waiting = None
- self._doneWaiting = True
+ self._done_waiting = True
- def _maybeInlineCallbacks(self, f):
+ def _maybe_inline_callbacks(self, func):
""" If we're doing async calls in our tests, wait on them.
This is probably the most complicated hunk of code we have so far.
@@ -165,7 +168,7 @@ class BaseTestCase(TrialTestCase):
d.addCallback(_describe)
d.addCallback(_checkDescribe)
d.addCallback(lambda x: self._done())
- self._waitForTest()
+ self._wait_for_test()
Example (inline callbacks! yay!):
@@ -179,16 +182,17 @@ class BaseTestCase(TrialTestCase):
# TODO(termie): this can be a wrapper function instead and
# and we can make a metaclass so that we don't
# have to copy all that "run" code below.
- g = f()
+ g = func()
if not hasattr(g, 'send'):
self._done()
return defer.succeed(g)
- inlined = defer.inlineCallbacks(f)
+ inlined = defer.inlineCallbacks(func)
d = inlined()
return d
- def _catchExceptions(self, result, failure):
+ def _catch_exceptions(self, result, failure):
+ """Catches all exceptions and handles keyboard interrupts."""
exc = (failure.type, failure.value, failure.getTracebackObject())
if isinstance(failure.value, self.failureException):
result.addFailure(self, exc)
@@ -200,44 +204,46 @@ class BaseTestCase(TrialTestCase):
self._done()
def _timeout(self):
+ """Helper method which trips the timeouts"""
self._waiting = False
- self._timedOut = True
+ self._timed_out = True
def run(self, result=None):
- if result is None: result = self.defaultTestResult()
+ """Runs the test case"""
result.startTest(self)
- testMethod = getattr(self, self._testMethodName)
+ test_method = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
- result.addError(self, self._exc_info())
+ result.addError(self, sys.exc_info())
return
ok = False
try:
- d = self._maybeInlineCallbacks(testMethod)
- d.addErrback(lambda x: self._catchExceptions(result, x))
+ d = self._maybe_inline_callbacks(test_method)
+ d.addErrback(lambda x: self._catch_exceptions(result, x))
d.addBoth(lambda x: self._done() and x)
- self._waitForTest()
+ self._wait_for_test()
ok = True
except self.failureException:
- result.addFailure(self, self._exc_info())
+ result.addFailure(self, sys.exc_info())
except KeyboardInterrupt:
raise
except:
- result.addError(self, self._exc_info())
+ result.addError(self, sys.exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
- result.addError(self, self._exc_info())
+ result.addError(self, sys.exc_info())
ok = False
- if ok: result.addSuccess(self)
+ if ok:
+ result.addSuccess(self)
finally:
result.stopTest(self)
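
run() above inspects each test method: if calling it yields a generator, the method is wrapped with defer.inlineCallbacks and the tornado ioloop is pumped until the deferred completes or _timeout fires. A sketch of a test written in that style (illustrative only; the deferred here is trivially pre-fired):

    from twisted.internet import defer

    from nova import test


    class ExampleTestCase(test.BaseTestCase):
        """Illustrative only; not part of the suite above."""

        def test_deferred_result(self):
            # Yielding works because run() wraps generator test methods
            # with defer.inlineCallbacks and drives the ioloop to completion.
            result = yield defer.succeed(42)
            self.assertEqual(result, 42)
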
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 2167c2385..0b404bfdc 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -135,10 +135,18 @@ class AuthTestCase(test.BaseTestCase):
self.manager.add_to_project('test2', 'testproj')
self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
- def test_208_can_remove_user_from_project(self):
+ def test_207_can_remove_user_from_project(self):
self.manager.remove_from_project('test2', 'testproj')
self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
+ def test_208_can_remove_add_user_with_role(self):
+ self.manager.add_to_project('test2', 'testproj')
+ self.manager.add_role('test2', 'developer', 'testproj')
+ self.manager.remove_from_project('test2', 'testproj')
+ self.assertFalse(self.manager.has_role('test2', 'developer', 'testproj'))
+ self.manager.add_to_project('test2', 'testproj')
+ self.manager.remove_from_project('test2', 'testproj')
+
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
@@ -171,7 +179,21 @@ class AuthTestCase(test.BaseTestCase):
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
- def test_211_can_remove_project_role(self):
+ def test_211_can_list_project_roles(self):
+ project = self.manager.get_project('testproj')
+ user = self.manager.get_user('test1')
+ self.manager.add_role(user, 'netadmin', project)
+ roles = self.manager.get_user_roles(user)
+ self.assertTrue('sysadmin' in roles)
+ self.assertFalse('netadmin' in roles)
+ project_roles = self.manager.get_user_roles(user, project)
+ self.assertTrue('sysadmin' in project_roles)
+ self.assertTrue('netadmin' in project_roles)
+ # has role should be false because global role is missing
+ self.assertFalse(self.manager.has_role(user, 'netadmin', project))
+
+
+ def test_212_can_remove_project_role(self):
project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
@@ -179,20 +201,6 @@ class AuthTestCase(test.BaseTestCase):
self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- def test_212_vpn_ip_and_port_looks_valid(self):
- project = self.manager.get_project('testproj')
- self.assert_(project.vpn_ip)
- self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
- self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
-
- def test_213_too_many_vpns(self):
- vpns = []
- for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
- vpns.append(manager.Vpn.create("vpnuser%s" % i))
- self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
- for vpn in vpns:
- vpn.destroy()
-
def test_214_can_retrieve_project_by_user(self):
project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
self.assert_(len(self.manager.get_projects()) > 1)
diff --git a/nova/endpoint/wsgi.py b/nova/tests/declare_flags.py
index b7bb588c3..51a55ec72 100644
--- a/nova/endpoint/wsgi.py
+++ b/nova/tests/declare_flags.py
@@ -16,25 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-'''
-Utility methods for working with WSGI servers
-'''
+from nova import flags
-class Util(object):
-
- @staticmethod
- def route(reqstr, controllers):
- if len(reqstr) == 0:
- return Util.select_root_controller(controllers), []
- parts = [x for x in reqstr.split("/") if len(x) > 0]
- if len(parts) == 0:
- return Util.select_root_controller(controllers), []
- return controllers[parts[0]], parts[1:]
-
- @staticmethod
- def select_root_controller(controllers):
- if '' in controllers:
- return controllers['']
- else:
- return None
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('answer', 42, 'test flag')
diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py
new file mode 100644
index 000000000..d49d5dc43
--- /dev/null
+++ b/nova/tests/flags_unittest.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova import flags
+from nova import test
+
+
+class FlagsTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(FlagsTestCase, self).setUp()
+ self.FLAGS = flags.FlagValues()
+ self.global_FLAGS = flags.FLAGS
+
+ def test_define(self):
+ self.assert_('string' not in self.FLAGS)
+ self.assert_('int' not in self.FLAGS)
+ self.assert_('false' not in self.FLAGS)
+ self.assert_('true' not in self.FLAGS)
+
+ flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS)
+ flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
+ flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
+ flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
+
+ self.assert_(self.FLAGS['string'])
+ self.assert_(self.FLAGS['int'])
+ self.assert_(self.FLAGS['false'])
+ self.assert_(self.FLAGS['true'])
+ self.assertEqual(self.FLAGS.string, 'default')
+ self.assertEqual(self.FLAGS.int, 1)
+ self.assertEqual(self.FLAGS.false, False)
+ self.assertEqual(self.FLAGS.true, True)
+
+ argv = ['flags_test',
+ '--string', 'foo',
+ '--int', '2',
+ '--false',
+ '--notrue']
+
+ self.FLAGS(argv)
+ self.assertEqual(self.FLAGS.string, 'foo')
+ self.assertEqual(self.FLAGS.int, 2)
+ self.assertEqual(self.FLAGS.false, True)
+ self.assertEqual(self.FLAGS.true, False)
+
+ def test_declare(self):
+ self.assert_('answer' not in self.global_FLAGS)
+ flags.DECLARE('answer', 'nova.tests.declare_flags')
+ self.assert_('answer' in self.global_FLAGS)
+ self.assertEqual(self.global_FLAGS.answer, 42)
+
+ # Make sure we don't overwrite anything
+ self.global_FLAGS.answer = 256
+ self.assertEqual(self.global_FLAGS.answer, 256)
+ flags.DECLARE('answer', 'nova.tests.declare_flags')
+ self.assertEqual(self.global_FLAGS.answer, 256)
+
+ def test_runtime_and_unknown_flags(self):
+ self.assert_('runtime_answer' not in self.global_FLAGS)
+
+ argv = ['flags_test', '--runtime_answer=60', 'extra_arg']
+ args = self.global_FLAGS(argv)
+ self.assertEqual(len(args), 2)
+ self.assertEqual(args[1], 'extra_arg')
+
+ self.assert_('runtime_answer' not in self.global_FLAGS)
+
+ import nova.tests.runtime_flags
+
+ self.assert_('runtime_answer' in self.global_FLAGS)
+ self.assertEqual(self.global_FLAGS.runtime_answer, 60)
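For readers following the new flags machinery, here is a minimal usage sketch, assuming only the nova.flags API exercised by the test above (the module and flag name are the test fixture's, reused purely for illustration):

# Hedged sketch: DECLARE simply imports the named module so that its
# DEFINE_* calls register with the global FLAGS object.
from nova import flags

FLAGS = flags.FLAGS
flags.DECLARE('answer', 'nova.tests.declare_flags')

if __name__ == '__main__':
    FLAGS(['example', '--answer=7'])  # parse argv-style arguments
    print FLAGS.answer                # 7; would be 42 with no override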
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index 6825cfe2a..dc2441c24 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -19,9 +19,7 @@
from datetime import datetime, timedelta
import logging
import time
-from twisted.internet import defer
-from nova import exception
from nova import flags
from nova import test
from nova import utils
@@ -49,9 +47,9 @@ class ModelTestCase(test.TrialTestCase):
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny'
- inst['node_name'] = FLAGS.node_name
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst['private_dns_name'] = '10.0.0.1'
inst.save()
return inst
@@ -71,118 +69,126 @@ class ModelTestCase(test.TrialTestCase):
session_token.save()
return session_token
- @defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
- instance = yield self.create_instance()
- old = yield model.Instance(instance.identifier)
+ instance = self.create_instance()
+ old = model.Instance(instance.identifier)
self.assertFalse(old.is_new_record())
- @defer.inlineCallbacks
def test_delete_instance(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_instance()
- yield instance.destroy()
- newinst = yield model.Instance('i-test')
+ instance = self.create_instance()
+ instance.destroy()
+ newinst = model.Instance('i-test')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_instance_added_to_set(self):
- """create, then check that it is listed for the project"""
- instance = yield self.create_instance()
+ """create, then check that it is listed in global set"""
+ instance = self.create_instance()
found = False
for x in model.InstanceDirectory().all:
if x.identifier == 'i-test':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_instance_associates_project(self):
"""create, then check that it is listed for the project"""
- instance = yield self.create_instance()
+ instance = self.create_instance()
found = False
for x in model.InstanceDirectory().by_project(instance.project):
if x.identifier == 'i-test':
found = True
self.assert_(found)
- @defer.inlineCallbacks
+ def test_instance_associates_ip(self):
+ """create, then check that it is listed for the ip"""
+ instance = self.create_instance()
+ found = False
+ x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
+ self.assertEqual(x.identifier, 'i-test')
+
+ def test_instance_associates_node(self):
+ """create, then check that it is listed for the node_name"""
+ instance = self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().by_node(FLAGS.node_name):
+ if x.identifier == 'i-test':
+ found = True
+ self.assertFalse(found)
+ instance['node_name'] = 'test_node'
+ instance.save()
+ for x in model.InstanceDirectory().by_node('test_node'):
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+
def test_host_class_finds_hosts(self):
- host = yield self.create_host()
+ host = self.create_host()
self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
- @defer.inlineCallbacks
def test_host_class_doesnt_find_missing_hosts(self):
- rv = yield model.Host.lookup('woahnelly')
+ rv = model.Host.lookup('woahnelly')
self.assertEqual(None, rv)
- @defer.inlineCallbacks
def test_create_host(self):
"""store with create_host, then test that a load finds it"""
- host = yield self.create_host()
- old = yield model.Host(host.identifier)
+ host = self.create_host()
+ old = model.Host(host.identifier)
self.assertFalse(old.is_new_record())
- @defer.inlineCallbacks
def test_delete_host(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_host()
- yield instance.destroy()
- newinst = yield model.Host('testhost')
+ instance = self.create_host()
+ instance.destroy()
+ newinst = model.Host('testhost')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_host_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_host()
+ instance = self.create_host()
found = False
for x in model.Host.all():
if x.identifier == 'testhost':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_create_daemon_two_args(self):
"""create a daemon with two arguments"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_create_daemon_single_arg(self):
"""Create a daemon using the combined host:bin format"""
- d = yield model.Daemon("testhost:nova-testdaemon")
+ d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_equality_of_daemon_single_and_double_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
- d = yield model.Daemon("testhost:nova-testdaemon")
+ d = model.Daemon("testhost:nova-testdaemon")
d.save()
d = model.Daemon('testhost', 'nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_equality_daemon_of_double_and_single_args(self):
"""Create a daemon using the combined host:bin arg, find with 2"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
d = model.Daemon('testhost:nova-testdaemon')
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_delete_daemon(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_daemon()
- yield instance.destroy()
- newinst = yield model.Daemon('testhost', 'nova-testdaemon')
+ instance = self.create_daemon()
+ instance.destroy()
+ newinst = model.Daemon('testhost', 'nova-testdaemon')
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_daemon_heartbeat(self):
"""Create a daemon, sleep, heartbeat, check for update"""
- d = yield self.create_daemon()
+ d = self.create_daemon()
ts = d['updated_at']
time.sleep(2)
d.heartbeat()
@@ -190,70 +196,62 @@ class ModelTestCase(test.TrialTestCase):
ts2 = d2['updated_at']
self.assert_(ts2 > ts)
- @defer.inlineCallbacks
def test_daemon_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_daemon()
+ instance = self.create_daemon()
found = False
for x in model.Daemon.all():
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_daemon_associates_host(self):
"""create, then check that it is listed for the host"""
- instance = yield self.create_daemon()
+ instance = self.create_daemon()
found = False
for x in model.Daemon.by_host('testhost'):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
- @defer.inlineCallbacks
def test_create_session_token(self):
"""create"""
- d = yield self.create_session_token()
+ d = self.create_session_token()
d = model.SessionToken(d.token)
self.assertFalse(d.is_new_record())
- @defer.inlineCallbacks
def test_delete_session_token(self):
"""create, then destroy, then make sure loads a new record"""
- instance = yield self.create_session_token()
- yield instance.destroy()
- newinst = yield model.SessionToken(instance.token)
+ instance = self.create_session_token()
+ instance.destroy()
+ newinst = model.SessionToken(instance.token)
self.assertTrue(newinst.is_new_record())
- @defer.inlineCallbacks
def test_session_token_added_to_set(self):
"""create, then check that it is included in list"""
- instance = yield self.create_session_token()
+ instance = self.create_session_token()
found = False
for x in model.SessionToken.all():
if x.identifier == instance.token:
found = True
self.assert_(found)
- @defer.inlineCallbacks
def test_session_token_associates_user(self):
"""create, then check that it is listed for the user"""
- instance = yield self.create_session_token()
+ instance = self.create_session_token()
found = False
for x in model.SessionToken.associated_to('user', 'testuser'):
if x.identifier == instance.identifier:
found = True
self.assertTrue(found)
- @defer.inlineCallbacks
def test_session_token_generation(self):
- instance = yield model.SessionToken.generate('username', 'TokenType')
+ instance = model.SessionToken.generate('username', 'TokenType')
self.assertFalse(instance.is_new_record())
- @defer.inlineCallbacks
def test_find_generated_session_token(self):
- instance = yield model.SessionToken.generate('username', 'TokenType')
- found = yield model.SessionToken.lookup(instance.identifier)
+ instance = model.SessionToken.generate('username', 'TokenType')
+ found = model.SessionToken.lookup(instance.identifier)
self.assert_(found)
def test_update_session_token_expiry(self):
@@ -264,34 +262,29 @@ class ModelTestCase(test.TrialTestCase):
expiry = utils.parse_isotime(instance['expiry'])
self.assert_(expiry > datetime.utcnow())
- @defer.inlineCallbacks
def test_session_token_lookup_when_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
instance.save()
inst = model.SessionToken.lookup(instance.identifier)
self.assertFalse(inst)
- @defer.inlineCallbacks
def test_session_token_lookup_when_not_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
inst = model.SessionToken.lookup(instance.identifier)
self.assert_(inst)
- @defer.inlineCallbacks
def test_session_token_is_expired_when_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
self.assert_(instance.is_expired())
- @defer.inlineCallbacks
def test_session_token_is_expired_when_not_expired(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
self.assertFalse(instance.is_expired())
- @defer.inlineCallbacks
def test_session_token_ttl(self):
- instance = yield model.SessionToken.generate("testuser")
+ instance = model.SessionToken.generate("testuser")
now = datetime.utcnow()
delta = timedelta(hours=1)
instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f24eefb0d..879ee02a4 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -24,8 +24,10 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.compute import network
-from nova.compute.exception import NoMoreAddresses
+from nova.network import model
+from nova.network import service
+from nova.network import vpn
+from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
@@ -52,7 +54,8 @@ class NetworkTestCase(test.TrialTestCase):
self.projects.append(self.manager.create_project(name,
'netuser',
name))
- self.network = network.PublicNetworkController()
+ self.network = model.PublicNetworkController()
+ self.service = service.VlanNetworkService()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
@@ -66,16 +69,17 @@ class NetworkTestCase(test.TrialTestCase):
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
- def test_allocate_deallocate_ip(self):
- address = network.allocate_ip(
- self.user.id, self.projects[0].id, utils.generate_mac())
+ def test_allocate_deallocate_fixed_ip(self):
+ result = yield self.service.allocate_fixed_ip(
+ self.user.id, self.projects[0].id)
+ address = result['private_dns_name']
+ mac = result['mac_address']
logging.debug("Was allocated %s" % (address))
- net = network.get_project_network(self.projects[0].id, "default")
+ net = model.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
- mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
- rv = network.deallocate_ip(address)
+ rv = self.service.deallocate_fixed_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, self.projects[0].id))
@@ -84,15 +88,18 @@ class NetworkTestCase(test.TrialTestCase):
self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
- mac = utils.generate_mac()
- secondmac = utils.generate_mac()
hostname = "test-host"
- address = network.allocate_ip(
- self.user.id, self.projects[0].id, mac)
- secondaddress = network.allocate_ip(
- self.user, self.projects[1].id, secondmac)
- net = network.get_project_network(self.projects[0].id, "default")
- secondnet = network.get_project_network(self.projects[1].id, "default")
+ result = yield self.service.allocate_fixed_ip(
+ self.user.id, self.projects[0].id)
+ mac = result['mac_address']
+ address = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, self.projects[1].id)
+ secondmac = result['mac_address']
+ secondaddress = result['private_dns_name']
+
+ net = model.get_project_network(self.projects[0].id, "default")
+ secondnet = model.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
@@ -103,46 +110,64 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.issue_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
- rv = network.deallocate_ip(address)
+ rv = self.service.deallocate_fixed_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
- rv = network.deallocate_ip(secondaddress)
+ rv = self.service.deallocate_fixed_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
- secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
- utils.generate_mac())
+ result = yield self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
+ firstaddress = result['private_dns_name']
hostname = "toomany-hosts"
for i in range(1,5):
project_id = self.projects[i].id
- mac = utils.generate_mac()
- mac2 = utils.generate_mac()
- mac3 = utils.generate_mac()
- address = network.allocate_ip(
- self.user, project_id, mac)
- address2 = network.allocate_ip(
- self.user, project_id, mac2)
- address3 = network.allocate_ip(
- self.user, project_id, mac3)
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac = result['mac_address']
+ address = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac2 = result['mac_address']
+ address2 = result['private_dns_name']
+ result = yield self.service.allocate_fixed_ip(
+ self.user, project_id)
+ mac3 = result['mac_address']
+ address3 = result['private_dns_name']
self.assertEqual(False, is_in_project(address, self.projects[0].id))
self.assertEqual(False, is_in_project(address2, self.projects[0].id))
self.assertEqual(False, is_in_project(address3, self.projects[0].id))
- rv = network.deallocate_ip(address)
- rv = network.deallocate_ip(address2)
- rv = network.deallocate_ip(address3)
- net = network.get_project_network(project_id, "default")
+ rv = self.service.deallocate_fixed_ip(address)
+ rv = self.service.deallocate_fixed_ip(address2)
+ rv = self.service.deallocate_fixed_ip(address3)
+ net = model.get_project_network(project_id, "default")
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
- net = network.get_project_network(self.projects[0].id, "default")
- rv = network.deallocate_ip(secondaddress)
- self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
+ net = model.get_project_network(self.projects[0].id, "default")
+ rv = self.service.deallocate_fixed_ip(firstaddress)
+ self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
+
+ def test_212_vpn_ip_and_port_looks_valid(self):
+ vpn.NetworkData.create(self.projects[0].id)
+ self.assert_(self.projects[0].vpn_ip)
+ self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
+ self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
+
+ def test_too_many_vpns(self):
+ vpns = []
+ for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
+ vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
+ self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
+ for network_datum in vpns:
+ network_datum.destroy()
def test_release_before_deallocate(self):
pass
@@ -169,7 +194,7 @@ class NetworkTestCase(test.TrialTestCase):
NUM_RESERVED_VPN_IPS)
usable addresses
"""
- net = network.get_project_network(self.projects[0].id, "default")
+ net = model.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
@@ -183,22 +208,23 @@ class NetworkTestCase(test.TrialTestCase):
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
- macs[i] = utils.generate_mac()
- addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
+ result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
+ macs[i] = result['mac_address']
+ addresses[i] = result['private_dns_name']
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
- self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
+ self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
for i in range(0, (num_available_ips - 1)):
- rv = network.deallocate_ip(addresses[i])
+ rv = self.service.deallocate_fixed_ip(addresses[i])
self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
- return address in network.get_project_network(project_id).list_addresses()
+ return address in model.get_project_network(project_id).list_addresses()
def _get_project_addresses(project_id):
project_addresses = []
- for addr in network.get_project_network(project_id).list_addresses():
+ for addr in model.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
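For context, the rewritten network tests consume the new service API rather than the old module-level helpers; a minimal sketch of that call shape, assuming the VlanNetworkService imported above and placeholder user/project ids:

# Hedged sketch; the dict keys mirror those used by the tests above.
from twisted.internet import defer
from nova.network import service

@defer.inlineCallbacks
def allocate_and_release(user_id, project_id):
    svc = service.VlanNetworkService()
    result = yield svc.allocate_fixed_ip(user_id, project_id)
    address = result['private_dns_name']
    mac = result['mac_address']
    # ... hand address/mac to the instance being launched ...
    yield svc.deallocate_fixed_ip(address)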
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index dd00377e7..dece4b5d5 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -16,6 +16,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Unittests for the S3 objectstore clone.
+"""
+
import boto
import glob
import hashlib
@@ -24,76 +28,69 @@ import os
import shutil
import tempfile
+from boto.s3.connection import S3Connection, OrdinaryCallingFormat
+from twisted.internet import reactor, threads, defer
+from twisted.web import http, server
+
from nova import flags
from nova import objectstore
-from nova.objectstore import bucket # for buckets_path flag
-from nova.objectstore import image # for images_path flag
from nova import test
from nova.auth import manager
+from nova.exception import NotEmpty, NotFound
+from nova.objectstore import image
from nova.objectstore.handler import S3
-from nova.exception import NotEmpty, NotFound, NotAuthorized
-from boto.s3.connection import S3Connection, OrdinaryCallingFormat
-from twisted.internet import reactor, threads, defer
-from twisted.web import http, server
FLAGS = flags.FLAGS
-oss_tempdir = tempfile.mkdtemp(prefix='test_oss-')
-
+# Create a unique temporary directory. We don't delete it after the tests so
+# that the contents can be inspected afterwards. Users and/or tools running
+# the tests need to remove the test directories.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-# delete tempdirs from previous runs (we don't delete after test to allow
-# checking the contents after running tests)
-# TODO: This fails on the test box with a permission denied error
-# Also, doing these things in a global tempdir means that different runs of
-# the test suite on the same box could clobber each other.
-#for path in glob.glob(os.path.abspath(os.path.join(oss_tempdir, '../test_oss-*'))):
-# if path != oss_tempdir:
-# shutil.rmtree(path)
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
-# create bucket/images path
-os.makedirs(os.path.join(oss_tempdir, 'images'))
-os.makedirs(os.path.join(oss_tempdir, 'buckets'))
-
class ObjectStoreTestCase(test.BaseTestCase):
- def setUp(self):
+ """Test objectstore API directly."""
+
+ def setUp(self): # pylint: disable-msg=C0103
+ """Setup users and projects."""
super(ObjectStoreTestCase, self).setUp()
- self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
- images_path=os.path.join(oss_tempdir, 'images'),
+ self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)
- self.um = manager.AuthManager()
- try:
- self.um.create_user('user1')
- except: pass
- try:
- self.um.create_user('user2')
- except: pass
- try:
- self.um.create_user('admin_user', admin=True)
- except: pass
- try:
- self.um.create_project('proj1', 'user1', 'a proj', ['user1'])
- except: pass
- try:
- self.um.create_project('proj2', 'user2', 'a proj', ['user2'])
- except: pass
- class Context(object): pass
+ self.auth_manager = manager.AuthManager()
+ self.auth_manager.create_user('user1')
+ self.auth_manager.create_user('user2')
+ self.auth_manager.create_user('admin_user', admin=True)
+ self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
+ self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
+
+ class Context(object):
+ """Dummy context for running tests."""
+ user = None
+ project = None
+
self.context = Context()
- def tearDown(self):
- self.um.delete_project('proj1')
- self.um.delete_project('proj2')
- self.um.delete_user('user1')
- self.um.delete_user('user2')
- self.um.delete_user('admin_user')
+ def tearDown(self): # pylint: disable-msg=C0103
+ """Tear down users and projects."""
+ self.auth_manager.delete_project('proj1')
+ self.auth_manager.delete_project('proj2')
+ self.auth_manager.delete_user('user1')
+ self.auth_manager.delete_user('user2')
+ self.auth_manager.delete_user('admin_user')
super(ObjectStoreTestCase, self).tearDown()
def test_buckets(self):
- self.context.user = self.um.get_user('user1')
- self.context.project = self.um.get_project('proj1')
+ """Test the bucket API."""
+ self.context.user = self.auth_manager.get_user('user1')
+ self.context.project = self.auth_manager.get_project('proj1')
objectstore.bucket.Bucket.create('new_bucket', self.context)
bucket = objectstore.bucket.Bucket('new_bucket')
@@ -101,12 +98,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assert_(bucket.is_authorized(self.context))
# another user is not authorized
- self.context.user = self.um.get_user('user2')
- self.context.project = self.um.get_project('proj2')
+ self.context.user = self.auth_manager.get_user('user2')
+ self.context.project = self.auth_manager.get_project('proj2')
self.assertFalse(bucket.is_authorized(self.context))
# admin is authorized to use bucket
- self.context.user = self.um.get_user('admin_user')
+ self.context.user = self.auth_manager.get_user('admin_user')
self.context.project = None
self.assertTrue(bucket.is_authorized(self.context))
@@ -136,8 +133,9 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
- self.context.user = self.um.get_user('user1')
- self.context.project = self.um.get_project('proj1')
+ "Test the image API."
+ self.context.user = self.auth_manager.get_user('user1')
+ self.context.project = self.auth_manager.get_project('proj1')
# create a bucket for our bundle
objectstore.bucket.Bucket.create('image_bucket', self.context)
@@ -149,10 +147,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
bucket[os.path.basename(path)] = open(path, 'rb').read()
# register an image
- objectstore.image.Image.register_aws_image('i-testing', 'image_bucket/1mb.manifest.xml', self.context)
+ image.Image.register_aws_image('i-testing',
+ 'image_bucket/1mb.manifest.xml',
+ self.context)
# verify image
- my_img = objectstore.image.Image('i-testing')
+ my_img = image.Image('i-testing')
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)
@@ -160,38 +160,48 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
# verify image permissions
- self.context.user = self.um.get_user('user2')
- self.context.project = self.um.get_project('proj2')
+ self.context.user = self.auth_manager.get_user('user2')
+ self.context.project = self.auth_manager.get_project('proj2')
self.assertFalse(my_img.is_authorized(self.context))
class TestHTTPChannel(http.HTTPChannel):
- # Otherwise we end up with an unclean reactor
- def checkPersistence(self, _, __):
+ """Dummy site required for twisted.web"""
+
+ def checkPersistence(self, _, __): # pylint: disable-msg=C0103
+ """Otherwise we end up with an unclean reactor."""
return False
class TestSite(server.Site):
+ """Dummy site required for twisted.web"""
protocol = TestHTTPChannel
class S3APITestCase(test.TrialTestCase):
- def setUp(self):
+ """Test objectstore through S3 API."""
+
+ def setUp(self): # pylint: disable-msg=C0103
+ """Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
- FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
- FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
+ FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver',
+ FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
- self.um = manager.AuthManager()
- self.admin_user = self.um.create_user('admin', admin=True)
- self.admin_project = self.um.create_project('admin', self.admin_user)
+ self.auth_manager = manager.AuthManager()
+ self.admin_user = self.auth_manager.create_user('admin', admin=True)
+ self.admin_project = self.auth_manager.create_project('admin',
+ self.admin_user)
shutil.rmtree(FLAGS.buckets_path)
os.mkdir(FLAGS.buckets_path)
root = S3()
self.site = TestSite(root)
- self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1')
+ # pylint: disable-msg=E1101
+ self.listening_port = reactor.listenTCP(0, self.site,
+ interface='127.0.0.1')
+ # pylint: enable-msg=E1101
self.tcp_port = self.listening_port.getHost().port
@@ -205,65 +215,90 @@ class S3APITestCase(test.TrialTestCase):
is_secure=False,
calling_format=OrdinaryCallingFormat())
- # Don't attempt to reuse connections
def get_http_connection(host, is_secure):
+ """Get a new S3 connection, don't attempt to reuse connections."""
return self.conn.new_http_connection(host, is_secure)
+
self.conn.get_http_connection = get_http_connection
- def _ensure_empty_list(self, l):
- self.assertEquals(len(l), 0, "List was not empty")
+ def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
+ self.assertEquals(len(buckets), 0, "Bucket list was not empty")
return True
- def _ensure_only_bucket(self, l, name):
- self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
- self.assertEquals(l[0].name, name, "Wrong name")
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
+ self.assertEquals(len(buckets), 1,
+ "Bucket list didn't have exactly one element in it")
+ self.assertEquals(buckets[0].name, name, "Wrong name")
+ return True
def test_000_list_buckets(self):
- d = threads.deferToThread(self.conn.get_all_buckets)
- d.addCallback(self._ensure_empty_list)
- return d
+ """Make sure we are starting with no buckets."""
+ deferred = threads.deferToThread(self.conn.get_all_buckets)
+ deferred.addCallback(self._ensure_no_buckets)
+ return deferred
def test_001_create_and_delete_bucket(self):
+ """Test bucket creation and deletion."""
bucket_name = 'testbucket'
- d = threads.deferToThread(self.conn.create_bucket, bucket_name)
- d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
+ deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
+ deferred.addCallback(lambda _:
+ threads.deferToThread(self.conn.get_all_buckets))
- def ensure_only_bucket(l, name):
- self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
- self.assertEquals(l[0].name, name, "Wrong name")
- d.addCallback(ensure_only_bucket, bucket_name)
+ deferred.addCallback(self._ensure_one_bucket, bucket_name)
- d.addCallback(lambda _:threads.deferToThread(self.conn.delete_bucket, bucket_name))
- d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
- d.addCallback(self._ensure_empty_list)
- return d
+ deferred.addCallback(lambda _:
+ threads.deferToThread(self.conn.delete_bucket,
+ bucket_name))
+ deferred.addCallback(lambda _:
+ threads.deferToThread(self.conn.get_all_buckets))
+ deferred.addCallback(self._ensure_no_buckets)
+ return deferred
def test_002_create_bucket_and_key_and_delete_key_again(self):
+ """Test key operations on buckets."""
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
- d = threads.deferToThread(self.conn.create_bucket, bucket_name)
- d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name))
- d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents))
+ deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
+ deferred.addCallback(lambda b:
+ threads.deferToThread(b.new_key, key_name))
+ deferred.addCallback(lambda k:
+ threads.deferToThread(k.set_contents_from_string,
+ key_contents))
+
def ensure_key_contents(bucket_name, key_name, contents):
+ """Verify contents for a key in the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
- self.assertEquals(key.get_contents_as_string(), contents, "Bad contents")
- d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents))
+ self.assertEquals(key.get_contents_as_string(), contents,
+ "Bad contents")
+
+ deferred.addCallback(lambda _:
+ threads.deferToThread(ensure_key_contents,
+ bucket_name, key_name,
+ key_contents))
+
def delete_key(bucket_name, key_name):
+ """Delete a key for the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.delete()
- d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name))
- d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name))
- d.addCallback(lambda b:threads.deferToThread(b.get_all_keys))
- d.addCallback(self._ensure_empty_list)
- return d
-
- def tearDown(self):
- self.um.delete_user('admin')
- self.um.delete_project('admin')
- return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)])
- super(S3APITestCase, self).tearDown()
+
+ deferred.addCallback(lambda _:
+ threads.deferToThread(delete_key, bucket_name,
+ key_name))
+ deferred.addCallback(lambda _:
+ threads.deferToThread(self.conn.get_bucket,
+ bucket_name))
+ deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
+ deferred.addCallback(self._ensure_no_buckets)
+ return deferred
+
+ def tearDown(self): # pylint: disable-msg=C0103
+ """Tear down auth and test server."""
+ self.auth_manager.delete_user('admin')
+ self.auth_manager.delete_project('admin')
+ stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
+ return defer.DeferredList([stop_listening])
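Because the objectstore test directories are intentionally left behind (see the OSS_TEMPDIR comment above), a small cleanup sketch may help; it assumes leftovers keep the 'test_oss-' prefix and live in the system temp directory:

# Hedged cleanup helper for leftover objectstore test directories.
import glob
import os
import shutil
import tempfile

for path in glob.glob(os.path.join(tempfile.gettempdir(), 'test_oss-*')):
    shutil.rmtree(path, ignore_errors=True)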
diff --git a/exercise_rsapi.py b/nova/tests/runtime_flags.py
index 20589b9cb..1eb501406 100644
--- a/exercise_rsapi.py
+++ b/nova/tests/runtime_flags.py
@@ -16,36 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import cloudservers
+from nova import flags
-class IdFake:
- def __init__(self, id):
- self.id = id
+FLAGS = flags.FLAGS
-# to get your access key:
-# from nova.auth import users
-# users.UserManger.instance().get_users()[0].access
-rscloud = cloudservers.CloudServers(
- 'admin',
- '6cca875e-5ab3-4c60-9852-abf5c5c60cc6'
- )
-rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0'
-
-
-rv = rscloud.servers.list()
-print "SERVERS: %s" % rv
-
-if len(rv) == 0:
- server = rscloud.servers.create(
- "test-server",
- IdFake("ami-tiny"),
- IdFake("m1.tiny")
- )
- print "LAUNCH: %s" % server
-else:
- server = rv[0]
- print "Server to kill: %s" % server
-
-raw_input("press enter key to kill the server")
-
-server.delete()
+flags.DEFINE_integer('runtime_answer', 54, 'test flag')
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index b536ac383..2a07afe69 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -17,6 +17,10 @@
# under the License.
import logging
+import shutil
+import tempfile
+
+from twisted.internet import defer
from nova import compute
from nova import exception
@@ -34,48 +38,60 @@ class VolumeTestCase(test.TrialTestCase):
super(VolumeTestCase, self).setUp()
self.compute = compute.service.ComputeService()
self.volume = None
+ self.tempdir = tempfile.mkdtemp()
self.flags(connection_type='fake',
- fake_storage=True)
+ fake_storage=True,
+ aoe_export_dir=self.tempdir)
self.volume = volume_service.VolumeService()
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ @defer.inlineCallbacks
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
project_id = 'fake'
- volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+ volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
volume_service.get_volume(volume_id)['volume_id'])
rv = self.volume.delete_volume(volume_id)
- self.assertRaises(exception.Error,
- volume_service.get_volume,
- volume_id)
+ self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
+ @defer.inlineCallbacks
def test_too_big_volume(self):
vol_size = '1001'
user_id = 'fake'
project_id = 'fake'
- self.assertRaises(TypeError,
- self.volume.create_volume,
- vol_size, user_id, project_id)
+ try:
+ yield self.volume.create_volume(vol_size, user_id, project_id)
+ self.fail("Should have thrown TypeError")
+ except TypeError:
+ pass
+ @defer.inlineCallbacks
def test_too_many_volumes(self):
vol_size = '1'
user_id = 'fake'
project_id = 'fake'
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
- total_slots = FLAGS.slots_per_shelf * num_shelves
+ total_slots = FLAGS.blades_per_shelf * num_shelves
vols = []
+ from nova import datastore
+ redis = datastore.Redis.instance()
for i in xrange(total_slots):
- vid = self.volume.create_volume(vol_size, user_id, project_id)
+ vid = yield self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
- self.assertRaises(volume_service.NoMoreVolumes,
- self.volume.create_volume,
- vol_size, user_id, project_id)
+ self.assertFailure(self.volume.create_volume(vol_size,
+ user_id,
+ project_id),
+ volume_service.NoMoreBlades)
for id in vols:
- self.volume.delete_volume(id)
+ yield self.volume.delete_volume(id)
+ @defer.inlineCallbacks
def test_run_attach_detach_volume(self):
# Create one volume and one compute to test with
instance_id = "storage-test"
@@ -83,23 +99,27 @@ class VolumeTestCase(test.TrialTestCase):
user_id = "fake"
project_id = 'fake'
mountpoint = "/dev/sdf"
- volume_id = self.volume.create_volume(vol_size, user_id, project_id)
-
+ volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
volume_obj = volume_service.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
- rv = yield self.compute.attach_volume(volume_id,
- instance_id,
- mountpoint)
+ if FLAGS.fake_tests:
+ volume_obj.finish_attach()
+ else:
+ rv = yield self.compute.attach_volume(instance_id,
+ volume_id,
+ mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
- self.assertEqual(volume_obj['attachStatus'], "attached")
+ self.assertEqual(volume_obj['attach_status'], "attached")
self.assertEqual(volume_obj['instance_id'], instance_id)
self.assertEqual(volume_obj['mountpoint'], mountpoint)
- self.assertRaises(exception.Error,
- self.volume.delete_volume,
- volume_id)
-
- rv = yield self.volume.detach_volume(volume_id)
+ self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
+ volume_obj.start_detach()
+ if FLAGS.fake_tests:
+ volume_obj.finish_detach()
+ else:
+ rv = yield self.volume.detach_volume(instance_id,
+ volume_id)
volume_obj = volume_service.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
@@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase):
volume_service.get_volume,
volume_id)
+ @defer.inlineCallbacks
+ def test_multiple_volume_race_condition(self):
+ vol_size = "5"
+ user_id = "fake"
+ project_id = 'fake'
+ shelf_blades = []
+ def _check(volume_id):
+ vol = volume_service.get_volume(volume_id)
+ shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
+ self.assert_(shelf_blade not in shelf_blades)
+ shelf_blades.append(shelf_blade)
+ logging.debug("got %s" % shelf_blade)
+ vol.destroy()
+ deferreds = []
+ for i in range(5):
+ d = self.volume.create_volume(vol_size, user_id, project_id)
+ d.addCallback(_check)
+ d.addErrback(self.fail)
+ deferreds.append(d)
+ yield defer.DeferredList(deferreds)
+
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
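The volume tests above lean on two Twisted Trial idioms, assertFailure and DeferredList; a minimal self-contained sketch of both (the test case and values here are hypothetical):

# Hedged sketch of the Trial patterns used above.
from twisted.internet import defer
from twisted.trial import unittest

class ExampleTestCase(unittest.TestCase):
    @defer.inlineCallbacks
    def test_patterns(self):
        # assertFailure returns a Deferred that fires once the wrapped
        # Deferred has failed with the expected exception type.
        yield self.assertFailure(defer.fail(ValueError('boom')), ValueError)
        # DeferredList gathers a batch of Deferreds, as in the race test.
        results = yield defer.DeferredList([defer.succeed(i) for i in range(5)])
        self.assertEqual(len(results), 5)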
diff --git a/nova/utils.py b/nova/utils.py
index 0016b656e..0b23de7cd 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -41,7 +41,7 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
- except (ImportError, AttributeError):
+ except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
def fetchfile(url, target):
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 698536324..1e23c48b9 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -28,6 +28,7 @@ import urlparse
from nova import flags
from nova import process
from nova.auth import signer
+from nova.auth import manager
FLAGS = flags.FLAGS
@@ -35,14 +36,14 @@ flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
-def fetch(image, path, user):
+def fetch(image, path, user, project):
if FLAGS.use_s3:
f = _fetch_s3_image
else:
f = _fetch_local_image
- return f(image, path, user)
+ return f(image, path, user, project)
-def _fetch_s3_image(image, path, user):
+def _fetch_s3_image(image, path, user, project):
url = image_url(image)
# This should probably move somewhere else, like e.g. a download_as
@@ -52,8 +53,11 @@ def _fetch_s3_image(image, path, user):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
(_, _, url_path, _, _, _) = urlparse.urlparse(url)
- auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url_path)
- headers['Authorization'] = 'AWS %s:%s' % (user.access, auth)
+ access = manager.AuthManager().get_access_key(user, project)
+ signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
+ 'GET',
+ url_path)
+ headers['Authorization'] = 'AWS %s:%s' % (access, signature)
cmd = ['/usr/bin/curl', '--silent', url]
for (k,v) in headers.iteritems():
@@ -62,7 +66,7 @@ def _fetch_s3_image(image, path, user):
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
-def _fetch_local_image(image, path, _):
+def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
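To illustrate the new call shape (fetch now takes the project so the per-project access key can be used for signing), a hedged sketch; the image id, path, and user/project names are placeholders:

# Hedged sketch of the updated images.fetch() signature.
from nova.auth import manager
from nova.virt import images

auth = manager.AuthManager()
user = auth.get_user('fake')
project = auth.get_project('fake')
# Returns a deferred from the process pool (curl download when use_s3 is set).
d = images.fetch('ami-tiny', '/tmp/disk-raw', user, project)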
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index c545e4190..13305be0f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -25,7 +25,6 @@ import json
import logging
import os.path
import shutil
-import sys
from twisted.internet import defer
from twisted.internet import task
@@ -47,6 +46,13 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
'Libvirt XML Template')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('compute/interfaces.template'),
+ 'Template file for injected network')
+
+flags.DEFINE_string('libvirt_type',
+ 'kvm',
+ 'Libvirt domain type (kvm, qemu, etc)')
def get_connection(read_only):
# These are loaded late so that there's no need to install these
@@ -108,7 +114,8 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.abspath(instance.datamodel['basepath'])
logging.info("Deleting instance files at %s", target)
- shutil.rmtree(target)
+ if os.path.exists(target):
+ shutil.rmtree(target)
@defer.inlineCallbacks
@@ -187,12 +194,13 @@ class LibvirtConnection(object):
f.close()
user = manager.AuthManager().get_user(data['user_id'])
+ project = manager.AuthManager().get_project(data['project_id'])
if not os.path.exists(basepath('disk')):
- yield images.fetch(data['image_id'], basepath('disk-raw'), user)
+ yield images.fetch(data['image_id'], basepath('disk-raw'), user, project)
if not os.path.exists(basepath('kernel')):
- yield images.fetch(data['kernel_id'], basepath('kernel'), user)
+ yield images.fetch(data['kernel_id'], basepath('kernel'), user, project)
if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user)
+ yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user, project)
execute = lambda cmd, input=None: \
process.simple_execute(cmd=cmd,
@@ -201,14 +209,14 @@ class LibvirtConnection(object):
key = data['key_data']
net = None
- if FLAGS.simple_network:
- with open(FLAGS.simple_network_template) as f:
+ if data.get('inject_network', False):
+ with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': data['private_dns_name'],
- 'network': FLAGS.simple_network_network,
- 'netmask': FLAGS.simple_network_netmask,
- 'gateway': FLAGS.simple_network_gateway,
- 'broadcast': FLAGS.simple_network_broadcast,
- 'dns': FLAGS.simple_network_dns}
+ 'network': data['network_network'],
+ 'netmask': data['network_netmask'],
+ 'gateway': data['network_gateway'],
+ 'broadcast': data['network_broadcast'],
+ 'dns': data['network_dns']}
if key or net:
logging.info('Injecting data into image %s', data['image_id'])
yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
@@ -235,6 +243,7 @@ class LibvirtConnection(object):
# TODO(termie): lazy lazy hack because xml is annoying
xml_info['nova'] = json.dumps(instance.datamodel.copy())
+ xml_info['type'] = FLAGS.libvirt_type
libvirt_xml = libvirt_xml % xml_info
logging.debug("Finished the toXML method")
@@ -255,7 +264,7 @@ class LibvirtConnection(object):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
-
+
Returns a list of all block devices for this domain.
"""
domain = self._conn.lookupByName(instance_id)
@@ -298,7 +307,7 @@ class LibvirtConnection(object):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
-
+
Returns a list of all network interfaces for this instance.
"""
domain = self._conn.lookupByName(instance_id)
@@ -341,7 +350,7 @@ class LibvirtConnection(object):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
- """
+ """
domain = self._conn.lookupByName(instance_id)
return domain.blockStats(disk)
@@ -350,6 +359,6 @@ class LibvirtConnection(object):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
- """
+ """
domain = self._conn.lookupByName(instance_id)
return domain.interfaceStats(interface)
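The injection change above replaces the simple_network flags with per-instance values interpolated into a template; a small sketch of that interpolation, where the template body is hypothetical and only the substitution keys come from the code above:

# Hedged sketch of the network injection template interpolation.
template = ('address %(address)s\n'
            'network %(network)s\n'
            'netmask %(netmask)s\n'
            'gateway %(gateway)s\n'
            'broadcast %(broadcast)s\n'
            'dns-nameservers %(dns)s\n')
net = template % {'address': '10.0.0.1',
                  'network': '10.0.0.0',
                  'netmask': '255.255.255.0',
                  'gateway': '10.0.0.254',
                  'broadcast': '10.0.0.255',
                  'dns': '8.8.8.8'}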
diff --git a/nova/volume/service.py b/nova/volume/service.py
index 87a47f40a..66163a812 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS.
Currently uses Ata-over-Ethernet.
"""
-import glob
import logging
import os
-import shutil
-import socket
-import tempfile
from twisted.internet import defer
@@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
-flags.DEFINE_string('storage_name',
- socket.gethostname(),
- 'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
'AoE starting shelf_id for this service')
@@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id',
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
-flags.DEFINE_integer('slots_per_shelf',
+flags.DEFINE_integer('blades_per_shelf',
16,
- 'Number of AoE slots per shelf')
+ 'Number of AoE blades per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this service')
@@ -69,7 +62,7 @@ flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-class NoMoreVolumes(exception.Error):
+class NoMoreBlades(exception.Error):
pass
def get_volume(volume_id):
@@ -77,8 +70,9 @@ def get_volume(volume_id):
volume_class = Volume
if FLAGS.fake_storage:
volume_class = FakeVolume
- if datastore.Redis.instance().sismember('volumes', volume_id):
- return volume_class(volume_id=volume_id)
+ vol = volume_class.lookup(volume_id)
+ if vol:
+ return vol
raise exception.Error("Volume does not exist")
class VolumeService(service.Service):
@@ -91,18 +85,10 @@ class VolumeService(service.Service):
super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
- FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
- def __del__(self):
- # TODO(josh): Get rid of this destructor, volumes destroy themselves
- if FLAGS.fake_storage:
- try:
- shutil.rmtree(FLAGS.aoe_export_dir)
- except Exception, err:
- pass
-
+ @defer.inlineCallbacks
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
"""
@@ -111,11 +97,10 @@ class VolumeService(service.Service):
Volume at this point has size, owner, and zone.
"""
logging.debug("Creating volume of size: %s" % (size))
- vol = self.volume_class.create(size, user_id, project_id)
- datastore.Redis.instance().sadd('volumes', vol['volume_id'])
- datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
- self._restart_exports()
- return vol['volume_id']
+ vol = yield self.volume_class.create(size, user_id, project_id)
+ logging.debug("restarting exports")
+ yield self._restart_exports()
+ defer.returnValue(vol['volume_id'])
def by_node(self, node_id):
""" returns a list of volumes for a node """
@@ -128,26 +113,24 @@ class VolumeService(service.Service):
for volume_id in datastore.Redis.instance().smembers('volumes'):
yield self.volume_class(volume_id=volume_id)
+ @defer.inlineCallbacks
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
vol = get_volume(volume_id)
- if vol['status'] == "attached":
+ if vol['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
- if vol['node_name'] != FLAGS.storage_name:
+ if vol['node_name'] != FLAGS.node_name:
raise exception.Error("Volume is not local to this node")
- vol.destroy()
- datastore.Redis.instance().srem('volumes', vol['volume_id'])
- datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
- return True
+ yield vol.destroy()
+ defer.returnValue(True)
@defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
- yield process.simple_execute(
- "sudo vblade-persist auto all")
- yield process.simple_execute(
- "sudo vblade-persist start all")
+ # NOTE(vish): these commands sometimes send output to stderr for warnings
+ yield process.simple_execute("sudo vblade-persist auto all", error_ok=1)
+ yield process.simple_execute("sudo vblade-persist start all", error_ok=1)
@defer.inlineCallbacks
def _init_volume_group(self):
@@ -170,13 +153,15 @@ class Volume(datastore.BasicModel):
return self.volume_id
def default_state(self):
- return {"volume_id": self.volume_id}
+ return {"volume_id": self.volume_id,
+ "node_name": "unassigned"}
@classmethod
+ @defer.inlineCallbacks
def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
vol = cls(volume_id)
- vol['node_name'] = FLAGS.storage_name
+ vol['node_name'] = FLAGS.node_name
vol['size'] = size
vol['user_id'] = user_id
vol['project_id'] = project_id
@@ -188,13 +173,12 @@ class Volume(datastore.BasicModel):
vol['attach_status'] = "detached" # attaching | attached | detaching | detached
vol['delete_on_termination'] = 'False'
vol.save()
- vol.create_lv()
- vol._setup_export()
+ yield vol._create_lv()
+ yield vol._setup_export()
# TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
- # TODO(joshua
vol['status'] = "available"
vol.save()
- return vol
+ defer.returnValue(vol)
def start_attach(self, instance_id, mountpoint):
""" """
@@ -223,16 +207,35 @@ class Volume(datastore.BasicModel):
self['attach_status'] = "detached"
self.save()
+ def save(self):
+ is_new = self.is_new_record()
+ super(Volume, self).save()
+ if is_new:
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ # TODO(vish): these should be added by admin commands
+ more = redis.scard(self._redis_association_name("node",
+ self['node_name']))
+ if (not redis.exists(key) and not more):
+ for shelf_id in range(FLAGS.first_shelf_id,
+ FLAGS.last_shelf_id + 1):
+ for blade_id in range(FLAGS.blades_per_shelf):
+ redis.sadd(key, "%s.%s" % (shelf_id, blade_id))
+ self.associate_with("node", self['node_name'])
+
+ @defer.inlineCallbacks
def destroy(self):
- try:
- self._remove_export()
- except:
- pass
- self._delete_lv()
+ yield self._remove_export()
+ yield self._delete_lv()
+ self.unassociate_with("node", self['node_name'])
+ if self.get('shelf_id', None) and self.get('blade_id', None):
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id']))
super(Volume, self).destroy()
@defer.inlineCallbacks
- def create_lv(self):
+ def _create_lv(self):
if str(self['size']) == '0':
sizestr = '100M'
else:
@@ -240,65 +243,72 @@ class Volume(datastore.BasicModel):
yield process.simple_execute(
"sudo lvcreate -L %s -n %s %s" % (sizestr,
self['volume_id'],
- FLAGS.volume_group))
+ FLAGS.volume_group),
+ error_ok=1)
@defer.inlineCallbacks
def _delete_lv(self):
yield process.simple_execute(
"sudo lvremove -f %s/%s" % (FLAGS.volume_group,
- self['volume_id']))
+ self['volume_id']), error_ok=1)
+ @property
+ def __devices_key(self):
+ return 'volume_devices:%s' % FLAGS.node_name
+
+ @defer.inlineCallbacks
def _setup_export(self):
- (shelf_id, blade_id) = get_next_aoe_numbers()
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ device = redis.spop(key)
+ if not device:
+ raise NoMoreBlades()
+ (shelf_id, blade_id) = device.split('.')
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
- self._exec_export()
+ yield self._exec_setup_export()
@defer.inlineCallbacks
- def _exec_export(self):
+ def _exec_setup_export(self):
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
- self['volume_id']))
+ self['volume_id']), error_ok=1)
@defer.inlineCallbacks
def _remove_export(self):
+ if not self.get('shelf_id', None) or not self.get('blade_id', None):
+ defer.returnValue(False)
+ yield self._exec_remove_export()
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ def _exec_remove_export(self):
yield process.simple_execute(
"sudo vblade-persist stop %s %s" % (self['shelf_id'],
- self['blade_id']))
+ self['blade_id']), error_ok=1)
yield process.simple_execute(
"sudo vblade-persist destroy %s %s" % (self['shelf_id'],
- self['blade_id']))
+ self['blade_id']), error_ok=1)
+
class FakeVolume(Volume):
- def create_lv(self):
+ def _create_lv(self):
pass
- def _exec_export(self):
+ def _exec_setup_export(self):
fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
f = file(fname, "w")
f.close()
- def _remove_export(self):
- pass
+ def _exec_remove_export(self):
+ os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device']))
def _delete_lv(self):
pass
-
-def get_next_aoe_numbers():
- for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
- aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
- if not aoes:
- blade_id = 0
- else:
- blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
- if blade_id < FLAGS.slots_per_shelf:
- logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
- return (shelf_id, blade_id)
- raise NoMoreVolumes()
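The new export bookkeeping keeps free AoE devices in a per-node Redis set of 'shelf.blade' pairs; a minimal sketch of the scheme, assuming a local Redis and the volume_devices:<node_name> key format from __devices_key (the node name is a placeholder):

# Hedged sketch of the AoE device pool kept in Redis.
import redis

r = redis.Redis()
key = 'volume_devices:testnode'
for shelf_id in range(1, 3):
    for blade_id in range(16):
        r.sadd(key, '%s.%s' % (shelf_id, blade_id))

device = r.spop(key)                   # atomically claim a free shelf.blade
shelf_id, blade_id = device.split('.')
r.sadd(key, device)                    # returned to the pool on destroy()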
diff --git a/nova/wsgi.py b/nova/wsgi.py
new file mode 100644
index 000000000..4fd6e59e3
--- /dev/null
+++ b/nova/wsgi.py
@@ -0,0 +1,173 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility methods for working with WSGI servers
+"""
+
+import logging
+import sys
+
+import eventlet
+import eventlet.wsgi
+eventlet.patcher.monkey_patch(all=False, socket=True)
+import routes
+import routes.middleware
+
+
+logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+
+
+def run_server(application, port):
+ """Run a WSGI server with the given application."""
+ sock = eventlet.listen(('0.0.0.0', port))
+ eventlet.wsgi.server(sock, application)
+
+
+class Application(object):
+ """Base WSGI application wrapper. Subclasses need to implement __call__."""
+
+ def __call__(self, environ, start_response):
+ r"""Subclasses will probably want to implement __call__ like this:
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ # Any of the following objects work as responses:
+
+ # Option 1: simple string
+ res = 'message\n'
+
+ # Option 2: a nicely formatted HTTP exception page
+ res = exc.HTTPForbidden(detail='Nice try')
+
+            # Option 3: a webob Response object (in case you need to play with
+            # headers, or you want to be treated like an iterable, and so on)
+            res = Response()
+ res.app_iter = open('somefile')
+
+ # Option 4: any wsgi app to be run next
+ res = self.application
+
+ # Option 5: you can get a Response object for a wsgi app, too, to
+ # play with headers etc
+ res = req.get_response(self.application)
+
+ # You can then just return your response...
+ return res
+ # ... or set req.response and return None.
+ req.response = res
+
+ See the end of http://pythonpaste.org/webob/modules/dec.html
+ for more info.
+ """
+ raise NotImplementedError("You must implement __call__")
+
+
+class Middleware(Application): # pylint: disable-msg=W0223
+ """Base WSGI middleware wrapper. These classes require an
+ application to be initialized that will be called next."""
+
+ def __init__(self, application): # pylint: disable-msg=W0231
+ self.application = application
+
+
+class Debug(Middleware):
+ """Helper class that can be insertd into any WSGI application chain
+ to get information about the request and response."""
+
+ def __call__(self, environ, start_response):
+ for key, value in environ.items():
+ print key, "=", value
+ print
+ wrapper = debug_start_response(start_response)
+ return debug_print_body(self.application(environ, wrapper))
+
+
+def debug_start_response(start_response):
+ """Wrap the start_response to capture when called."""
+
+ def wrapper(status, headers, exc_info=None):
+ """Print out all headers when start_response is called."""
+ print status
+ for (key, value) in headers:
+ print key, "=", value
+ print
+        return start_response(status, headers, exc_info)
+
+ return wrapper
+
+
+def debug_print_body(body):
+ """Print the body of the response as it is sent back."""
+
+ class Wrapper(object):
+ """Iterate through all the body parts and print before returning."""
+
+ def __iter__(self):
+ for part in body:
+ sys.stdout.write(part)
+ sys.stdout.flush()
+ yield part
+ print
+
+ return Wrapper()
+
+
+class ParsedRoutes(Middleware):
+ """Processed parsed routes from routes.middleware.RoutesMiddleware
+ and call either the controller if found or the default application
+ otherwise."""
+
+ def __call__(self, environ, start_response):
+ if environ['routes.route'] is None:
+ return self.application(environ, start_response)
+ app = environ['wsgiorg.routing_args'][1]['controller']
+ return app(environ, start_response)
+
+
+class Router(Middleware): # pylint: disable-msg=R0921
+ """Wrapper to help setup routes.middleware.RoutesMiddleware."""
+
+ def __init__(self, application):
+ self.map = routes.Mapper()
+ self._build_map()
+ application = ParsedRoutes(application)
+ application = routes.middleware.RoutesMiddleware(application, self.map)
+ super(Router, self).__init__(application)
+
+ def __call__(self, environ, start_response):
+ return self.application(environ, start_response)
+
+ def _build_map(self):
+ """Method to create new connections for the routing map."""
+ raise NotImplementedError("You must implement _build_map")
+
+ def _connect(self, *args, **kwargs):
+ """Wrapper for the map.connect method."""
+ self.map.connect(*args, **kwargs)
+
+
+def route_args(application):
+ """Decorator to make grabbing routing args more convenient."""
+
+ def wrapper(self, req):
+ """Call application with req and parsed routing args from."""
+ return application(self, req, req.environ['wsgiorg.routing_args'][1])
+
+ return wrapper
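For orientation, a minimal sketch of how the pieces in nova/wsgi.py are meant
to compose: a Router subclass registers a controller in _build_map,
RoutesMiddleware matches the path, ParsedRoutes dispatches to the controller
stored in the routing args, and anything unmatched falls through to the
wrapped application. The class names, route, and port below are hypothetical,
and the handlers are written Python 2 style to match the tree:

from nova import wsgi


class HelloController(wsgi.Application):
    """Toy controller; any WSGI callable works here."""

    def __call__(self, environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from nova.wsgi\n']


class NotFound(wsgi.Application):
    """Fallback application for paths the router does not match."""

    def __call__(self, environ, start_response):
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return ['Not found\n']


class HelloRouter(wsgi.Router):
    """Routes /hello to the controller; everything else falls through."""

    def _build_map(self):
        self._connect('/hello', controller=HelloController())


if __name__ == '__main__':
    # Debug prints the environ, headers and body of every request/response.
    wsgi.run_server(wsgi.Debug(HelloRouter(NotFound())), 8080)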
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 000000000..53d02d6b2
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,19 @@
+[Messages Control]
+disable-msg=C0103
+
+[Basic]
+# Variables can be 1 to 31 characters long, with
+# lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+[MESSAGES CONTROL]
+# TODOs in code comments are fine...
+disable-msg=W0511
+
+[Design]
+max-public-methods=100
+min-public-methods=0
diff --git a/run_tests.py b/run_tests.py
index 5a8966f02..7fe6e73ec 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -54,6 +54,7 @@ from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
+from nova.tests.flags_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
@@ -68,7 +69,8 @@ flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
- 'Path to where to pipe STDERR during test runs. Default = "run_tests.err.log"')
+ 'Path to where to pipe STDERR during test runs. '
+ 'Default = "run_tests.err.log"')
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
diff --git a/run_tests.sh b/run_tests.sh
index 1bf3d1a79..85d7c8834 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -4,10 +4,9 @@ venv=.nova-venv
with_venv=tools/with_venv.sh
if [ -e ${venv} ]; then
- ${with_venv} python run_tests.py
+    ${with_venv} python run_tests.py "$@"
else
- echo "You need to install the Nova virtualenv before you can run this."
- echo ""
- echo "Please run tools/install_venv.py"
- exit 1
+ echo "No virtual environment found...creating one"
+ python tools/install_venv.py
+    ${with_venv} python run_tests.py "$@"
fi
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 0b35fc8e9..e1a270638 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,3 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
Installation script for Nova's development virtualenv
"""
@@ -7,20 +27,20 @@ import subprocess
import sys
-ROOT = os.path.dirname(os.path.dirname(__file__))
+ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
-
def die(message, *args):
print >>sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, error_ok=False):
- # Useful for debugging:
- #print >>sys.stderr, ' '.join(cmd)
+ """Runs a command in an out-of-process shell, returning the
+ output of that command
+ """
if redirect_output:
stdout = subprocess.PIPE
else:
@@ -33,32 +53,44 @@ def run_command(cmd, redirect_output=True, error_ok=False):
return output
+HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip())
+HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip())
+
+
def check_dependencies():
- """Make sure pip and virtualenv are on the path."""
- print 'Checking for pip...',
- if not run_command(['which', 'pip']).strip():
- die('ERROR: pip not found.\n\nNova development requires pip,'
- ' please install it using your favorite package management tool')
- print 'done.'
+ """Make sure virtualenv is in the path."""
print 'Checking for virtualenv...',
- if not run_command(['which', 'virtualenv']).strip():
- die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
- ' please install it using your favorite package management tool')
+ if not HAS_VIRTUALENV:
+ print 'not found.'
+ # Try installing it via easy_install...
+ if HAS_EASY_INSTALL:
+ print 'Installing virtualenv via easy_install...',
+            if not run_command(['easy_install', 'virtualenv']):
+                die('ERROR: Failed to install virtualenv.\n\nNova development requires'
+                    ' virtualenv, please install it using your favorite package management tool')
+ print 'done.'
print 'done.'
def create_virtualenv(venv=VENV):
+ """Creates the virtual environment and installs PIP only into the
+ virtual environment
+ """
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
+ print 'Installing pip in virtualenv...',
+ if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
+ die("Failed to install pip.")
+ print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
- run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
redirect_output=False)
- run_command(['pip', 'install', '-E', venv, TWISTED_NOVA],
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
redirect_output=False)
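The bootstrap above leans on the run_command() helper whose body sits mostly
outside this hunk. As a reference, here is a condensed, self-contained sketch
of the pattern it implements (capture or stream output, fail hard unless the
caller opts out); the exact failure handling is an assumption, since that part
of the function is not shown in the diff:

# Condensed sketch of the run_command() pattern used throughout this script.
import subprocess
import sys


def run_command(cmd, redirect_output=True, error_ok=False):
    """Run cmd; return captured stdout, exiting on failure unless error_ok."""
    stdout = subprocess.PIPE if redirect_output else None
    proc = subprocess.Popen(cmd, stdout=stdout)
    output = proc.communicate()[0]
    if proc.returncode != 0 and not error_ok:
        sys.exit('ERROR: %s returned %d' % (' '.join(cmd), proc.returncode))
    return output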
diff --git a/tools/pip-requires b/tools/pip-requires
index 4eb47ca2b..c173d6221 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,9 +1,12 @@
+pep8==0.5.0
+pylint==0.21.1
IPy==0.70
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
boto==2.0b1
carrot==0.10.5
+eventlet==0.9.10
lockfile==0.8
python-daemon==1.5.5
python-gflags==1.3