| author | Chris Behrens <cbehrens@codestud.com> | 2010-08-16 15:56:28 -0500 |
|---|---|---|
| committer | Chris Behrens <cbehrens@codestud.com> | 2010-08-16 15:56:28 -0500 |
| commit | ad7a20231a8fb11bf7c75f2e180735e2de450102 | |
| tree | d22caa69a8fc66da6b33636299be83e962786e87 | |
| parent | d1982a50561f7b35ffc76ce5d45aaec11e76a23c | |
| parent | b07a85167ffde07747fc6e892df46686b95529e8 | |
merge from trunk
55 files changed, 2737 insertions, 1036 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 1f2009c30..13baf22a7 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -29,8 +29,6 @@ from nova import flags
 from nova import rpc
 from nova import server
 from nova import utils
-from nova.auth import manager
-from nova.compute import model
 from nova.endpoint import admin
 from nova.endpoint import api
 from nova.endpoint import cloud
@@ -39,10 +37,10 @@ FLAGS = flags.FLAGS


 def main(_argv):
+    """Load the controllers and start the tornado I/O loop."""
     controllers = {
         'Cloud': cloud.CloudController(),
-        'Admin': admin.AdminController()
-    }
+        'Admin': admin.AdminController()}
     _app = api.APIServerApplication(controllers)
     conn = rpc.Connection.instance()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index b3e7d456a..f70a4482c 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -18,8 +18,6 @@
 # under the License.

 """
-nova-dhcpbridge
-
 Handle lease database updates from DHCP servers.
 """
@@ -42,34 +40,42 @@ from nova.network import service

 FLAGS = flags.FLAGS


-def add_lease(mac, ip, hostname, interface):
+def add_lease(_mac, ip, _hostname, _interface):
+    """Set the IP that was assigned by the DHCP server."""
     if FLAGS.fake_rabbit:
         service.VlanNetworkService().lease_ip(ip)
     else:
-        rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "lease_ip",
-                  "args" : {"fixed_ip": ip}})
+                  "args": {"fixed_ip": ip}})
+

-def old_lease(mac, ip, hostname, interface):
+def old_lease(_mac, _ip, _hostname, _interface):
+    """Do nothing, just an old lease update."""
     logging.debug("Adopted old lease or got a change of mac/hostname")

-def del_lease(mac, ip, hostname, interface):
+
+def del_lease(_mac, ip, _hostname, _interface):
+    """Called when a lease expires."""
     if FLAGS.fake_rabbit:
         service.VlanNetworkService().release_ip(ip)
     else:
-        rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "release_ip",
-                  "args" : {"fixed_ip": ip}})
+                  "args": {"fixed_ip": ip}})
+

 def init_leases(interface):
+    """Get the list of hosts for an interface."""
     net = model.get_network_by_interface(interface)
     res = ""
-    for host_name in net.hosts:
-        res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
+    for address in net.assigned_objs:
+        res += "%s\n" % linux_net.host_dhcp(address)
     return res


 def main():
+    """Parse environment and arguments and call the appropriate action."""
     flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
     utils.default_flagfile(flagfile)
     argv = FLAGS(sys.argv)
@@ -79,18 +85,19 @@ def main():
         FLAGS.redis_db = 8
         FLAGS.network_size = 32
         FLAGS.connection_type = 'fake'
-        FLAGS.fake_network=True
-        FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
+        FLAGS.fake_network = True
+        FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
     action = argv[1]
-    if action in ['add','del','old']:
+    if action in ['add', 'del', 'old']:
         mac = argv[2]
         ip = argv[3]
         hostname = argv[4]
-        logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
-        globals()[action+'_lease'](mac, ip, hostname, interface)
+        logging.debug("Called %s for mac %s with ip %s and "
+                      "hostname %s on interface %s",
+                      action, mac, ip, hostname, interface)
+        globals()[action + '_lease'](mac, ip, hostname, interface)
     else:
         print init_leases(interface)
-    exit(0)

 if __name__ == "__main__":
-    sys.exit(main())
+    main()
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
index 2e79f09b7..5165109b2 100755
--- a/bin/nova-import-canonical-imagestore
+++ b/bin/nova-import-canonical-imagestore
@@ -37,20 +37,17 @@ FLAGS = flags.FLAGS

 api_url = 'https://imagestore.canonical.com/api/dashboard'

-image_cache = None

-def images():
-    global image_cache
-    if not image_cache:
-        try:
-            images = json.load(urllib2.urlopen(api_url))['images']
-            image_cache = [i for i in images if i['title'].find('amd64') > -1]
-        except Exception:
-            print 'unable to download canonical image list'
-            sys.exit(1)
-    return image_cache
-
-# FIXME(ja): add checksum/signature checks
+
+def get_images():
+    """Get a list of the images from the imagestore URL."""
+    images = json.load(urllib2.urlopen(api_url))['images']
+    images = [img for img in images if img['title'].find('amd64') > -1]
+    return images
+

 def download(img):
+    """Download an image to the local filesystem."""
+    # FIXME(ja): add checksum/signature checks
     tempdir = tempfile.mkdtemp(prefix='cis-')

     kernel_id = None
@@ -79,20 +76,22 @@ def download(img):

     shutil.rmtree(tempdir)


 def main():
+    """Main entry point."""
     utils.default_flagfile()
     argv = FLAGS(sys.argv)
+    images = get_images()
     if len(argv) == 2:
-        for img in images():
+        for img in images:
             if argv[1] == 'all' or argv[1] == img['title']:
                 download(img)
     else:
         print 'usage: %s (title|all)'
         print 'available images:'
-        for image in images():
-            print image['title']
+        for img in images:
+            print img['title']

 if __name__ == '__main__':
     main()
-
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index b195089b7..911fb6f42 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -22,7 +22,6 @@
 """

 import logging
-from twisted.internet import task
 from twisted.application import service

 from nova import twistd
@@ -30,7 +29,11 @@ from nova.compute import monitor

 logging.getLogger('boto').setLevel(logging.WARN)

-def main():
+
+if __name__ == '__main__':
+    twistd.serve(__file__)
+
+if __name__ == '__builtin__':
     logging.warn('Starting instance monitor')
     m = monitor.InstanceMonitor()

@@ -38,14 +41,3 @@ def main():
     # parses this file, return it so that we can get it into globals below
     application = service.Application('nova-instancemonitor')
     m.setServiceParent(application)
-    return application
-
-if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
-    application = main()
-
-
-
-
diff --git a/bin/nova-manage b/bin/nova-manage
index 7835c7a77..071436b13 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -37,12 +37,15 @@ FLAGS = flags.FLAGS


 class VpnCommands(object):
+    """Class for managing VPNs."""
+
     def __init__(self):
         self.manager = manager.AuthManager()
         self.instdir = model.InstanceDirectory()
         self.pipe = pipelib.CloudPipe(cloud.CloudController())

     def list(self):
+        """Print a listing of the VPNs for all projects."""
         print "%-12s\t" % 'project',
         print "%-12s\t" % 'ip:port',
         print "%s" % 'state'
@@ -50,9 +53,10 @@ class VpnCommands(object):
             print "%-12s\t" % project.name,
             print "%s:%s\t" % (project.vpn_ip, project.vpn_port),

-            vpn = self.__vpn_for(project.id)
+            vpn = self._vpn_for(project.id)
             if vpn:
-                out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'])
+                command = "ping -c1 -w1 %s > /dev/null; echo $?"
+                out, _err = utils.execute(command % vpn['private_dns_name'])
                 if out.strip() == '0':
                     net = 'up'
                 else:
@@ -66,25 +70,32 @@ class VpnCommands(object):
         else:
             print None

-    def __vpn_for(self, project_id):
+    def _vpn_for(self, project_id):
+        """Get the VPN instance for a project ID."""
         for instance in self.instdir.all:
-            if (instance.state.has_key('image_id')
+            if ('image_id' in instance.state
                 and instance['image_id'] == FLAGS.vpn_image_id
-                and not instance['state_description'] in ['shutting_down', 'shutdown']
+                and not instance['state_description'] in
+                        ['shutting_down', 'shutdown']
                 and instance['project_id'] == project_id):
                 return instance

     def spawn(self):
+        """Run all VPNs."""
         for p in reversed(self.manager.get_projects()):
-            if not self.__vpn_for(p.id):
-                print 'spawning %s' % p.id
-                self.pipe.launch_vpn_instance(p.id)
-                time.sleep(10)
+            if not self._vpn_for(p.id):
+                print 'spawning %s' % p.id
+                self.pipe.launch_vpn_instance(p.id)
+                time.sleep(10)

     def run(self, project_id):
+        """Start the VPN for a given project."""
         self.pipe.launch_vpn_instance(project_id)

+
 class RoleCommands(object):
+    """Class for managing roles."""
+
     def __init__(self):
         self.manager = manager.AuthManager()

@@ -107,25 +118,24 @@ class RoleCommands(object):
         arguments: user, role [project]"""
         self.manager.remove_role(user, role, project)

+
 class UserCommands(object):
+    """Class for managing users."""
+
     def __init__(self):
         self.manager = manager.AuthManager()

-    def __print_export(self, user):
-        print 'export EC2_ACCESS_KEY=%s' % user.access
-        print 'export EC2_SECRET_KEY=%s' % user.secret
-
     def admin(self, name, access=None, secret=None):
         """creates a new admin and prints exports
         arguments: name [access] [secret]"""
         user = self.manager.create_user(name, access, secret, True)
-        self.__print_export(user)
+        print_export(user)

     def create(self, name, access=None, secret=None):
         """creates a new user and prints exports
         arguments: name [access] [secret]"""
         user = self.manager.create_user(name, access, secret, False)
-        self.__print_export(user)
+        print_export(user)

     def delete(self, name):
         """deletes an existing user
@@ -137,7 +147,7 @@ class UserCommands(object):
         arguments: name"""
         user = self.manager.get_user(name)
         if user:
-            self.__print_export(user)
+            print_export(user)
         else:
             print "User %s doesn't exist" % name

@@ -147,53 +157,58 @@ class UserCommands(object):
         for user in self.manager.get_users():
             print user.name

+
+def print_export(user):
+    """Print export variables to use with API."""
+    print 'export EC2_ACCESS_KEY=%s' % user.access
+    print 'export EC2_SECRET_KEY=%s' % user.secret
+
+
 class ProjectCommands(object):
+    """Class for managing projects."""
+
     def __init__(self):
         self.manager = manager.AuthManager()

     def add(self, project, user):
-        """adds user to project
+        """Adds user to project
         arguments: project user"""
         self.manager.add_to_project(user, project)

     def create(self, name, project_manager, description=None):
-        """creates a new project
+        """Creates a new project
         arguments: name project_manager [description]"""
-        user = self.manager.create_project(name, project_manager, description)
+        self.manager.create_project(name, project_manager, description)

     def delete(self, name):
-        """deletes an existing project
+        """Deletes an existing project
         arguments: name"""
         self.manager.delete_project(name)

     def environment(self, project_id, user_id, filename='novarc'):
-        """exports environment variables to a sourcable file
+        """Exports environment variables to a sourcable file
         arguments: project_id user_id [filename='novarc]"""
         rc = self.manager.get_environment_rc(project_id, user_id)
         with open(filename, 'w') as f:
             f.write(rc)

     def list(self):
-        """lists all projects
+        """Lists all projects
         arguments: <none>"""
         for project in self.manager.get_projects():
             print project.name

     def remove(self, project, user):
-        """removes user from project
+        """Removes user from project
         arguments: project user"""
         self.manager.remove_from_project(user, project)

-    def zip(self, project_id, user_id, filename='nova.zip'):
-        """exports credentials for project to a zip file
+    def zipfile(self, project_id, user_id, filename='nova.zip'):
+        """Exports credentials for project to a zip file
         arguments: project_id user_id [filename='nova.zip]"""
-        zip = self.manager.get_credentials(project_id, user_id)
+        zip_file = self.manager.get_credentials(user_id, project_id)
         with open(filename, 'w') as f:
-            f.write(zip)
-
-
-def usage(script_name):
-    print script_name + " category action [<args>]"
+            f.write(zip_file)


 categories = [
@@ -205,62 +220,61 @@ categories = [


 def lazy_match(name, key_value_tuples):
-    """finds all objects that have a key that case insensitively contains [name]
-    key_value_tuples is a list of tuples of the form (key, value)
+    """Finds all objects that have a key that case insensitively contains
+    [name] key_value_tuples is a list of tuples of the form (key, value)
     returns a list of tuples of the form (key, value)"""
-    return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0]
+    result = []
+    for (k, v) in key_value_tuples:
+        if k.lower().find(name.lower()) == 0:
+            result.append((k, v))
+    if len(result) == 0:
+        print "%s does not match any options:" % name
+        for k, _v in key_value_tuples:
+            print "\t%s" % k
+        sys.exit(2)
+    if len(result) > 1:
+        print "%s matched multiple options:" % name
+        for k, _v in result:
+            print "\t%s" % k
+        sys.exit(2)
+    return result


 def methods_of(obj):
-    """get all callable methods of an object that don't start with underscore
+    """Get all callable methods of an object that don't start with underscore
     returns a list of tuples of the form (method_name, method)"""
-    return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')]
+    result = []
+    for i in dir(obj):
+        if callable(getattr(obj, i)) and not i.startswith('_'):
+            result.append((i, getattr(obj, i)))
+    return result


-if __name__ == '__main__':
+def main():
+    """Parse options and call the appropriate class/method."""
     utils.default_flagfile('/etc/nova/nova-manage.conf')
     argv = FLAGS(sys.argv)
     script_name = argv.pop(0)
     if len(argv) < 1:
-        usage(script_name)
+        print script_name + " category action [<args>]"
         print "Available categories:"
-        for k, v in categories:
+        for k, _ in categories:
             print "\t%s" % k
         sys.exit(2)
     category = argv.pop(0)
     matches = lazy_match(category, categories)
-    if len(matches) == 0:
-        print "%s does not match any categories:" % category
-        for k, v in categories:
-            print "\t%s" % k
-        sys.exit(2)
-    if len(matches) > 1:
-        print "%s matched multiple categories:" % category
-        for k, v in matches:
-            print "\t%s" % k
-        sys.exit(2)
     # instantiate the command group object
     category, fn = matches[0]
     command_object = fn()
     actions = methods_of(command_object)
     if len(argv) < 1:
-        usage(script_name)
+        print script_name + " category action [<args>]"
         print "Available actions for %s category:" % category
-        for k, v in actions:
+        for k, _v in actions:
             print "\t%s" % k
         sys.exit(2)
     action = argv.pop(0)
     matches = lazy_match(action, actions)
-    if len(matches) == 0:
-        print "%s does not match any actions" % action
-        for k, v in actions:
-            print "\t%s" % k
-        sys.exit(2)
-    if len(matches) > 1:
-        print "%s matched multiple actions:" % action
-        for k, v in matches:
-            print "\t%s" % k
-        sys.exit(2)
     action, fn = matches[0]
     # call the action with the remaining arguments
     try:
@@ -271,3 +285,5 @@ if __name__ == '__main__':
         print "%s %s: %s" % (category, action, fn.__doc__)
         sys.exit(2)

+if __name__ == '__main__':
+    main()
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index c0fa815c0..02f2bcb48 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -30,15 +30,9 @@ from nova.objectstore import handler

 FLAGS = flags.FLAGS

-def main():
-    app = handler.get_application()
-    print app
-    return app
-
-# NOTE(soren): Stolen from nova-compute
 if __name__ == '__main__':
     twistd.serve(__file__)

 if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = main()
+    application = handler.get_application()
diff --git a/bin/nova-rsapi b/bin/nova-rsapi
index 306a1fc60..026880d5a 100755
--- a/bin/nova-rsapi
+++ b/bin/nova-rsapi
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# pylint: disable-msg=C0103
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 # Copyright 2010 United States Government as represented by the
@@ -17,42 +18,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-    WSGI daemon for the main API endpoint.
+    Daemon for the Rackspace API endpoint.
 """

-import logging
-from tornado import ioloop
-from wsgiref import simple_server
-
 from nova import flags
-from nova import rpc
-from nova import server
 from nova import utils
-from nova.auth import manager
+from nova import wsgi
 from nova.endpoint import rackspace

 FLAGS = flags.FLAGS
 flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')

-def main(_argv):
-    user_manager = manager.AuthManager()
-    api_instance = rackspace.Api(user_manager)
-    conn = rpc.Connection.instance()
-    rpc_consumer = rpc.AdapterConsumer(connection=conn,
-                                       topic=FLAGS.cloud_topic,
-                                       proxy=api_instance)
-
-# TODO: fire rpc response listener (without attach to tornado)
-#    io_inst = ioloop.IOLoop.instance()
-#    _injected = consumer.attach_to_tornado(io_inst)
-
-    http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler)
-    http_server.set_app(api_instance.handler)
-    logging.debug('Started HTTP server on port %i' % FLAGS.cc_port)
-    while True:
-        http_server.handle_request()
-#    io_inst.start()
-
 if __name__ == '__main__':
     utils.default_flagfile()
-    server.serve('nova-rsapi', main)
+    wsgi.run_server(rackspace.API(), FLAGS.cc_port)
diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst
index 3eadd0882..f683bb256 100644
--- a/doc/source/getting.started.rst
+++ b/doc/source/getting.started.rst
@@ -40,6 +40,7 @@ Python libraries we don't vendor

 * M2Crypto: python library interface for openssl
 * curl
+* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks.

 Vendored python libaries (don't require any installation)
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 25d5e71cb..242298a75 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -57,6 +57,28 @@ class UserInfo(object):
         elif name == 'secretkey':
             self.secretkey = str(value)

+
+class UserRole(object):
+    """
+    Information about a Nova user's role, as parsed through SAX.
+    Fields include:
+        role
+    """
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.role = None
+
+    def __repr__(self):
+        return 'UserRole:%s' % self.role
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'role':
+            self.role = value
+        else:
+            setattr(self, name, str(value))
+

 class ProjectInfo(object):
     """
     Information about a Nova project, as parsed through SAX
@@ -114,7 +136,6 @@ class ProjectMember(object):
         else:
             setattr(self, name, str(value))

-
 class HostInfo(object):
     """
     Information about a Nova Host, as parsed through SAX:
@@ -196,6 +217,24 @@ class NovaAdminClient(object):
         """ deletes a user """
         return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)

+    def get_roles(self, project_roles=True):
+        """Returns a list of available roles."""
+        return self.apiconn.get_list('DescribeRoles',
+                                     {'ProjectRoles': project_roles},
+                                     [('item', UserRole)])
+
+    def get_user_roles(self, user, project=None):
+        """Returns a list of roles for the given user.
+        Omitting project will return any global roles that the user has.
+        Specifying project will return only project specific roles.
+        """
+        params = {'User': user}
+        if project:
+            params['Project'] = project
+        return self.apiconn.get_list('DescribeUserRoles',
+                                     params,
+                                     [('item', UserRole)])
+
     def add_user_role(self, user, role, project=None):
         """
         Add a role to a user either globally or for a specific project.
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index ec739e134..453fa196c 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -182,7 +182,8 @@ class LdapDriver(object):
         for member_uid in member_uids:
             if not self.__user_exists(member_uid):
                 raise exception.NotFound("Project can't be created "
-                                         "because user %s doesn't exist" % member_uid)
+                                         "because user %s doesn't exist"
+                                         % member_uid)
             members.append(self.__uid_to_dn(member_uid))
         # always add the manager as a member because members is required
         if not manager_dn in members:
@@ -236,6 +237,26 @@ class LdapDriver(object):
         role_dn = self.__role_to_dn(role, project_id)
         return self.__remove_from_group(uid, role_dn)

+    def get_user_roles(self, uid, project_id=None):
+        """Retrieve list of roles for user (or user and project)"""
+        if project_id is None:
+            # NOTE(vish): This is unnecessarily slow, but since we can't
+            #             guarantee that the global roles are located
+            #             together in the ldap tree, we're doing this version.
+            roles = []
+            for role in FLAGS.allowed_roles:
+                role_dn = self.__role_to_dn(role)
+                if self.__is_in_group(uid, role_dn):
+                    roles.append(role)
+            return roles
+        else:
+            project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+            roles = self.__find_objects(project_dn,
+                                        '(&(&(objectclass=groupOfNames)'
+                                        '(!(objectclass=novaProject)))'
+                                        '(member=%s))' % self.__uid_to_dn(uid))
+            return [role['cn'][0] for role in roles]
+
     def delete_user(self, uid):
         """Delete a user"""
         if not self.__user_exists(uid):
@@ -253,24 +274,24 @@ class LdapDriver(object):
             self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
                                                     FLAGS.ldap_user_subtree))

-    def delete_project(self, name):
+    def delete_project(self, project_id):
         """Delete a project"""
-        project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
+        project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
         self.__delete_roles(project_dn)
         self.__delete_group(project_dn)

-    def __user_exists(self, name):
+    def __user_exists(self, uid):
         """Check if user exists"""
-        return self.get_user(name) != None
+        return self.get_user(uid) != None

     def __key_pair_exists(self, uid, key_name):
         """Check if key pair exists"""
         return self.get_user(uid) != None
         return self.get_key_pair(uid, key_name) != None

-    def __project_exists(self, name):
+    def __project_exists(self, project_id):
         """Check if project exists"""
-        return self.get_project(name) != None
+        return self.get_project(project_id) != None

     def __find_object(self, dn, query=None, scope=None):
         """Find an object by dn and query"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d44ed52b2..064fd78bc 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -29,17 +29,19 @@ import uuid
 import zipfile

 from nova import crypto
-from nova import datastore
 from nova import exception
 from nova import flags
-from nova import objectstore  # for flags
 from nova import utils
-from nova.auth import ldapdriver  # for flags
 from nova.auth import signer
 from nova.network import vpn

+
 FLAGS = flags.FLAGS

+flags.DEFINE_list('allowed_roles',
+                  ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
+                  'Allowed roles for project')
+
 # NOTE(vish): a user with one of these roles will be a superuser and
 #             have access to all api commands
 flags.DEFINE_list('superuser_roles', ['cloudadmin'],
@@ -99,6 +101,7 @@ class AuthBase(object):
 class User(AuthBase):
     """Object representing a user"""
     def __init__(self, id, name, access, secret, admin):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.access = access
@@ -159,6 +162,7 @@ class KeyPair(AuthBase):
     fingerprint is stored. The user's private key is not saved.
     """
     def __init__(self, id, name, owner_id, public_key, fingerprint):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.owner_id = owner_id
@@ -176,6 +180,7 @@ class KeyPair(AuthBase):
 class Project(AuthBase):
     """Represents a Project returned from the datastore"""
     def __init__(self, id, name, project_manager_id, description, member_ids):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.project_manager_id = project_manager_id
@@ -234,7 +239,7 @@ class AuthManager(object):
     AuthManager also manages associated data related to Auth objects that
     need to be more accessible, such as vpn ips and ports.
     """
-    _instance=None
+    _instance = None
     def __new__(cls, *args, **kwargs):
         """Returns the AuthManager singleton"""
         if not cls._instance:
@@ -248,7 +253,7 @@ class AuthManager(object):
         reset the driver if it is not set or a new driver is specified.
         """
         if driver or not getattr(self, 'driver', None):
-          self.driver = utils.import_class(driver or FLAGS.auth_driver)
+            self.driver = utils.import_class(driver or FLAGS.auth_driver)

     def authenticate(self, access, signature, params, verb='GET',
                      server_string='127.0.0.1:8773', path='/',
@@ -431,6 +436,10 @@ class AuthManager(object):
         @type project: Project or project_id
         @param project: Project in which to add local role.
         """
+        if role not in FLAGS.allowed_roles:
+            raise exception.NotFound("The %s role can not be found" % role)
+        if project is not None and role in FLAGS.global_roles:
+            raise exception.NotFound("The %s role is global only" % role)
         with self.driver() as drv:
             drv.add_role(User.safe_id(user), role, Project.safe_id(project))

@@ -454,6 +463,19 @@ class AuthManager(object):
         with self.driver() as drv:
             drv.remove_role(User.safe_id(user), role, Project.safe_id(project))

+    def get_roles(self, project_roles=True):
+        """Get list of allowed roles"""
+        if project_roles:
+            return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
+        else:
+            return FLAGS.allowed_roles
+
+    def get_user_roles(self, user, project=None):
+        """Get user global or per-project roles"""
+        with self.driver() as drv:
+            return drv.get_user_roles(User.safe_id(user),
+                                      Project.safe_id(project))
+
     def get_project(self, pid):
         """Get project object by id"""
         with self.driver() as drv:
diff --git a/nova/datastore.py b/nova/datastore.py
index 51ef7a758..5dc6ed107 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -124,13 +124,17 @@ class BasicModel(object):
             yield cls(identifier)

     @classmethod
-    @absorb_connection_error
     def associated_to(cls, foreign_type, foreign_id):
-        redis_set = cls._redis_association_name(foreign_type, foreign_id)
-        for identifier in Redis.instance().smembers(redis_set):
+        for identifier in cls.associated_keys(foreign_type, foreign_id):
             yield cls(identifier)

     @classmethod
+    @absorb_connection_error
+    def associated_keys(cls, foreign_type, foreign_id):
+        redis_set = cls._redis_association_name(foreign_type, foreign_id)
+        return Redis.instance().smembers(redis_set) or []
+
+    @classmethod
     def _redis_set_name(cls, kls_name):
         # stupidly pluralize (for compatiblity with previous codebase)
         return kls_name.lower() + "s"
@@ -138,7 +142,7 @@ class BasicModel(object):
     @classmethod
     def _redis_association_name(cls, foreign_type, foreign_id):
         return cls._redis_set_name("%s:%s:%s" %
-                                   (foreign_type, foreign_id, cls.__name__))
+                                   (foreign_type, foreign_id, cls._redis_name()))

     @property
     def identifier(self):
@@ -170,6 +174,9 @@ class BasicModel(object):
     def setdefault(self, item, default):
         return self.state.setdefault(item, default)

+    def __contains__(self, item):
+        return item in self.state
+
     def __getitem__(self, item):
         return self.state[item]
+ """ + roles = manager.AuthManager().get_user_roles(user, project=project) + return { 'roles': [{'role': r} for r in roles]} + + @admin_only def modify_user_role(self, context, user, role, project=None, operation='add', **kwargs): """Add or remove a role for a user and project.""" diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5e52ccf8d..43edd3575 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -85,7 +86,7 @@ class CloudController(object): """ Ensure the keychains and folders exist. """ # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) + os.makedirs(FLAGS.keys_path) # Gen root CA, if we don't have one root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) if not os.path.exists(root_ca_path): @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -123,6 +125,12 @@ class CloudController(object): } else: keys = '' + + address_record = network_model.FixedIp(i['private_dns_name']) + if address_record: + hostname = address_record['hostname'] + else: + hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-') data = { 'user-data': base64.b64decode(i['user_data']), 'meta-data': { @@ -135,19 +143,19 @@ class CloudController(object): 'root': '/dev/sda1', 'swap': 'sda3' }, - 'hostname': i['private_dns_name'], # is this public sometimes? 
+ 'hostname': hostname, 'instance-action': 'none', 'instance-id': i['instance_id'], 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], + 'local-hostname': hostname, 'local-ipv4': i['private_dns_name'], # TODO: switch to IP 'kernel-id': i.get('kernel_id', ''), 'placement': { 'availaibility-zone': i.get('availability_zone', 'nova'), }, - 'public-hostname': i.get('dns_name', ''), + 'public-hostname': hostname, 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,26 +211,22 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): @@ -232,7 +236,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +255,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +289,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,16 +302,16 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) + volume = self._get_volume(context, result) defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) + address = network_model.ElasticIp.lookup(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -348,15 +352,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +376,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': 
context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -394,7 +398,15 @@ class CloudController(object): @rbac.allow('all') def describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) + return defer.succeed(self._format_describe_instances(context)) + + def _format_describe_instances(self, context): + return { 'reservationSet': self._format_instances(context) } + + def _format_run_instances(self, context, reservation_id): + i = self._format_instances(context, reservation_id) + assert len(i) == 1 + return i[0] def _format_instances(self, context, reservation_id = None): reservations = {} @@ -425,7 +437,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,8 +455,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } - return instance_response + return list(reservations.values()) @rbac.allow('all') def describe_addresses(self, context, **kwargs): @@ -451,13 +463,13 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.PublicAddress.all(): + for address in network_model.ElasticIp.all(): # TODO(vish): implement a by_project iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -472,12 +484,11 @@ class CloudController(object): @defer.inlineCallbacks def allocate_address(self, context, **kwargs): network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, + public_ip = yield rpc.call(network_topic, {"method": "allocate_elastic_ip", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -517,11 +528,10 @@ class CloudController(object): """Retrieves the network host for a project""" host = network_service.get_host_for_project(context.project.id) if not host: - result = yield rpc.call(FLAGS.network_topic, + host = yield rpc.call(FLAGS.network_topic, {"method": "set_network_host", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - host = result['result'] defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') @@ -561,17 +571,17 @@ class CloudController(object): # TODO: Get the real security group of launch in here security_group = "default" for num in range(int(kwargs['max_count'])): - vpn = False + is_vpn = False if image_id == FLAGS.vpn_image_id: - vpn = True - allocate_result = yield rpc.call(network_topic, + is_vpn = True + inst = self.instdir.new() + allocate_data = 
yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, - "vpn": vpn}}) - allocate_data = allocate_result['result'] - inst = self.instdir.new() + "is_vpn": is_vpn, + "hostname": inst.instance_id}}) inst['image_id'] = image_id inst['kernel_id'] = kernel_id inst['ramdisk_id'] = ramdisk_id @@ -585,17 +595,18 @@ class CloudController(object): inst['project_id'] = context.project.id inst['ami_launch_index'] = num inst['security_group'] = security_group + inst['hostname'] = inst.instance_id for (key, value) in allocate_data.iteritems(): inst[key] = value inst.save() rpc.cast(FLAGS.scheduler_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) + defer.returnValue(self._format_run_instances(context, reservation_id)) @rbac.allow('projectmanager', 'sysadmin') @defer.inlineCallbacks @@ -646,7 +657,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +667,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +700,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index de05ba2da..75b828e91 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -17,153 +17,95 @@ # under the License. 
""" -Rackspace API +Rackspace API Endpoint """ -import base64 import json -import logging -import multiprocessing -import os import time -import tornado.web -from twisted.internet import defer -from nova import datastore -from nova import exception +import webob.dec +import webob.exc + from nova import flags from nova import rpc from nova import utils +from nova import wsgi from nova.auth import manager -from nova.compute import model -from nova.compute import network -from nova.endpoint import images -from nova.endpoint import wsgi +from nova.compute import model as compute +from nova.network import model as network FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -# TODO(todd): subclass Exception so we can bubble meaningful errors - - -class Api(object): - - def __init__(self, rpc_mechanism): - self.controllers = { - "v1.0": RackspaceAuthenticationApi(), - "servers": RackspaceCloudServerApi() - } - self.rpc_mechanism = rpc_mechanism - - def handler(self, environ, responder): - environ['nova.context'] = self.build_context(environ) - controller, path = wsgi.Util.route( - environ['PATH_INFO'], - self.controllers - ) - if not controller: - # TODO(todd): Exception (404) - raise Exception("Missing Controller") - rv = controller.process(path, environ) - if type(rv) is tuple: - responder(rv[0], rv[1]) - rv = rv[2] - else: - responder("200 OK", []) - return rv - - def build_context(self, env): - rv = {} - if env.has_key("HTTP_X_AUTH_TOKEN"): - rv['user'] = manager.AuthManager().get_user_from_access_key( - env['HTTP_X_AUTH_TOKEN'] - ) - if rv['user']: - rv['project'] = manager.AuthManager().get_project( - rv['user'].name - ) - return rv - - -class RackspaceApiEndpoint(object): - def process(self, path, env): - if not self.check_authentication(env): - # TODO(todd): Exception (Unauthorized) - raise Exception("Unable to authenticate") - - if len(path) == 0: - return self.index(env) - - action = path.pop(0) - if hasattr(self, action): - method = getattr(self, action) - return method(path, env) - else: - # TODO(todd): Exception (404) - raise Exception("Missing method %s" % path[0]) - - def check_authentication(self, env): - if hasattr(self, "process_without_authentication") \ - and getattr(self, "process_without_authentication"): - return True - if not env['nova.context']['user']: - return False - return True - - -class RackspaceAuthenticationApi(RackspaceApiEndpoint): +class API(wsgi.Middleware): + """Entry point for all requests.""" def __init__(self): - self.process_without_authentication = True - - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - def index(self, env): - response = '204 No Content' - headers = [ - ('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']), - ('X-Storage-Url', 'http://%s' % env['HTTP_HOST']), - ('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']), - ('X-Auth-Token', env['HTTP_X_AUTH_KEY']) - ] - body = "" - return (response, headers, body) - - -class RackspaceCloudServerApi(RackspaceApiEndpoint): + super(API, self).__init__(Router(webob.exc.HTTPNotFound())) + + def __call__(self, environ, start_response): + context = {} + if "HTTP_X_AUTH_TOKEN" in environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden()(environ, start_response) + 
environ['nova.context'] = context + return self.application(environ, start_response) + + +class Router(wsgi.Router): + """Route requests to the next WSGI application.""" + + def _build_map(self): + """Build routing map for authentication and cloud.""" + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) + + +class AuthenticationAPI(wsgi.Application): + """Handle all authorization requests through WSGI applications.""" + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + # TODO(todd): make a actual session with a unique token + # just pass the auth key back through for now + res = webob.Response() + res.status = '204 No Content' + res.headers.add('X-Server-Management-Url', req.host_url) + res.headers.add('X-Storage-Url', req.host_url) + res.headers.add('X-CDN-Managment-Url', req.host_url) + res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) + return res + + +class CloudServerAPI(wsgi.Application): + """Handle all server requests through WSGI applications.""" def __init__(self): - self.instdir = model.InstanceDirectory() + super(CloudServerAPI, self).__init__() + self.instdir = compute.InstanceDirectory() self.network = network.PublicNetworkController() - def index(self, env): - if env['REQUEST_METHOD'] == 'GET': - return self.detail(env) - elif env['REQUEST_METHOD'] == 'POST': - return self.launch_server(env) - - def detail(self, args, env): - value = { - "servers": - [] - } + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + value = {"servers": []} for inst in self.instdir.all: value["servers"].append(self.instance_details(inst)) - return json.dumps(value) - ## - ## - - def launch_server(self, env): - data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH']))) - inst = self.build_server_instance(data, env['nova.context']) - self.schedule_launch_of_instance(inst) - return json.dumps({"server": self.instance_details(inst)}) - - def instance_details(self, inst): + def instance_details(self, inst): # pylint: disable-msg=R0201 + """Build the data structure to represent details for an instance.""" return { "id": inst.get("instance_id", None), "imageId": inst.get("image_id", None), @@ -171,11 +113,9 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): "hostId": inst.get("node_name", None), "status": inst.get("state", "pending"), "addresses": { - "public": [self.network.get_public_ip_for_instance( - inst.get("instance_id", None) - )], - "private": [inst.get("private_dns_name", None)] - }, + "public": [network.get_public_ip_for_instance( + inst.get("instance_id", None))], + "private": [inst.get("private_dns_name", None)]}, # implemented only by Rackspace, not AWS "name": inst.get("name", "Not-Specified"), @@ -184,11 +124,22 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): "progress": "Not-Supported", "metadata": { "Server Label": "Not-Supported", - "Image Version": "Not-Supported" - } - } + "Image Version": "Not-Supported"}} + + @webob.dec.wsgify + def launch_server(self, req): + """Launch a new instance.""" + data = json.loads(req.body) + inst = self.build_server_instance(data, req.environ['nova.context']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + return 
json.dumps({"server": self.instance_details(inst)}) def build_server_instance(self, env, context): + """Build instance data structure and save it to the data store.""" reservation = utils.generate_uid('r') ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) inst = self.instdir.new() @@ -200,27 +151,33 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): inst['reservation_id'] = reservation inst['launch_time'] = ltime inst['mac_address'] = utils.generate_mac() - address = network.allocate_ip( + address = self.network.allocate_ip( inst['user_id'], inst['project_id'], - mac=inst['mac_address'] - ) + mac=inst['mac_address']) inst['private_dns_name'] = str(address) inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( inst['user_id'], inst['project_id'], - 'default' # security group - )['bridge_name'] + 'default')['bridge_name'] # key_data, key_name, ami_launch_index # TODO(todd): key data or root password inst.save() return inst - def schedule_launch_of_instance(self, inst): - rpc.cast( - FLAGS.compute_topic, - { - "method": "run_instance", - "args": {"instance_id": inst.instance_id} - } - ) + @webob.dec.wsgify + @wsgi.route_args + def delete_server(self, req, route_args): # pylint: disable-msg=R0201 + """Delete an instance.""" + owner_hostname = None + instance = compute.Instance.lookup(route_args['server_id']) + if instance: + owner_hostname = instance["node_name"] + if not owner_hostname: + return webob.exc.HTTPNotFound("Did not find image, or it was " + "not in a running state.") + rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) + rpc.cast(rpc_transport, + {"method": "reboot_instance", + "args": {"instance_id": route_args['server_id']}}) + req.status = "202 Accepted" diff --git a/nova/flags.py b/nova/flags.py index 7f92e3f70..be1e1184a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -21,16 +21,145 @@ Package-level global flags are defined here, the rest are defined where they're used. """ +import getopt import socket +import sys +import gflags -from gflags import * -# This keeps pylint from barfing on the imports -FLAGS = FLAGS -DEFINE_string = DEFINE_string -DEFINE_integer = DEFINE_integer -DEFINE_bool = DEFINE_bool +class FlagValues(gflags.FlagValues): + """Extension of gflags.FlagValues that allows undefined and runtime flags. + + Unknown flags will be ignored when parsing the command line, but the + command line will be kept so that it can be replayed if new flags are + defined after the initial parsing. + + """ + + def __init__(self): + gflags.FlagValues.__init__(self) + self.__dict__['__dirty'] = [] + self.__dict__['__was_already_parsed'] = False + self.__dict__['__stored_argv'] = [] + + def __call__(self, argv): + # We're doing some hacky stuff here so that we don't have to copy + # out all the code of the original verbatim and then tweak a few lines. 
+ # We're hijacking the output of getopt so we can still return the + # leftover args at the end + sneaky_unparsed_args = {"value": None} + original_argv = list(argv) + + if self.IsGnuGetOpt(): + orig_getopt = getattr(getopt, 'gnu_getopt') + orig_name = 'gnu_getopt' + else: + orig_getopt = getattr(getopt, 'getopt') + orig_name = 'getopt' + + def _sneaky(*args, **kw): + optlist, unparsed_args = orig_getopt(*args, **kw) + sneaky_unparsed_args['value'] = unparsed_args + return optlist, unparsed_args + + try: + setattr(getopt, orig_name, _sneaky) + args = gflags.FlagValues.__call__(self, argv) + except gflags.UnrecognizedFlagError: + # Undefined args were found, for now we don't care so just + # act like everything went well + # (these three lines are copied pretty much verbatim from the end + # of the __call__ function we are wrapping) + unparsed_args = sneaky_unparsed_args['value'] + if unparsed_args: + if self.IsGnuGetOpt(): + args = argv[:1] + unparsed + else: + args = argv[:1] + original_argv[-len(unparsed_args):] + else: + args = argv[:1] + finally: + setattr(getopt, orig_name, orig_getopt) + + # Store the arguments for later, we'll need them for new flags + # added at runtime + self.__dict__['__stored_argv'] = original_argv + self.__dict__['__was_already_parsed'] = True + self.ClearDirty() + return args + + def SetDirty(self, name): + """Mark a flag as dirty so that accessing it will case a reparse.""" + self.__dict__['__dirty'].append(name) + + def IsDirty(self, name): + return name in self.__dict__['__dirty'] + + def ClearDirty(self): + self.__dict__['__is_dirty'] = [] + + def WasAlreadyParsed(self): + return self.__dict__['__was_already_parsed'] + + def ParseNewFlags(self): + if '__stored_argv' not in self.__dict__: + return + new_flags = FlagValues() + for k in self.__dict__['__dirty']: + new_flags[k] = gflags.FlagValues.__getitem__(self, k) + + new_flags(self.__dict__['__stored_argv']) + for k in self.__dict__['__dirty']: + setattr(self, k, getattr(new_flags, k)) + self.ClearDirty() + + def __setitem__(self, name, flag): + gflags.FlagValues.__setitem__(self, name, flag) + if self.WasAlreadyParsed(): + self.SetDirty(name) + + def __getitem__(self, name): + if self.IsDirty(name): + self.ParseNewFlags() + return gflags.FlagValues.__getitem__(self, name) + + def __getattr__(self, name): + if self.IsDirty(name): + self.ParseNewFlags() + return gflags.FlagValues.__getattr__(self, name) + + +FLAGS = FlagValues() + + +def _wrapper(func): + def _wrapped(*args, **kw): + kw.setdefault('flag_values', FLAGS) + func(*args, **kw) + _wrapped.func_name = func.func_name + return _wrapped + + +DEFINE_string = _wrapper(gflags.DEFINE_string) +DEFINE_integer = _wrapper(gflags.DEFINE_integer) +DEFINE_bool = _wrapper(gflags.DEFINE_bool) +DEFINE_boolean = _wrapper(gflags.DEFINE_boolean) +DEFINE_float = _wrapper(gflags.DEFINE_float) +DEFINE_enum = _wrapper(gflags.DEFINE_enum) +DEFINE_list = _wrapper(gflags.DEFINE_list) +DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) +DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) +DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise gflags.UnrecognizedFlag( + "%s not defined by %s" % (name, module_string)) + # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: diff --git a/nova/network/exception.py b/nova/network/exception.py index 
5722e9672..8d7aa1498 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -24,17 +24,25 @@ from nova.exception import Error class NoMoreAddresses(Error): + """No More Addresses are available in the network""" pass + class AddressNotAllocated(Error): + """The specified address has not been allocated""" pass + class AddressAlreadyAssociated(Error): + """The specified address has already been associated""" pass + class AddressNotAssociated(Error): + """The specified address is not associated""" pass + class NotValidNetworkSize(Error): + """The network size is not valid""" pass - diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4a4b4c8a8..15050adaf 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -15,85 +13,102 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Implements vlans, bridges, and iptables rules using linux utilities. +""" import logging import signal import os -import subprocess # todo(ja): does the definition of network_path belong here? +from nova import flags from nova import utils -from nova import flags -FLAGS=flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') + def execute(cmd, addl_env=None): + """Wrapper around utils.execute for fake_network""" if FLAGS.fake_network: - logging.debug("FAKE NET: %s" % cmd) + logging.debug("FAKE NET: %s", cmd) return "fake", 0 else: return utils.execute(cmd, addl_env=addl_env) + def runthis(desc, cmd): + """Wrapper around utils.runthis for fake_network""" if FLAGS.fake_network: return execute(cmd) else: - return utils.runthis(desc,cmd) - -def Popen(cmd): - if FLAGS.fake_network: - execute(' '.join(cmd)) - else: - subprocess.Popen(cmd) + return utils.runthis(desc, cmd) def device_exists(device): - (out, err) = execute("ifconfig %s" % device) + """Check if ethernet device exists""" + (_out, err) = execute("ifconfig %s" % device) return not err + def confirm_rule(cmd): + """Delete and re-add iptables rule""" execute("sudo iptables --delete %s" % (cmd)) execute("sudo iptables -I %s" % (cmd)) + def remove_rule(cmd): + """Remove iptables rule""" execute("sudo iptables --delete %s" % (cmd)) -def bind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface)) -def unbind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface)) +def bind_public_ip(public_ip, interface): + """Bind ip to an interface""" + runthis("Binding IP to interface: %s", + "sudo ip addr add %s dev %s" % (public_ip, interface)) + + +def unbind_public_ip(public_ip, interface): + """Unbind a public ip from an interface""" + runthis("Binding IP to interface: %s", + "sudo ip addr del %s dev %s" % (public_ip, interface)) + def vlan_create(net): - """ create a vlan on on a bridge device unless vlan already exists """ + """Create a vlan on on a bridge device unless vlan already exists""" if not device_exists("vlan%s" % net['vlan']): logging.debug("Starting VLAN inteface for %s network", (net['vlan'])) execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") 
execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan'])) execute("sudo ifconfig vlan%s up" % (net['vlan'])) + def bridge_create(net): - """ create a bridge on a vlan unless it already exists """ + """Create a bridge on a vlan unless it already exists""" if not device_exists(net['bridge_name']): logging.debug("Starting Bridge inteface for %s network", (net['vlan'])) execute("sudo brctl addbr %s" % (net['bridge_name'])) execute("sudo brctl setfd %s 0" % (net.bridge_name)) # execute("sudo brctl setageing %s 10" % (net.bridge_name)) execute("sudo brctl stp %s off" % (net['bridge_name'])) - execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan'])) + execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], + net['vlan'])) if net.bridge_gets_ip: execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (net['bridge_name'], net.gateway, net.broadcast, net.netmask)) - confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net['bridge_name'])) + confirm_rule("FORWARD --in-interface %s -j ACCEPT" % + (net['bridge_name'])) else: execute("sudo ifconfig %s up" % net['bridge_name']) -def dnsmasq_cmd(net): + +def _dnsmasq_cmd(net): + """Builds dnsmasq command""" cmd = ['sudo -E dnsmasq', ' --strict-order', ' --bind-interfaces', @@ -101,76 +116,81 @@ def dnsmasq_cmd(net): ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), ' --listen-address=%s' % net.dhcp_listen_address, ' --except-interface=lo', - ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start), + ' --dhcp-range=%s,static,120s' % net.dhcp_range_start, ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), ' --leasefile-ro'] return ''.join(cmd) -def hostDHCP(network, host, mac): - idx = host.split(".")[-1] # Logically, the idx of instances they've launched in this net - return "%s,%s-%s-%s.novalocal,%s" % \ - (mac, network['user_id'], network['vlan'], idx, host) -# todo(ja): if the system has restarted or pid numbers have wrapped +def host_dhcp(address): + """Return a host string for an address object""" + return "%s,%s.novalocal,%s" % (address['mac'], + address['hostname'], + address.address) + + +# TODO(ja): if the system has restarted or pid numbers have wrapped # then you cannot be certain that the pid refers to the # dnsmasq. As well, sending a HUP only reloads the hostfile, # so any configuration options (like dchp-range, vlan, ...) 
# aren't reloaded def start_dnsmasq(network): - """ (re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for host_name in network.hosts: - f.write("%s\n" % hostDHCP(network, host_name, network.hosts[host_name])) + for address in network.assigned_objs: + f.write("%s\n" % host_dhcp(address)) pid = dnsmasq_pid_for(network) # if dnsmasq is already running, then tell it to reload if pid: - # todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers + # TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers # to the correct dnsmasq process try: os.kill(pid, signal.SIGHUP) - except Exception, e: - logging.debug("Hupping dnsmasq threw %s", e) - - # otherwise delete the existing leases file and start dnsmasq - lease_file = dhcp_file(network['vlan'], 'leases') - if os.path.exists(lease_file): - os.unlink(lease_file) + return + except Exception as exc: # pylint: disable=W0703 + logging.debug("Hupping dnsmasq threw %s", exc) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network['bridge_name']} - execute(dnsmasq_cmd(network), addl_env=env) + execute(_dnsmasq_cmd(network), addl_env=env) + def stop_dnsmasq(network): - """ stops the dnsmasq instance for a given network """ + """Stops the dnsmasq instance for a given network""" pid = dnsmasq_pid_for(network) if pid: try: os.kill(pid, signal.SIGTERM) - except Exception, e: - logging.debug("Killing dnsmasq threw %s", e) + except Exception as exc: # pylint: disable=W0703 + logging.debug("Killing dnsmasq threw %s", exc) + def dhcp_file(vlan, kind): - """ return path to a pid, leases or conf file for a vlan """ + """Return path to a pid, leases or conf file for a vlan""" return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) + def bin_file(script): + """Return the absolute path to script in the bin directory""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + def dnsmasq_pid_for(network): - """ the pid for prior dnsmasq instance for a vlan, - returns None if no pid file exists + """Returns the pid for prior dnsmasq instance for a vlan + + Returns None if no pid file exists - if machine has rebooted pid might be incorrect (caller should check) + If machine has rebooted pid might be incorrect (caller should check) """ pid_file = dhcp_file(network['vlan'], 'pid') @@ -178,4 +198,3 @@ def dnsmasq_pid_for(network): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) - diff --git a/nova/network/model.py b/nova/network/model.py index daac035e4..1a958b564 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -57,7 +57,8 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): - def __init__(self, project, vlan): + """Tracks vlans assigned to projects in the datastore""" + def __init__(self, project, vlan): # pylint: disable=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init.
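For reference, each line that start_dnsmasq writes to the dhcp-hostsfile comes from host_dhcp above; with illustrative values (a generated mac, the default ip-based hostname from FixedIp.create, and the address) a line looks like:

    02:16:3e:00:00:05,ip-10-0-0-5.novalocal,10.0.0.5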
@@ -67,10 +68,12 @@ class Vlan(datastore.BasicModel): @property def identifier(self): + """Datastore identifier""" return "%s:%s" % (self.project_id, self.vlan_id) @classmethod def create(cls, project, vlan): + """Create a Vlan object""" instance = cls(project, vlan) instance.save() return instance @@ -78,6 +81,7 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def lookup(cls, project): + """Returns object by project if it exists in datastore or None""" set_name = cls._redis_set_name(cls.__name__) vlan = datastore.Redis.instance().hget(set_name, project) if vlan: @@ -88,20 +92,20 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def dict_by_project(cls): - """a hash of project:vlan""" + """A hash of project:vlan""" set_name = cls._redis_set_name(cls.__name__) - return datastore.Redis.instance().hgetall(set_name) + return datastore.Redis.instance().hgetall(set_name) or {} @classmethod @datastore.absorb_connection_error def dict_by_vlan(cls): - """a hash of vlan:project""" + """A hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) - rv = {} - h = datastore.Redis.instance().hgetall(set_name) - for v in h.keys(): - rv[h[v]] = v - return rv + retvals = {} + hashset = datastore.Redis.instance().hgetall(set_name) or {} + for (key, val) in hashset.iteritems(): + retvals[val] = key + return retvals @classmethod @datastore.absorb_connection_error @@ -119,39 +123,100 @@ class Vlan(datastore.BasicModel): default way of saving into "vlan:ID" and adding to a set of "vlans". """ set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id) + datastore.Redis.instance().hset(set_name, + self.project_id, + self.vlan_id) @datastore.absorb_connection_error def destroy(self): + """Removes the object from the datastore""" set_name = self._redis_set_name(self.__class__.__name__) datastore.Redis.instance().hdel(set_name, self.project_id) def subnet(self): + """Returns a string containing the subnet""" vlan = int(self.vlan_id) network = IPy.IP(FLAGS.private_range) - start = (vlan-FLAGS.vlan_start) * FLAGS.network_size + start = (vlan - FLAGS.vlan_start) * FLAGS.network_size # minus one for the gateway. return "%s-%s" % (network[start], network[start + FLAGS.network_size - 1]) -# CLEANUP: -# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients -# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win? 
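A worked example of the subnet() arithmetic above, under assumed flag values (a private_range of 10.0.0.0/8, vlan_start of 100, network_size of 256), for a project on vlan 102:

    >>> import IPy
    >>> network = IPy.IP('10.0.0.0/8')   # assumed FLAGS.private_range
    >>> start = (102 - 100) * 256        # (vlan - vlan_start) * network_size
    >>> "%s-%s" % (network[start], network[start + 256 - 1])
    '10.0.2.0-10.0.2.255'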
-# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients +class FixedIp(datastore.BasicModel): + """Represents a fixed ip in the datastore""" + + def __init__(self, address): + self.address = address + super(FixedIp, self).__init__() + + @property + def identifier(self): + return self.address + + # NOTE(vish): address states allocated, leased, deallocated + def default_state(self): + return {'address': self.address, + 'state': 'none'} + + @classmethod + # pylint: disable=R0913 + def create(cls, user_id, project_id, address, mac, hostname, network_id): + """Creates a FixedIp object""" + addr = cls(address) + addr['user_id'] = user_id + addr['project_id'] = project_id + addr['mac'] = mac + if hostname is None: + hostname = "ip-%s" % address.replace('.', '-') + addr['hostname'] = hostname + addr['network_id'] = network_id + addr['state'] = 'allocated' + addr.save() + return addr + + def save(self): + is_new = self.is_new_record() + success = super(FixedIp, self).save() + if success and is_new: + self.associate_with("network", self['network_id']) + + def destroy(self): + self.unassociate_with("network", self['network_id']) + super(FixedIp, self).destroy() + + +class ElasticIp(FixedIp): + """Represents an elastic ip in the datastore""" + override_type = "address" + + def default_state(self): + return {'address': self.address, + 'instance_id': 'available', + 'private_ip': 'available'} + + +# CLEANUP: +# TODO(ja): does vlanpool "keeper" need to know the min/max - +# shouldn't FLAGS always win? class BaseNetwork(datastore.BasicModel): + """Implements basic logic for allocating ips in a network""" override_type = 'network' - NUM_STATIC_IPS = 3 # Network, Gateway, and CloudPipe + address_class = FixedIp @property def identifier(self): + """Datastore identifier""" return self.network_id def default_state(self): + """Default values for new objects""" return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod + # pylint: disable=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): + """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) net = cls(network_id, network_str) net['user_id'] = user_id @@ -169,91 +234,135 @@ class BaseNetwork(datastore.BasicModel): @property def network(self): + """Returns an IPy.IP object representing the network""" return IPy.IP(self['network_str']) @property def netmask(self): + """Returns the netmask of this network""" return self.network.netmask() @property def gateway(self): + """Returns the network gateway address""" return self.network[1] @property def broadcast(self): + """Returns the network broadcast address""" return self.network.broadcast() @property def bridge_name(self): + """Returns the bridge associated with this network""" return "br%s" % (self["vlan"]) @property def user(self): + """Returns the user associated with this network""" return manager.AuthManager().get_user(self['user_id']) @property def project(self): + """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - @property - def _hosts_key(self): - return "network:%s:hosts" % (self['network_str']) - - @property - def hosts(self): - return datastore.Redis.instance().hgetall(self._hosts_key) or {} - - def _add_host(self, _user_id, _project_id, host, target): - datastore.Redis.instance().hset(self._hosts_key, host, target) + # pylint: disable=R0913 + def _add_host(self, user_id, project_id, ip_address, mac, hostname): + """Add a host to the 
datastore""" + self.address_class.create(user_id, project_id, ip_address, + mac, hostname, self.identifier) - def _rem_host(self, host): - datastore.Redis.instance().hdel(self._hosts_key, host) + def _rem_host(self, ip_address): + """Remove a host from the datastore""" + self.address_class(ip_address).destroy() @property def assigned(self): - return datastore.Redis.instance().hkeys(self._hosts_key) + """Returns a list of all assigned addresses""" + return self.address_class.associated_keys('network', self.identifier) + + @property + def assigned_objs(self): + """Returns a list of all assigned addresses as objects""" + return self.address_class.associated_to('network', self.identifier) + + def get_address(self, ip_address): + """Returns a specific ip as an object""" + if ip_address in self.assigned: + return self.address_class(ip_address) + return None @property def available(self): - # the .2 address is always CloudPipe - # and the top <n> are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + """Returns a list of all available addresses in the network""" + for idx in range(self.num_bottom_reserved_ips, + len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) - if not address in self.hosts.keys(): + if not address in self.assigned: yield address @property - def num_static_ips(self): - return BaseNetwork.NUM_STATIC_IPS + def num_bottom_reserved_ips(self): + """Returns number of ips reserved at the bottom of the range""" + return 2 # Network, Gateway - def allocate_ip(self, user_id, project_id, mac): + @property + def num_top_reserved_ips(self): + """Returns number of ips reserved at the top of the range""" + return 1 # Broadcast + + def allocate_ip(self, user_id, project_id, mac, hostname=None): + """Allocates an ip to a mac address""" for address in self.available: - logging.debug("Allocating IP %s to %s" % (address, project_id)) - self._add_host(user_id, project_id, address, mac) + logging.debug("Allocating IP %s to %s", address, project_id) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address raise exception.NoMoreAddresses("Project %s with network %s" % - (project_id, str(self.network))) + (project_id, str(self.network))) def lease_ip(self, ip_str): - logging.debug("Leasing allocated IP %s" % (ip_str)) + """Called when DHCP lease is activated""" + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + logging.debug("Leasing allocated IP %s", ip_str) + address['state'] = 'leased' + address.save() def release_ip(self, ip_str): + """Called when DHCP lease expires + + Removes the ip from the assigned list""" if not ip_str in self.assigned: raise exception.AddressNotAllocated() - self.deexpress(address=ip_str) + logging.debug("Releasing IP %s", ip_str) self._rem_host(ip_str) + self.deexpress(address=ip_str) def deallocate_ip(self, ip_str): - # Do nothing for now, cleanup on ip release - pass + """Deallocates an allocated ip""" + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + if address['state'] != 'leased': + # NOTE(vish): address hasn't been leased, so release it + self.release_ip(ip_str) + else: + logging.debug("Deallocating allocated IP %s", ip_str) + address['state'] == 'deallocated' + address.save() - def list_addresses(self): - for address in self.hosts: - yield address + def express(self, address=None): + 
"""Set up network. Implemented in subclasses""" + pass - def express(self, address=None): pass - def deexpress(self, address=None): pass + def deexpress(self, address=None): + """Tear down network. Implemented in subclasses""" + pass class BridgedNetwork(BaseNetwork): @@ -277,7 +386,11 @@ class BridgedNetwork(BaseNetwork): override_type = 'network' @classmethod - def get_network_for_project(cls, user_id, project_id, security_group): + def get_network_for_project(cls, + user_id, + project_id, + security_group='default'): + """Returns network for a given project""" vlan = get_vlan_for_project(project_id) network_str = vlan.subnet() return cls.create(user_id, project_id, security_group, vlan.vlan_id, @@ -293,30 +406,36 @@ class BridgedNetwork(BaseNetwork): linux_net.vlan_create(self) linux_net.bridge_create(self) + class DHCPNetwork(BridgedNetwork): - """ - properties: - dhcp_listen_address: the ip of the gateway / dhcp host - dhcp_range_start: the first ip to give out - dhcp_range_end: the last ip to give out - """ + """Network supporting DHCP""" bridge_gets_ip = True override_type = 'network' def __init__(self, *args, **kwargs): super(DHCPNetwork, self).__init__(*args, **kwargs) - # logging.debug("Initing DHCPNetwork object...") - self.dhcp_listen_address = self.network[1] - self.dhcp_range_start = self.network[3] - self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)] - try: + if not(os.path.exists(FLAGS.networks_path)): os.makedirs(FLAGS.networks_path) - # NOTE(todd): I guess this is a lazy way to not have to check if the - # directory exists, but shouldn't we be smarter about - # telling the difference between existing directory and - # permission denied? (Errno 17 vs 13, OSError) - except Exception, err: - pass + + @property + def num_bottom_reserved_ips(self): + # For cloudpipe + return super(DHCPNetwork, self).num_bottom_reserved_ips + 1 + + @property + def num_top_reserved_ips(self): + return super(DHCPNetwork, self).num_top_reserved_ips + \ + FLAGS.cnt_vpn_clients + + @property + def dhcp_listen_address(self): + """Address where dhcp server should listen""" + return self.gateway + + @property + def dhcp_range_start(self): + """Starting address dhcp server should use""" + return self.network[self.num_bottom_reserved_ips] def express(self, address=None): super(DHCPNetwork, self).express(address=address) @@ -326,20 +445,23 @@ class DHCPNetwork(BridgedNetwork): linux_net.start_dnsmasq(self) else: logging.debug("Not launching dnsmasq: no hosts.") - self.express_cloudpipe() + self.express_vpn() - def allocate_vpn_ip(self, user_id, project_id, mac): + def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None): + """Allocates the reserved ip to a vpn instance""" address = str(self.network[2]) - self._add_host(user_id, project_id, address, mac) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address - def express_cloudpipe(self): + def express_vpn(self): + """Sets up routing rules for vpn""" private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) - linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (self.project.vpn_ip, self.project.vpn_port, private_ip)) + linux_net.confirm_rule( + "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" + % (self.project.vpn_ip, self.project.vpn_port, private_ip)) def deexpress(self, address=None): # if this is the last address, stop dns @@ -349,82 +471,39 @@ class 
DHCPNetwork(BridgedNetwork): else: linux_net.start_dnsmasq(self) -class PublicAddress(datastore.BasicModel): - override_type = "address" - - def __init__(self, address): - self.address = address - super(PublicAddress, self).__init__() - - @property - def identifier(self): - return self.address - - def default_state(self): - return {'address': self.address} - - @classmethod - def create(cls, user_id, project_id, address): - addr = cls(address) - addr['user_id'] = user_id - addr['project_id'] = project_id - addr['instance_id'] = 'available' - addr['private_ip'] = 'available' - addr.save() - return addr +DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] -DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)] class PublicNetworkController(BaseNetwork): + """Handles elastic ips""" override_type = 'network' + address_class = ElasticIp def __init__(self, *args, **kwargs): network_id = "public:default" - super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range) + super(PublicNetworkController, self).__init__(network_id, + FLAGS.public_range, *args, **kwargs) self['user_id'] = "public" self['project_id'] = "public" - self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', + time.gmtime()) self["vlan"] = FLAGS.public_vlan self.save() self.express() - @property - def available(self): - for idx in range(2, len(self.network)-1): - address = str(self.network[idx]) - if not address in self.hosts.keys(): - yield address - - @property - def host_objs(self): - for address in self.assigned: - yield PublicAddress(address) - - def get_host(self, host): - if host in self.assigned: - return PublicAddress(host) - return None - - def _add_host(self, user_id, project_id, host, _target): - datastore.Redis.instance().hset(self._hosts_key, host, project_id) - PublicAddress.create(user_id, project_id, host) - - def _rem_host(self, host): - PublicAddress(host).destroy() - datastore.Redis.instance().hdel(self._hosts_key, host) - def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): + """Associates a public ip to a private ip and instance id""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - # TODO(joshua): Keep an index going both ways - for addr in self.host_objs: + # TODO(josh): Keep an index going both ways + for addr in self.assigned_objs: if addr.get('private_ip', None) == private_ip: raise exception.AddressAlreadyAssociated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') != 'available': raise exception.AddressAlreadyAssociated() addr['private_ip'] = private_ip @@ -433,9 +512,10 @@ class PublicNetworkController(BaseNetwork): self.express(address=public_ip) def disassociate_address(self, public_ip): + """Disassociates a public ip with its private ip""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') == 'available': raise exception.AddressNotAssociated() self.deexpress(address=public_ip) @@ -444,11 +524,14 @@ class PublicNetworkController(BaseNetwork): addr.save() def express(self, address=None): - addresses = self.host_objs if address: - addresses = [self.get_host(address)] + if not address in self.assigned: 
+ raise exception.AddressNotAllocated() + addresses = [self.get_address(address)] + else: + addresses = self.assigned_objs for addr in addresses: - if addr.get('private_ip','available') == 'available': + if addr.get('private_ip', 'available') == 'available': continue public_ip = addr['address'] private_ip = addr['private_ip'] @@ -457,15 +540,16 @@ class PublicNetworkController(BaseNetwork): % (public_ip, private_ip)) linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" % (private_ip, public_ip)) - # TODO: Get these from the secgroup datastore entries + # TODO(joshua): Get these from the secgroup datastore entries linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: - linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) + linux_net.confirm_rule( + "FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (private_ip, protocol, port)) def deexpress(self, address=None): - addr = self.get_host(address) + addr = self.get_address(address) private_ip = addr['private_ip'] linux_net.unbind_public_ip(address, FLAGS.public_interface) linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" @@ -479,19 +563,18 @@ class PublicNetworkController(BaseNetwork): % (private_ip, protocol, port)) -# FIXME(todd): does this present a race condition, or is there some piece of -# architecture that mitigates it (only one queue listener per net)? +# FIXME(todd): does this present a race condition, or is there some +# piece of architecture that mitigates it (only one queue +# listener per net)? def get_vlan_for_project(project_id): - """ - Allocate vlan IDs to individual users. - """ + """Allocate vlan IDs to individual users""" vlan = Vlan.lookup(project_id) if vlan: return vlan known_vlans = Vlan.dict_by_vlan() for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end): vstr = str(vnum) - if not known_vlans.has_key(vstr): + if not vstr in known_vlans: return Vlan.create(project_id, vnum) old_project_id = known_vlans[vstr] if not manager.AuthManager().get_project(old_project_id): @@ -515,8 +598,9 @@ def get_vlan_for_project(project_id): return Vlan.create(project_id, vnum) raise exception.AddressNotAllocated("Out of VLANs") + def get_project_network(project_id, security_group='default'): - """ get a project's private network, allocating one if needed """ + """Gets a project's private network, allocating one if needed""" project = manager.AuthManager().get_project(project_id) if not project: raise nova_exception.NotFound("Project %s doesn't exist." % project_id) @@ -527,28 +611,23 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): - # TODO(vish): This is completely the wrong way to do this, but - # I'm getting the network binary working before I - # tackle doing this the right way. 
- logging.debug("Get Network By Address: %s" % address) - for project in manager.AuthManager().get_projects(): - net = get_project_network(project.id) - if address in net.assigned: - logging.debug("Found %s in %s" % (address, project.id)) - return net - raise exception.AddressNotAllocated() + """Gets the network for a given private ip""" + address_record = FixedIp.lookup(address) + if not address_record: + raise exception.AddressNotAllocated() + return get_project_network(address_record['project_id']) def get_network_by_interface(iface, security_group='default'): + """Gets the network for a given interface""" vlan = iface.rpartition("br")[2] project_id = Vlan.dict_by_vlan().get(vlan) return get_project_network(project_id, security_group) - def get_public_ip_for_instance(instance_id): - # FIXME: this should be a lookup - iteration won't scale - for address_record in PublicAddress.all(): + """Gets the public ip for a given instance""" + # FIXME(josh): this should be a lookup - iteration won't scale + for address_record in ElasticIp.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] - diff --git a/nova/network/service.py b/nova/network/service.py index 1a61f49d4..625f20dd4 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -17,7 +17,7 @@ # under the License. """ -Network Nodes are responsible for allocating ips and setting up network +Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore @@ -38,7 +38,7 @@ flags.DEFINE_string('network_type', flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_list('flat_network_ips', - ['192.168.0.2','192.168.0.3','192.168.0.4'], + ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', 'Network for simple network') @@ -51,26 +51,34 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') + def type_to_class(network_type): + """Convert a network_type string into an actual Python class""" if network_type == 'flat': return FlatNetworkService - elif network_type == 'vlan': + elif network_type == 'vlan': return VlanNetworkService raise NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): + """Sets up the network on a compute host""" srv = type_to_class(network_type) - srv.setup_compute_network(network_type, user_id, project_id, security_group) + srv.setup_compute_network(network_type, + user_id, + project_id, + security_group) def get_host_for_project(project_id): + """Get host allocated to project from datastore""" redis = datastore.Redis.instance() return redis.get(_host_key(project_id)) def _host_key(project_id): - return "network_host:%s" % project_id + """Returns redis host key for network""" + return "networkhost:%s" % project_id class BaseNetworkService(service.Service): @@ -80,6 +88,7 @@ class BaseNetworkService(service.Service): """ def __init__(self, *args, **kwargs): self.network = model.PublicNetworkController() + super(BaseNetworkService, self).__init__(*args, **kwargs) def set_network_host(self, user_id, project_id, *args, **kwargs): """Safely sets the host of the projects network""" @@ -109,7 +118,7 @@ class BaseNetworkService(service.Service): pass @classmethod - def setup_compute_network(self, user_id, project_id, 
security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" raise NotImplementedError() @@ -138,12 +147,14 @@ class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Network is created manually""" pass - def allocate_fixed_ip(self, user_id, project_id, + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', *args, **kwargs): """Gets a fixed ip from the pool @@ -152,7 +163,7 @@ class FlatNetworkService(BaseNetworkService): """ # NOTE(vish): Some automation could be done here. For example, # creating the flat_network_bridge and setting up - # a gateway. This is all done manually atm + # a gateway. This is all done manually atm. redis = datastore.Redis.instance() if not redis.exists('ips') and not len(redis.keys('instances:*')): for fixed_ip in FLAGS.flat_network_ips: @@ -160,6 +171,8 @@ class FlatNetworkService(BaseNetworkService): fixed_ip = redis.spop('ips') if not fixed_ip: raise exception.NoMoreAddresses() + # TODO(vish): some sort of dns handling for hostname should + # probably be done here. return {'inject_network': True, 'network_type': FLAGS.network_type, 'mac_address': utils.generate_mac(), @@ -175,37 +188,51 @@ class FlatNetworkService(BaseNetworkService): """Returns an ip to the pool""" datastore.Redis.instance().sadd('ips', fixed_ip) + class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" # NOTE(vish): A lot of the interactions with network/model.py can be # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. 
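For orientation, the allocate_fixed_ip hunks that follow hand back a dict that the compute side consumes; with illustrative values it looks like:

    {'network_type': 'vlan',               # FLAGS.network_type
     'bridge_name': 'br102',               # from the project's network
     'mac_address': '02:16:3e:00:00:05',   # utils.generate_mac()
     'private_dns_name': '10.0.2.5'}       # the allocated fixed ip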
- def allocate_fixed_ip(self, user_id, project_id, + # pylint: disable=W0221 + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', - vpn=False, *args, **kwargs): - """Gets a fixed ip from the pool """ + is_vpn=False, + hostname=None, + *args, **kwargs): + """Gets a fixed ip from the pool""" mac = utils.generate_mac() net = model.get_project_network(project_id) - if vpn: - fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac) + if is_vpn: + fixed_ip = net.allocate_vpn_ip(user_id, + project_id, + mac, + hostname) else: - fixed_ip = net.allocate_ip(user_id, project_id, mac) + fixed_ip = net.allocate_ip(user_id, + project_id, + mac, + hostname) return {'network_type': FLAGS.network_type, 'bridge_name': net['bridge_name'], 'mac_address': mac, - 'private_dns_name' : fixed_ip} + 'private_dns_name': fixed_ip} def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs): """Returns an ip to the pool""" return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) - def lease_ip(self, address): - return model.get_network_by_address(address).lease_ip(address) + def lease_ip(self, fixed_ip): + """Called by bridge when ip is leased""" + return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip) - def release_ip(self, address): - return model.get_network_by_address(address).release_ip(address) + def release_ip(self, fixed_ip): + """Called by bridge when ip is released""" + return model.get_network_by_address(fixed_ip).release_ip(fixed_ip) def restart_nets(self): """Ensure the network for each user is enabled""" @@ -218,7 +245,7 @@ class VlanNetworkService(BaseNetworkService): vpn.NetworkData.create(project_id) @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cec84287c..a0e2a7fa1 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -33,7 +33,9 @@ flags.DEFINE_integer('vpn_start_port', 1000, flags.DEFINE_integer('vpn_end_port', 2000, 'End port for the cloudpipe VPN servers') + class NoMorePorts(exception.Error): + """No ports available to allocate for the given ip""" pass @@ -67,34 +69,44 @@ class NetworkData(datastore.BasicModel): return network_data @classmethod - def find_free_port_for_ip(cls, ip): + def find_free_port_for_ip(cls, vpn_ip): """Finds a free port for a given ip from the redis set""" # TODO(vish): these redis commands should be generalized and # placed into a base class. Conceptually, it is # similar to an association, but we are just # storing a set of values instead of keys that # should be turned into objects. 
- redis = datastore.Redis.instance() - key = 'ip:%s:ports' % ip - # TODO(vish): these ports should be allocated through an admin - # command instead of a flag - if (not redis.exists(key) and - not redis.exists(cls._redis_association_name('ip', ip))): - for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(key, i) + cls._ensure_set_exists(vpn_ip) - port = redis.spop(key) + port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip)) if not port: raise NoMorePorts() return port @classmethod - def num_ports_for_ip(cls, ip): + def _redis_ports_key(cls, vpn_ip): + """Key that ports are stored under in redis""" + return 'ip:%s:ports' % vpn_ip + + @classmethod + def _ensure_set_exists(cls, vpn_ip): + """Creates the set of ports for the ip if it doesn't already exist""" + # TODO(vish): these ports should be allocated through an admin + # command instead of a flag + redis = datastore.Redis.instance() + if (not redis.exists(cls._redis_ports_key(vpn_ip)) and + not redis.exists(cls._redis_association_name('ip', vpn_ip))): + for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): + redis.sadd(cls._redis_ports_key(vpn_ip), i) + + @classmethod + def num_ports_for_ip(cls, vpn_ip): """Calculates the number of free ports for a given ip""" - return datastore.Redis.instance().scard('ip:%s:ports' % ip) + cls._ensure_set_exists(vpn_ip) + return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): + def ip(self): # pylint: disable=C0103 """The ip assigned to the project""" return self['ip'] @@ -113,4 +125,3 @@ class NetworkData(datastore.BasicModel): self.unassociate_with('ip', self.ip) datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) super(NetworkData, self).destroy() - diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index f625a2aa1..dfe1918e3 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -269,7 +269,23 @@ class ImagesResource(Resource): images = [i for i in image.Image.all() \ if i.is_authorized(request.context, readonly=True)] - request.write(json.dumps([i.metadata for i in images])) + # Bug #617776: + # We used to have 'type' in the image metadata, but this field + # should be called 'imageType', as per the EC2 specification. + # For compat with old metadata files we copy type to imageType if + # imageType is not present. + # For compat with euca2ools (and any other clients using the + # incorrect name) we copy imageType to type. + # imageType is primary if we end up with both in the metadata file + # (which should never happen). 
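Concretely, the decorate helper added just below performs that mapping; with illustrative metadata:

    m = {'type': 'machine'}                    # metadata from an old file
    decorate(m)                                # adds m['imageType'] = 'machine'
    m = {'imageType': 'kernel', 'type': 'x'}   # both present (shouldn't happen)
    decorate(m)                                # imageType wins: m['type'] = 'kernel'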
+ def decorate(m): + if 'imageType' not in m and 'type' in m: + m[u'imageType'] = m['type'] + elif 'imageType' in m: + m[u'type'] = m['imageType'] + return m + + request.write(json.dumps([decorate(i.metadata) for i in images])) request.finish() return server.NOT_DONE_YET diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 860298ba6..861eb364f 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -148,7 +148,7 @@ class Image(object): 'imageOwnerId': 'system', 'isPublic': public, 'architecture': 'x86_64', - 'type': image_type, + 'imageType': image_type, 'state': 'available' } @@ -195,7 +195,7 @@ class Image(object): 'imageOwnerId': context.project.id, 'isPublic': False, # FIXME: grab public from manifest 'architecture': 'x86_64', # FIXME: grab architecture from manifest - 'type' : image_type + 'imageType' : image_type } def write_state(state): diff --git a/nova/rpc.py b/nova/rpc.py index 2a550c3ae..4ac546c2a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,14 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. """ -from carrot import connection +from carrot import connection as carrot_connection from carrot import messaging import json import logging import sys import uuid from twisted.internet import defer -from twisted.internet import reactor from twisted.internet import task from nova import exception @@ -39,13 +38,15 @@ from nova import flags FLAGS = flags.FLAGS -_log = logging.getLogger('amqplib') -_log.setLevel(logging.WARN) +LOG = logging.getLogger('amqplib') +LOG.setLevel(logging.DEBUG) -class Connection(connection.BrokerConnection): +class Connection(carrot_connection.BrokerConnection): + """Connection instance object""" @classmethod def instance(cls): + """Returns the instance""" if not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -56,18 +57,33 @@ class Connection(connection.BrokerConnection): if FLAGS.fake_rabbit: params['backend_cls'] = fakerabbit.Backend + # NOTE(vish): magic is fun! 
+ # pylint: disable=W0142 cls._instance = cls(**params) return cls._instance @classmethod def recreate(cls): + """Recreates the connection instance + + This is necessary to recover from some network errors/disconnects""" del cls._instance return cls.instance() + class Consumer(messaging.Consumer): + """Consumer base class + + Contains methods for connecting the fetch method to async loops + """ + def __init__(self, *args, **kwargs): + self.failed_connection = False + super(Consumer, self).__init__(*args, **kwargs) + # TODO(termie): it would be nice to give these some way of automatically # cleaning up after themselves def attach_to_tornado(self, io_inst=None): + """Attach a callback to tornado that fires 10 times a second""" from tornado import ioloop if io_inst is None: io_inst = ioloop.IOLoop.instance() @@ -79,33 +95,44 @@ class Consumer(messaging.Consumer): attachToTornado = attach_to_tornado - def fetch(self, *args, **kwargs): + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connections""" # TODO(vish): the logic for failed connections and logging should be # refactored into some sort of connection manager object try: - if getattr(self, 'failed_connection', False): - # attempt to reconnect + if self.failed_connection: + # NOTE(vish): conn is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() - super(Consumer, self).fetch(*args, **kwargs) - if getattr(self, 'failed_connection', False): + super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + if self.failed_connection: logging.error("Reconnected to queue") self.failed_connection = False - except Exception, ex: - if not getattr(self, 'failed_connection', False): + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. 
+ except Exception: # pylint: disable=W0703 + if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True def attach_to_twisted(self): + """Attach a callback to twisted that fires 10 times a second""" loop = task.LoopingCall(self.fetch, enable_callbacks=True) loop.start(interval=0.1) + class Publisher(messaging.Publisher): + """Publisher base class""" pass class TopicConsumer(Consumer): + """Consumes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.queue = topic self.routing_key = topic @@ -115,14 +142,24 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): + """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - _log.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug('Initing the Adapter Consumer for %s' % (topic)) self.proxy = proxy - super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + super(AdapterConsumer, self).__init__(connection=connection, + topic=topic) @exception.wrap_exception def receive(self, message_data, message): - _log.debug('received %s' % (message_data)) + """Magically looks for a method on the proxy object and calls it + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + """ + LOG.debug('received %s' % (message_data)) msg_id = message_data.pop('_msg_id', None) method = message_data.get('method') @@ -133,21 +170,25 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - _log.warn('no method for message: %s' % (message_data)) + LOG.warn('no method for message: %s' % (message_data)) msg_reply(msg_id, 'No method for message: %s' % message_data) return node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! 
+ # pylint: disable=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: - d.addCallback(lambda rval: msg_reply(msg_id, rval)) - d.addErrback(lambda e: msg_reply(msg_id, str(e))) + d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) + d.addErrback(lambda e: msg_reply(msg_id, None, e)) return class TopicPublisher(Publisher): + """Publishes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.routing_key = topic self.exchange = FLAGS.control_exchange @@ -156,7 +197,9 @@ class TopicPublisher(Publisher): class DirectConsumer(Consumer): + """Consumes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.queue = msg_id self.routing_key = msg_id @@ -166,7 +209,9 @@ class DirectConsumer(Consumer): class DirectPublisher(Publisher): + """Publishes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id self.exchange = msg_id @@ -174,32 +219,63 @@ class DirectPublisher(Publisher): super(DirectPublisher, self).__init__(connection=connection) -def msg_reply(msg_id, reply): +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id + + failure should be a twisted failure object""" + if failure: + message = failure.getErrorMessage() + traceback = failure.getTraceback() + logging.error("Returning exception %s to caller", message) + logging.error(traceback) + failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply}) + publisher.send({'result': reply, 'failure': failure}) except TypeError: publisher.send( {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()) - }) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) publisher.close() +class RemoteError(exception.Error): + """Signifies that a remote class has raised an exception + + Contains a string representation of the type of the original exception, + the value of the original exception, and the traceback. 
These are + sent to the parent as a joined string so printing the exception + contains all of the relevant info.""" + def __init__(self, exc_type, value, traceback): + self.exc_type = exc_type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__("%s %s\n%s" % (exc_type, + value, + traceback)) + + def call(topic, msg): - _log.debug("Making asynchronous call...") + """Sends a message on a topic and waits for a response""" + LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - _log.debug("MSG_ID is %s" % (msg_id)) + LOG.debug("MSG_ID is %s" % (msg_id)) conn = Connection.instance() d = defer.Deferred() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + def deferred_receive(data, message): + """Acks message and callbacks or errbacks""" message.ack() - d.callback(data) + if data['failure']: + return d.errback(RemoteError(*data['failure'])) + else: + return d.callback(data['result']) + consumer.register_callback(deferred_receive) injected = consumer.attach_to_tornado() @@ -213,7 +289,8 @@ def call(topic, msg): def cast(topic, msg): - _log.debug("Making asynchronous cast...") + """Sends a message on a topic without waiting for a response""" + LOG.debug("Making asynchronous cast...") conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) @@ -221,16 +298,18 @@ def cast(topic, msg): def generic_response(message_data, message): - _log.debug('response %s', message_data) + """Logs a result and exits""" + LOG.debug('response %s', message_data) message.ack() sys.exit(0) def send_message(topic, message, wait=True): + """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - _log.debug('topic is %s', topic) - _log.debug('message %s', message) + LOG.debug('topic is %s', topic) + LOG.debug('message %s', message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), @@ -253,6 +332,8 @@ def send_message(topic, message, wait=True): consumer.wait() -# TODO: Replace with a docstring test if __name__ == "__main__": + # NOTE(vish): you can send messages from the command line using + # topic and a json string representing a dictionary + # for the method send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/server.py b/nova/server.py index 7a1901a2f..96550f078 100644 --- a/nova/server.py +++ b/nova/server.py @@ -52,13 +52,8 @@ def stop(pidfile): """ # Get the pid from the pidfile try: - pf = file(pidfile,'r') - pid = int(pf.read().strip()) - pf.close() + pid = int(open(pidfile, 'r').read().strip()) except IOError: - pid = None - - if not pid: message = "pidfile %s does not exist. 
Daemon not running?\n" sys.stderr.write(message % pidfile) return # not an error in a restart @@ -79,14 +74,15 @@ def stop(pidfile): def serve(name, main): + """Controller for server""" argv = FLAGS(sys.argv) if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name - logging.debug("Full set of FLAGS: \n\n\n" ) + logging.debug("Full set of FLAGS: \n\n\n") for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) )) + logging.debug("%s : %s", flag, FLAGS.get(flag, None)) action = 'start' if len(argv) > 1: @@ -102,7 +98,11 @@ def serve(name, main): else: print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) + daemonize(argv, name, main) + +def daemonize(args, name, main): + """Does the work of daemonizing the process""" logging.getLogger('amqplib').setLevel(logging.WARN) if FLAGS.daemonize: logger = logging.getLogger() @@ -115,7 +115,7 @@ def serve(name, main): else: if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name - logfile = logging.handlers.FileHandler(FLAGS.logfile) + logfile = logging.FileHandler(FLAGS.logfile) logfile.setFormatter(formatter) logger.addHandler(logfile) stdin, stdout, stderr = None, None, None @@ -137,4 +137,4 @@ def serve(name, main): stdout=stdout, stderr=stderr ): - main(argv) + main(args) diff --git a/nova/test.py b/nova/test.py index 6fbcab5e4..c7e08734f 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,15 +22,14 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ -import logging import mox import stubout +import sys import time -import unittest + from tornado import ioloop from twisted.internet import defer -from twisted.python import failure -from twisted.trial import unittest as trial_unittest +from twisted.trial import unittest from nova import fakerabbit from nova import flags @@ -41,20 +40,21 @@ flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') -def skip_if_fake(f): +def skip_if_fake(func): + """Decorator that skips a test if running in fake mode""" def _skipper(*args, **kw): + """Wrapped skipper function""" if FLAGS.fake_tests: - raise trial_unittest.SkipTest('Test cannot be run in fake mode') + raise unittest.SkipTest('Test cannot be run in fake mode') else: - return f(*args, **kw) - - _skipper.func_name = f.func_name + return func(*args, **kw) return _skipper -class TrialTestCase(trial_unittest.TestCase): - - def setUp(self): +class TrialTestCase(unittest.TestCase): + """Test case base class for all unit tests""" + def setUp(self): # pylint: disable-msg=C0103 + """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() # emulate some of the mox stuff, we can't use the metaclass @@ -63,7 +63,8 @@ class TrialTestCase(trial_unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): + def tearDown(self): # pylint: disable-msg=C0103 + """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() self.mox.UnsetStubs() @@ -75,6 +76,7 @@ class TrialTestCase(trial_unittest.TestCase): fakerabbit.reset_all() def flags(self, **kw): + """Override flag variables for a test""" for k, v in kw.iteritems(): if k in self.flag_overrides: self.reset_flags() @@ -84,13 +86,17 @@ class TrialTestCase(trial_unittest.TestCase): setattr(FLAGS, k, v) def reset_flags(self): + """Resets all flag variables for the test. 
Runs after each test""" for k, v in self.flag_overrides.iteritems(): setattr(FLAGS, k, v) class BaseTestCase(TrialTestCase): - def setUp(self): + # TODO(jaypipes): Can this be moved into the TrialTestCase class? + """Base test case class for all unit tests.""" + def setUp(self): # pylint: disable-msg=C0103 + """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of # the injected listeners... this is fine for now though @@ -98,33 +104,27 @@ class BaseTestCase(TrialTestCase): self.ioloop = ioloop.IOLoop.instance() self._waiting = None - self._doneWaiting = False - self._timedOut = False - self.set_up() - - def set_up(self): - pass - - def tear_down(self): - pass + self._done_waiting = False + self._timed_out = False - def tearDown(self): + def tearDown(self):# pylint: disable-msg=C0103 + """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: x.stop() if FLAGS.fake_rabbit: fakerabbit.reset_all() - self.tear_down() - def _waitForTest(self, timeout=60): + def _wait_for_test(self, timeout=60): """ Push the ioloop along to wait for our test to complete. """ self._waiting = self.ioloop.add_timeout(time.time() + timeout, self._timeout) def _wait(): - if self._timedOut: + """Wrapped wait function. Called on timeout.""" + if self._timed_out: self.fail('test timed out') self._done() - if self._doneWaiting: + if self._done_waiting: self.ioloop.stop() return # we can use add_callback here but this uses less cpu when testing @@ -134,15 +134,18 @@ class BaseTestCase(TrialTestCase): self.ioloop.start() def _done(self): + """Callback used for cleaning up deferred test methods.""" if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: + except Exception: # pylint: disable-msg=W0703 + # TODO(jaypipes): This produces a pylint warning. Should + # we really be catching Exception and then passing here? pass self._waiting = None - self._doneWaiting = True + self._done_waiting = True - def _maybeInlineCallbacks(self, f): + def _maybe_inline_callbacks(self, func): """ If we're doing async calls in our tests, wait on them. This is probably the most complicated hunk of code we have so far. @@ -165,7 +168,7 @@ class BaseTestCase(TrialTestCase): d.addCallback(_describe) d.addCallback(_checkDescribe) d.addCallback(lambda x: self._done()) - self._waitForTest() + self._wait_for_test() Example (inline callbacks! yay!): @@ -179,16 +182,17 @@ class BaseTestCase(TrialTestCase): # TODO(termie): this can be a wrapper function instead and # and we can make a metaclass so that we don't # have to copy all that "run" code below. 
- g = f() + g = func() if not hasattr(g, 'send'): self._done() return defer.succeed(g) - inlined = defer.inlineCallbacks(f) + inlined = defer.inlineCallbacks(func) d = inlined() return d - def _catchExceptions(self, result, failure): + def _catch_exceptions(self, result, failure): + """Catches all exceptions and handles keyboard interrupts.""" exc = (failure.type, failure.value, failure.getTracebackObject()) if isinstance(failure.value, self.failureException): result.addFailure(self, exc) @@ -200,44 +204,46 @@ class BaseTestCase(TrialTestCase): self._done() def _timeout(self): + """Helper method which trips the timeouts""" self._waiting = False - self._timedOut = True + self._timed_out = True def run(self, result=None): - if result is None: result = self.defaultTestResult() + """Runs the test case""" result.startTest(self) - testMethod = getattr(self, self._testMethodName) + test_method = getattr(self, self._testMethodName) try: try: self.setUp() except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) return ok = False try: - d = self._maybeInlineCallbacks(testMethod) - d.addErrback(lambda x: self._catchExceptions(result, x)) + d = self._maybe_inline_callbacks(test_method) + d.addErrback(lambda x: self._catch_exceptions(result, x)) d.addBoth(lambda x: self._done() and x) - self._waitForTest() + self._wait_for_test() ok = True except self.failureException: - result.addFailure(self, self._exc_info()) + result.addFailure(self, sys.exc_info()) except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) try: self.tearDown() except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) ok = False - if ok: result.addSuccess(self) + if ok: + result.addSuccess(self) finally: result.stopTest(self) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index f7e0625a3..0b404bfdc 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -179,7 +179,21 @@ class AuthTestCase(test.BaseTestCase): project.add_role('test1', 'sysadmin') self.assertTrue(project.has_role('test1', 'sysadmin')) - def test_211_can_remove_project_role(self): + def test_211_can_list_project_roles(self): + project = self.manager.get_project('testproj') + user = self.manager.get_user('test1') + self.manager.add_role(user, 'netadmin', project) + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + # has role should be false because global role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + + def test_212_can_remove_project_role(self): project = self.manager.get_project('testproj') self.assertTrue(project.has_role('test1', 'sysadmin')) project.remove_role('test1', 'sysadmin') diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 40837405c..3501771cc 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -132,7 +132,7 @@ class CloudTestCase(test.BaseTestCase): 'state': 0x01, 'user_data': '' } - rv = self.cloud._format_instances(self.context) + rv = self.cloud._format_describe_instances(self.context) self.assert_(len(rv['reservationSet']) == 0) # simulate launch of 5 instances diff --git a/nova/endpoint/wsgi.py 
b/nova/tests/declare_flags.py index b7bb588c3..51a55ec72 100644 --- a/nova/endpoint/wsgi.py +++ b/nova/tests/declare_flags.py @@ -16,25 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. -''' -Utility methods for working with WSGI servers -''' +from nova import flags -class Util(object): - - @staticmethod - def route(reqstr, controllers): - if len(reqstr) == 0: - return Util.select_root_controller(controllers), [] - parts = [x for x in reqstr.split("/") if len(x) > 0] - if len(parts) == 0: - return Util.select_root_controller(controllers), [] - return controllers[parts[0]], parts[1:] - - @staticmethod - def select_root_controller(controllers): - if '' in controllers: - return controllers[''] - else: - return None +FLAGS = flags.FLAGS +flags.DEFINE_integer('answer', 42, 'test flag') diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py new file mode 100644 index 000000000..d49d5dc43 --- /dev/null +++ b/nova/tests/flags_unittest.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
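For context, the declare_flags.py module above exists only so the DECLARE test below has something to import; the same define-then-parse pattern can be sketched standalone against python-gflags, the library nova.flags builds on (a rough sketch under that assumption, not nova code):

    import sys

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_integer('answer', 42, 'test flag')

    if __name__ == '__main__':
        argv = FLAGS(sys.argv)  # parse known flags, keep the leftover args
        print FLAGS.answer      # 42 unless --answer was given on the command line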
+ +from nova import exception +from nova import flags +from nova import test + + +class FlagsTestCase(test.TrialTestCase): + def setUp(self): + super(FlagsTestCase, self).setUp() + self.FLAGS = flags.FlagValues() + self.global_FLAGS = flags.FLAGS + + def test_define(self): + self.assert_('string' not in self.FLAGS) + self.assert_('int' not in self.FLAGS) + self.assert_('false' not in self.FLAGS) + self.assert_('true' not in self.FLAGS) + + flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS) + flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) + + self.assert_(self.FLAGS['string']) + self.assert_(self.FLAGS['int']) + self.assert_(self.FLAGS['false']) + self.assert_(self.FLAGS['true']) + self.assertEqual(self.FLAGS.string, 'default') + self.assertEqual(self.FLAGS.int, 1) + self.assertEqual(self.FLAGS.false, False) + self.assertEqual(self.FLAGS.true, True) + + argv = ['flags_test', + '--string', 'foo', + '--int', '2', + '--false', + '--notrue'] + + self.FLAGS(argv) + self.assertEqual(self.FLAGS.string, 'foo') + self.assertEqual(self.FLAGS.int, 2) + self.assertEqual(self.FLAGS.false, True) + self.assertEqual(self.FLAGS.true, False) + + def test_declare(self): + self.assert_('answer' not in self.global_FLAGS) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assert_('answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.answer, 42) + + # Make sure we don't overwrite anything + self.global_FLAGS.answer = 256 + self.assertEqual(self.global_FLAGS.answer, 256) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assertEqual(self.global_FLAGS.answer, 256) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + + argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + self.assertEqual(len(args), 2) + self.assertEqual(args[1], 'extra_arg') + + self.assert_('runtime_answer' not in self.global_FLAGS) + + import nova.tests.runtime_flags + + self.assert_('runtime_answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.runtime_answer, 60) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 879ee02a4..039509809 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
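The runtime-flag test above depends on nova.flags holding on to an unrecognized --runtime_answer until some later import DEFINEs it; a toy illustration of that deferred-parse idea (hypothetical LazyFlags class, not the nova implementation):

    class LazyFlags(object):
        """Remember flag values that are parsed before they are defined."""

        def __init__(self):
            self.values = {}    # flags that have been defined
            self.deferred = {}  # seen on argv, but no definition yet

        def parse(self, argv):
            args = [argv[0]]
            for arg in argv[1:]:
                if arg.startswith('--') and '=' in arg:
                    name, value = arg[2:].split('=', 1)
                    # Stash unknown flags instead of rejecting them.
                    target = self.values if name in self.values else self.deferred
                    target[name] = value
                else:
                    args.append(arg)
            return args

        def define(self, name, default):
            # A late DEFINE adopts any value already seen on the command line.
            self.values[name] = self.deferred.pop(name, default)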
- +""" +Unit Tests for network code +""" import IPy import os import logging @@ -31,8 +33,10 @@ from nova.network.exception import NoMoreAddresses FLAGS = flags.FLAGS + class NetworkTestCase(test.TrialTestCase): - def setUp(self): + """Test cases for network code""" + def setUp(self): # pylint: disable=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -43,7 +47,6 @@ class NetworkTestCase(test.TrialTestCase): network_size=32) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() - self.dnsmasq = FakeDNSMasq() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.projects.append(self.manager.create_project('netuser', @@ -54,47 +57,49 @@ class NetworkTestCase(test.TrialTestCase): self.projects.append(self.manager.create_project(name, 'netuser', name)) - self.network = model.PublicNetworkController() + vpn.NetworkData.create(self.projects[i].id) self.service = service.VlanNetworkService() - def tearDown(self): + def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) def test_public_network_allocation(self): + """Makes sure that we can allocaate a public ip""" pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public") + address = self.service.allocate_elastic_ip(self.user.id, + self.projects[0].id) self.assertTrue(IPy.IP(address) in pubnet) - self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): - result = yield self.service.allocate_fixed_ip( + """Makes sure that we can allocate and deallocate a fixed ip""" + result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] - logging.debug("Was allocated %s" % (address)) net = model.get_project_network(self.projects[0].id, "default") self.assertEqual(True, is_in_project(address, self.projects[0].id)) hostname = "test-host" - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - rv = self.service.deallocate_fixed_ip(address) + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) - def test_range_allocation(self): - hostname = "test-host" - result = yield self.service.allocate_fixed_ip( - self.user.id, self.projects[0].id) + def test_side_effects(self): + """Ensures allocating and releasing has no side effects""" + hostname = "side-effect-host" + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( - self.user, self.projects[1].id) + result = self.service.allocate_fixed_ip(self.user, + self.projects[1].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] @@ -102,66 +107,75 @@ class NetworkTestCase(test.TrialTestCase): secondnet = model.get_project_network(self.projects[1].id, "default") self.assertEqual(True, is_in_project(address, 
self.projects[0].id)) - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.issue_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - rv = self.service.deallocate_fixed_ip(address) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) - rv = self.service.deallocate_fixed_ip(secondaddress) - self.dnsmasq.release_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) - self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) + self.service.deallocate_fixed_ip(secondaddress) + release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) + self.assertEqual(False, is_in_project(secondaddress, + self.projects[1].id)) def test_subnet_edge(self): - result = yield self.service.allocate_fixed_ip(self.user.id, + """Makes sure that private ips don't overlap""" + result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" - for i in range(1,5): + for i in range(1, 5): project_id = self.projects[i].id - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac2 = result['mac_address'] address2 = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] - self.assertEqual(False, is_in_project(address, self.projects[0].id)) - self.assertEqual(False, is_in_project(address2, self.projects[0].id)) - self.assertEqual(False, is_in_project(address3, self.projects[0].id)) - rv = self.service.deallocate_fixed_ip(address) - rv = self.service.deallocate_fixed_ip(address2) - rv = self.service.deallocate_fixed_ip(address3) net = model.get_project_network(project_id, "default") - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(mac2, address2, hostname, net.bridge_name) + issue_ip(mac3, address3, hostname, net.bridge_name) + self.assertEqual(False, is_in_project(address, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address2, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address3, + self.projects[0].id)) + self.service.deallocate_fixed_ip(address) + self.service.deallocate_fixed_ip(address2) + self.service.deallocate_fixed_ip(address3) + release_ip(mac, address, 
hostname, net.bridge_name) + release_ip(mac2, address2, hostname, net.bridge_name) + release_ip(mac3, address3, hostname, net.bridge_name) net = model.get_project_network(self.projects[0].id, "default") - rv = self.service.deallocate_fixed_ip(firstaddress) - self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(firstaddress) + release_ip(mac, firstaddress, hostname, net.bridge_name) - def test_212_vpn_ip_and_port_looks_valid(self): - vpn.NetworkData.create(self.projects[0].id) + def test_vpn_ip_and_port_looks_valid(self): + """Ensure the vpn ip and port are reasonable""" self.assert_(self.projects[0].vpn_ip) self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port) self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) def test_too_many_vpns(self): + """Ensure error is raised if we run out of vpn ports""" vpns = [] for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)): vpns.append(vpn.NetworkData.create("vpnuser%s" % i)) @@ -169,84 +183,102 @@ class NetworkTestCase(test.TrialTestCase): for network_datum in vpns: network_datum.destroy() - def test_release_before_deallocate(self): - pass + def test_ips_are_reused(self): + """Makes sure that ip addresses that are deallocated get reused""" + result = self.service.allocate_fixed_ip( + self.user.id, self.projects[0].id) + mac = result['mac_address'] + address = result['private_dns_name'] - def test_deallocate_before_issued(self): - pass + hostname = "reuse-host" + net = model.get_project_network(self.projects[0].id, "default") - def test_too_many_addresses(self): - """ - Here, we test that a proper NoMoreAddresses exception is raised. + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) - However, the number of available IP addresses depends on the test - environment's setup. + result = self.service.allocate_fixed_ip( + self.user, self.projects[0].id) + secondmac = result['mac_address'] + secondaddress = result['private_dns_name'] + self.assertEqual(address, secondaddress) + issue_ip(secondmac, secondaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) + release_ip(secondmac, secondaddress, hostname, net.bridge_name) - Network size is set in test fixture's setUp method. + def test_available_ips(self): + """Make sure the number of available ips for the network is correct - There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS) + The number of available IP addresses depends on the test + environment's setup. - And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary - services (gateway, CloudPipe, etc) + Network size is set in test fixture's setUp method. - So we should get flags.network_size - (NUM_STATIC_IPS + - NUM_PREALLOCATED_IPS + - NUM_RESERVED_VPN_IPS) - usable addresses + There are ips reserved at the bottom and top of the range. 
+ These are used by essential services (network, gateway, CloudPipe, broadcast). """ net = model.get_project_network(self.projects[0].id, "default") + num_preallocated_ips = len(net.assigned) + net_size = flags.FLAGS.network_size + num_available_ips = net_size - (net.num_bottom_reserved_ips + + num_preallocated_ips + + net.num_top_reserved_ips) + self.assertEqual(num_available_ips, len(list(net.available))) - # Determine expected number of available IP addresses - num_static_ips = net.num_static_ips - num_preallocated_ips = len(net.hosts.keys()) - num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients - num_available_ips = flags.FLAGS.network_size - (num_static_ips + - num_preallocated_ips + - num_reserved_vpn_ips) + def test_too_many_addresses(self): + """Test for a NoMoreAddresses exception when all fixed ips are used. + """ + net = model.get_project_network(self.projects[0].id, "default") hostname = "toomany-hosts" macs = {} addresses = {} - for i in range(0, (num_available_ips - 1)): - result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) + # Number of available ips is len of the available list + num_available_ips = len(list(net.available)) + for i in range(num_available_ips): + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] - self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name) + issue_ip(macs[i], addresses[i], hostname, net.bridge_name) + + self.assertEqual(len(list(net.available)), 0) + self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, + self.user.id, self.projects[0].id) - self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses) + for i in range(len(addresses)): + self.service.deallocate_fixed_ip(addresses[i]) + release_ip(macs[i], addresses[i], hostname, net.bridge_name) + self.assertEqual(len(list(net.available)), num_available_ips) - for i in range(0, (num_available_ips - 1)): - rv = self.service.deallocate_fixed_ip(addresses[i]) - self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name) def is_in_project(address, project_id): - return address in model.get_project_network(project_id).list_addresses() + """Returns true if address is in specified project""" + return address in model.get_project_network(project_id).assigned -def _get_project_addresses(project_id): - project_addresses = [] - for addr in model.get_project_network(project_id).list_addresses(): - project_addresses.append(addr) - return project_addresses def binpath(script): + """Returns the absolute path to a script in bin""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -class FakeDNSMasq(object): - def issue_ip(self, mac, ip, hostname, interface): - cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s " % (out, err)) - - def release_ip(self, mac, ip, hostname, interface): - cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s " % (out, err)) +def issue_ip(mac, private_ip, hostname, interface): + """Run add command on dhcpbridge""" + cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), +
mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("ISSUE_IP: %s, %s ", out, err) + + +def release_ip(mac, private_ip, hostname, interface): + """Run del command on dhcpbridge""" + cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), + mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py new file mode 100644 index 000000000..764a97416 --- /dev/null +++ b/nova/tests/rpc_unittest.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" +import logging + +from twisted.internet import defer + +from nova import flags +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class RpcTestCase(test.BaseTestCase): + """Test cases for rpc""" + def setUp(self): # pylint: disable=C0103 + super(RpcTestCase, self).setUp() + self.conn = rpc.Connection.instance() + self.receiver = TestReceiver() + self.consumer = rpc.AdapterConsumer(connection=self.conn, + topic='test', + proxy=self.receiver) + + self.injected.append(self.consumer.attach_to_tornado(self.ioloop)) + + def test_call_succeed(self): + """Get a value through rpc call""" + value = 42 + result = yield rpc.call('test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_exception(self): + """Test that exception gets passed back properly + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ """ + value = 42 + self.assertFailure(rpc.call('test', {"method": "fail", + "args": {"value": value}}), + rpc.RemoteError) + try: + yield rpc.call('test', {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call + + Uses static methods because we aren't actually storing any state""" + + @staticmethod + def echo(value): + """Simply returns whatever value is sent in""" + logging.debug("Received %s", value) + return defer.succeed(value) + + @staticmethod + def fail(value): + """Raises an exception with the value sent in""" + raise Exception(value) diff --git a/exercise_rsapi.py b/nova/tests/runtime_flags.py index 20589b9cb..1eb501406 100644 --- a/exercise_rsapi.py +++ b/nova/tests/runtime_flags.py @@ -16,36 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. -import cloudservers +from nova import flags -class IdFake: - def __init__(self, id): - self.id = id +FLAGS = flags.FLAGS -# to get your access key: -# from nova.auth import users -# users.UserManger.instance().get_users()[0].access -rscloud = cloudservers.CloudServers( - 'admin', - '6cca875e-5ab3-4c60-9852-abf5c5c60cc6' - ) -rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0' - - -rv = rscloud.servers.list() -print "SERVERS: %s" % rv - -if len(rv) == 0: - server = rscloud.servers.create( - "test-server", - IdFake("ami-tiny"), - IdFake("m1.tiny") - ) - print "LAUNCH: %s" % server -else: - server = rv[0] - print "Server to kill: %s" % server - -raw_input("press enter key to kill the server") - -server.delete() +flags.DEFINE_integer('runtime_answer', 54, 'test flag') diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py new file mode 100644 index 000000000..2aab16809 --- /dev/null +++ b/nova/tests/virt_unittest.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import flags +from nova import test +from nova.virt import libvirt_conn + +FLAGS = flags.FLAGS + + +class LibvirtConnTestCase(test.TrialTestCase): + def test_get_uri_and_template(self): + class MockDataModel(object): + def __init__(self): + self.datamodel = {'name': 'i-cafebabe', + 'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2} + + type_uri_map = {'qemu': ('qemu:///system', + [lambda s: '<domain type=\'qemu\'>' in s, + lambda s: 'type>hvm</type' in s, + lambda s: 'emulator>/usr/bin/kvm' not in s]), + 'kvm': ('qemu:///system', + [lambda s: '<domain type=\'kvm\'>' in s, + lambda s: 'type>hvm</type' in s, + lambda s: 'emulator>/usr/bin/qemu<' not in s]), + 'uml': ('uml:///system', + [lambda s: '<domain type=\'uml\'>' in s, + lambda s: 'type>uml</type' in s]), + } + + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + + uri, template = conn.get_uri_and_template() + self.assertEquals(uri, expected_uri) + + for i, check in enumerate(checks): + xml = conn.toXml(MockDataModel()) + self.assertTrue(check(xml), '%s failed check %d' % (xml, i)) + + # Deliberately not just assigning this string to FLAGS.libvirt_uri and + # checking against that later on. This way we make sure the + # implementation doesn't fiddle around with the FLAGS. + testuri = 'something completely different' + FLAGS.libvirt_uri = testuri + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + uri, template = conn.get_uri_and_template() + self.assertEquals(uri, testuri) + diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 0f4f0e34d..2a07afe69 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -17,6 +17,10 @@ # under the License.
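The checks in type_uri_map in the libvirt test above are just predicates over the XML that toXml produces by %-substituting the instance datamodel into a template; the same pattern in isolation (a minimal made-up template, not one of nova's):

    # Render a %-style template from a datamodel dict, then verify the
    # result with a list of predicate checks, as the test does.
    template = "<domain type='%(type)s'><name>%(name)s</name></domain>"
    xml = template % {'type': 'kvm', 'name': 'i-cafebabe'}

    checks = [lambda s: "<domain type='kvm'>" in s,
              lambda s: 'i-cafebabe' in s]
    for i, check in enumerate(checks):
        assert check(xml), '%s failed check %d' % (xml, i)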
import logging +import shutil +import tempfile + +from twisted.internet import defer from nova import compute from nova import exception @@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase): super(VolumeTestCase, self).setUp() self.compute = compute.service.ComputeService() self.volume = None + self.tempdir = tempfile.mkdtemp() self.flags(connection_type='fake', - fake_storage=True) + fake_storage=True, + aoe_export_dir=self.tempdir) self.volume = volume_service.VolumeService() + def tearDown(self): + shutil.rmtree(self.tempdir) + super(VolumeTestCase, self).tearDown() + + @defer.inlineCallbacks def test_run_create_volume(self): vol_size = '0' user_id = 'fake' @@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume(volume_id)['volume_id']) rv = self.volume.delete_volume(volume_id) - self.assertFailure(volume_service.get_volume(volume_id), - exception.Error) + self.assertRaises(exception.Error, volume_service.get_volume, volume_id) + @defer.inlineCallbacks def test_too_big_volume(self): vol_size = '1001' user_id = 'fake' project_id = 'fake' - self.assertRaises(TypeError, - self.volume.create_volume, - vol_size, user_id, project_id) + try: + yield self.volume.create_volume(vol_size, user_id, project_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + @defer.inlineCallbacks def test_too_many_volumes(self): vol_size = '1' user_id = 'fake' project_id = 'fake' num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.slots_per_shelf * num_shelves + total_slots = FLAGS.blades_per_shelf * num_shelves vols = [] + from nova import datastore + redis = datastore.Redis.instance() for i in xrange(total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - volume_service.NoMoreVolumes) + volume_service.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) + @defer.inlineCallbacks def test_run_attach_detach_volume(self): # Create one volume and one compute to test with instance_id = "storage-test" @@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volume_service.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.compute.attach_volume(volume_id, - instance_id, - mountpoint) + if FLAGS.fake_tests: + volume_obj.finish_attach() + else: + rv = yield self.compute.attach_volume(instance_id, + volume_id, + mountpoint) self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attachStatus'], "attached") + self.assertEqual(volume_obj['attach_status'], "attached") self.assertEqual(volume_obj['instance_id'], instance_id) self.assertEqual(volume_obj['mountpoint'], mountpoint) - self.assertRaises(exception.Error, - self.volume.delete_volume, - volume_id) - - rv = yield self.volume.detach_volume(volume_id) + self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) + volume_obj.start_detach() + if FLAGS.fake_tests: + volume_obj.finish_detach() + else: + rv = yield self.volume.detach_volume(instance_id, + volume_id) volume_obj = volume_service.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") @@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume, volume_id) + @defer.inlineCallbacks + def test_multiple_volume_race_condition(self): + vol_size = "5" + user_id = "fake" + 
project_id = 'fake' + shelf_blades = [] + def _check(volume_id): + vol = volume_service.get_volume(volume_id) + shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) + self.assert_(shelf_blade not in shelf_blades) + shelf_blades.append(shelf_blade) + logging.debug("got %s" % shelf_blade) + vol.destroy() + deferreds = [] + for i in range(5): + d = self.volume.create_volume(vol_size, user_id, project_id) + d.addCallback(_check) + d.addErrback(self.fail) + deferreds.append(d) + yield defer.DeferredList(deferreds) + def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node diff --git a/nova/twistd.py b/nova/twistd.py index c83276daa..8de322aa5 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -241,15 +241,7 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - class NoNewlineFormatter(logging.Formatter): - """Strips newlines from default formatter""" - def format(self, record): - """Grabs default formatter's output and strips newlines""" - data = logging.Formatter.format(self, record) - return data.replace("\n", "--") - - # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well - formatter = NoNewlineFormatter( + formatter = logging.Formatter( '(%(name)s): %(levelname)s %(message)s') handler = logging.StreamHandler(log.StdioOnnaStick()) handler.setFormatter(formatter) diff --git a/nova/utils.py b/nova/utils.py index 0b23de7cd..63db080f1 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -119,11 +119,15 @@ def get_my_ip(): ''' if getattr(FLAGS, 'fake_tests', None): return '127.0.0.1' - csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - csock.connect(('www.google.com', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + csock.connect(('www.google.com', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.gaierror as ex: + logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + return "127.0.0.1" def isotime(at=None): if not at: diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 004adb19d..90bc7fa0a 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,6 +27,15 @@ FLAGS = flags.FLAGS def get_connection(read_only=False): + """Returns an object representing the connection to a virtualization + platform. This could be nova.virt.fake.FakeConnection in test mode, + a connection to KVM or QEMU via libvirt, or a connection to XenServer + or Xen Cloud Platform via XenAPI. + + Any object returned here must conform to the interface documented by + FakeConnection. + """ + # TODO(termie): maybe lazy load after initial check for permissions # TODO(termie): check whether we can be disconnected t = FLAGS.connection_type diff --git a/nova/virt/fake.py b/nova/virt/fake.py index d9ae5ac96..105837181 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -19,6 +19,7 @@ """ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor. +This module also documents the semantics of real hypervisor connections. """ import logging @@ -32,6 +33,38 @@ def get_connection(_): class FakeConnection(object): + """ + The interface to this class talks in terms of 'instances' (Amazon EC2 and + internal Nova terminology), by which we mean 'running virtual machine' + (XenAPI terminology) or domain (Xen or libvirt terminology). 
+ + An instance has an ID, which is the identifier chosen by Nova to represent + the instance further up the stack. This is unfortunately also called a + 'name' elsewhere. As far as this layer is concerned, 'instance ID' and + 'instance name' are synonyms. + + Note that the instance ID or name is not human-readable or + customer-controlled -- it's an internal ID chosen by Nova. At the + nova.virt layer, instances do not have human-readable names at all -- such + things are only known higher up the stack. + + Most virtualization platforms will also have their own identity schemes, + to uniquely identify a VM or domain. These IDs must stay internal to the + platform-specific layer, and never escape the connection interface. The + platform-specific layer is responsible for keeping track of which instance + ID maps to which platform-specific ID, and vice versa. + + In contrast, the list_disks and list_interfaces calls may return + platform-specific IDs. These identify a specific virtual disk or specific + virtual network interface, and these IDs are opaque to the rest of Nova. + + Some methods here take an instance of nova.compute.service.Instance. This + is the datastructure used by nova.compute to store details regarding an + instance, and pass them into this layer. This layer is responsible for + translating that generic datastructure into terms that are specific to the + virtualization platform. + """ + def __init__(self): self.instances = {} @@ -42,20 +75,59 @@ class FakeConnection(object): return cls._instance def list_instances(self): + """ + Return the names of all the instances known to the virtualization + layer, as a list. + """ return self.instances.keys() def spawn(self, instance): + """ + Create a new instance/VM/domain on the virtualization platform. + + The given parameter is an instance of nova.compute.service.Instance. + This function should use the data there to guide the creation of + the new instance. + + Once this function successfully completes, the instance should be + running (power_state.RUNNING). + + If this function fails, any partial instance should be completely + cleaned up, and the virtualization platform should be in the state + that it was before this call began. + """ + fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING def reboot(self, instance): + """ + Reboot the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ pass - + def destroy(self, instance): + """ + Destroy (shutdown and delete) the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ del self.instances[instance.name] def get_info(self, instance_id): + """ + Get a block of information about the given instance. This is returned + as a dictionary containing 'state': The power_state of the instance, + 'max_mem': The maximum memory for the instance, in KiB, 'mem': The + current memory the instance has, in KiB, 'num_cpu': The current number + of virtual CPUs the instance has, 'cpu_time': The total CPU time used + by the instance, in nanoseconds. + """ i = self.instances[instance_id] return {'state': i._state, 'max_mem': 0, @@ -64,15 +136,70 @@ class FakeConnection(object): 'cpu_time': 0} def list_disks(self, instance_id): + """ + Return the IDs of all the virtual disks attached to the specified + instance, as a list. 
These IDs are opaque to the caller (they are + only useful for giving back to this layer as a parameter to + disk_stats). These IDs only need to be unique for a given instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return ['A_DISK'] def list_interfaces(self, instance_id): + """ + Return the IDs of all the virtual network interfaces attached to the + specified instance, as a list. These IDs are opaque to the caller + (they are only useful for giving back to this layer as a parameter to + interface_stats). These IDs only need to be unique for a given + instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return ['A_VIF'] def block_stats(self, instance_id, disk_id): + """ + Return performance counters associated with the given disk_id on the + given instance_id. These are returned as [rd_req, rd_bytes, wr_req, + wr_bytes, errs], where rd indicates read, wr indicates write, req is + the total number of I/O requests made, bytes is the total number of + bytes transferred, and errs is the number of requests held up due to a + full pipeline. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, 0L] def interface_stats(self, instance_id, iface_id): + """ + Return performance counters associated with the given iface_id on the + given instance_id. These are returned as [rx_bytes, rx_packets, + rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx + indicates receive, tx indicates transmit, bytes and packets indicate + the total number of bytes or packets transferred, and errs and dropped + are the total number of packets failed / dropped. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/virt/images.py b/nova/virt/images.py index 48a87b514..1e23c48b9 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. import os.path import time +import urlparse from nova import flags from nova import process @@ -43,7 +44,7 @@ def fetch(image, path, user, project): return f(image, path, user, project) def _fetch_s3_image(image, path, user, project): - url = _image_url('%s/image' % image) + url = image_url(image) # This should probably move somewhere else, like e.g.
a download_as # method on User objects and at the same time get rewritten to use @@ -51,11 +52,11 @@ def _fetch_s3_image(image, path, user, project): headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - uri = '/' + url.partition('/')[2] + (_, _, url_path, _, _, _) = urlparse.urlparse(url) access = manager.AuthManager().get_access_key(user, project) signature = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', - uri) + url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) cmd = ['/usr/bin/curl', '--silent', url] @@ -72,5 +73,6 @@ def _fetch_local_image(image, path, user, project): def _image_path(path): return os.path.join(FLAGS.images_path, path) -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) +def image_url(image): + return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, + image) diff --git a/nova/compute/interfaces.template b/nova/virt/interfaces.template index 11df301f6..11df301f6 100644 --- a/nova/compute/interfaces.template +++ b/nova/virt/interfaces.template diff --git a/nova/compute/libvirt.xml.template b/nova/virt/libvirt.qemu.xml.template index 307f9d03a..307f9d03a 100644 --- a/nova/compute/libvirt.xml.template +++ b/nova/virt/libvirt.qemu.xml.template diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template new file mode 100644 index 000000000..6f4290f98 --- /dev/null +++ b/nova/virt/libvirt.uml.xml.template @@ -0,0 +1,25 @@ +<domain type='%(type)s'> + <name>%(name)s</name> + <memory>%(memory_kb)s</memory> + <os> + <type>%(type)s</type> + <kernel>/usr/bin/linux</kernel> + <root>/dev/ubda1</root> + </os> + <devices> + <disk type='file'> + <source file='%(basepath)s/disk'/> + <target dev='ubd0' bus='uml'/> + </disk> + <interface type='bridge'> + <source bridge='%(bridge_name)s'/> + <mac address='%(mac_address)s'/> + </interface> + <console type="pty" /> + <serial type="file"> + <source path='%(basepath)s/console.log'/> + <target port='1'/> + </serial> + </devices> + <nova>%(nova)s</nova> +</domain> diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 551ba6e54..7449d3954 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,15 +44,20 @@ libxml2 = None FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') + utils.abspath('virt/libvirt.qemu.xml.template'), + 'Libvirt XML Template for QEmu/KVM') +flags.DEFINE_string('libvirt_uml_xml_template', + utils.abspath('virt/libvirt.uml.xml.template'), + 'Libvirt XML Template for user-mode-linux') flags.DEFINE_string('injected_network_template', - utils.abspath('compute/interfaces.template'), + utils.abspath('virt/interfaces.template'), 'Template file for injected network') - flags.DEFINE_string('libvirt_type', 'kvm', - 'Libvirt domain type (kvm, qemu, etc)') + 'Libvirt domain type (valid options are: kvm, qemu, uml)') +flags.DEFINE_string('libvirt_uri', + '', + 'Override the default libvirt URI (which is dependent on libvirt_type)') def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -65,16 +70,42 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) - class LibvirtConnection(object): def __init__(self, read_only): + self.libvirt_uri, template_file = self.get_uri_and_template() + + self.libvirt_xml = open(template_file).read() + self._wrapped_conn = None + 
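# NOTE: the connection is opened lazily; _wrapped_conn stays None until the _conn property below first opens it. +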
self.read_only = read_only + + + @property + def _conn(self): + if not self._wrapped_conn: + self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) + return self._wrapped_conn + + + def get_uri_and_template(self): + if FLAGS.libvirt_type == 'uml': + uri = FLAGS.libvirt_uri or 'uml:///system' + template_file = FLAGS.libvirt_uml_xml_template + else: + uri = FLAGS.libvirt_uri or 'qemu:///system' + template_file = FLAGS.libvirt_xml_template + return uri, template_file + + + def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + if read_only: - self._conn = libvirt.openReadOnly('qemu:///system') + return libvirt.openReadOnly(uri) else: - self._conn = libvirt.openAuth('qemu:///system', auth, 0) + return libvirt.openAuth(uri, auth, 0) + def list_instances(self): @@ -114,7 +145,8 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.abspath(instance.datamodel['basepath']) logging.info("Deleting instance files at %s", target) - shutil.rmtree(target) + if os.path.exists(target): + shutil.rmtree(target) @defer.inlineCallbacks @@ -236,14 +268,13 @@ class LibvirtConnection(object): def toXml(self, instance): # TODO(termie): cache? logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() xml_info = instance.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying xml_info['nova'] = json.dumps(instance.datamodel.copy()) xml_info['type'] = FLAGS.libvirt_type - libvirt_xml = libvirt_xml % xml_info + libvirt_xml = self.libvirt_xml % xml_info logging.debug("Finished the toXML method") return libvirt_xml @@ -260,12 +291,6 @@ class LibvirtConnection(object): def get_disks(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all block devices for this domain. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -303,12 +328,6 @@ class LibvirtConnection(object): def get_interfaces(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all network interfaces for this instance. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -346,18 +365,10 @@ class LibvirtConnection(object): def block_stats(self, instance_id, disk): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - """ domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) def interface_stats(self, instance_id, interface): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - """ domain = self._conn.lookupByName(instance_id) return domain.interfaceStats(interface) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index dc372e3e3..9fe15644f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,6 +19,7 @@ A connection to XenServer or Xen Cloud Platform. 
""" import logging +import xmlrpclib from twisted.internet import defer from twisted.internet import task @@ -26,7 +27,9 @@ from twisted.internet import task from nova import exception from nova import flags from nova import process +from nova.auth.manager import AuthManager from nova.compute import power_state +from nova.virt import images XenAPI = None @@ -71,10 +74,41 @@ class XenAPIConnection(object): @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): - vm = self.lookup(instance.name) + vm = yield self.lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) + + if 'bridge_name' in instance.datamodel: + network_ref = \ + yield self._find_network_with_bridge( + instance.datamodel['bridge_name']) + else: + network_ref = None + + if 'mac_address' in instance.datamodel: + mac_address = instance.datamodel['mac_address'] + else: + mac_address = '' + + user = AuthManager().get_user(instance.datamodel['user_id']) + project = AuthManager().get_project(instance.datamodel['project_id']) + vdi_uuid = yield self.fetch_image( + instance.datamodel['image_id'], user, project, True) + kernel = yield self.fetch_image( + instance.datamodel['kernel_id'], user, project, False) + ramdisk = yield self.fetch_image( + instance.datamodel['ramdisk_id'], user, project, False) + vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + + vm_ref = yield self.create_vm(instance, kernel, ramdisk) + yield self.create_vbd(vm_ref, vdi_ref, 0, True) + if network_ref: + yield self._create_vif(vm_ref, network_ref, mac_address) + yield self._conn.xenapi.VM.start(vm_ref, False, False) + + + def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -92,9 +126,9 @@ class XenAPIConnection(object): 'actions_after_reboot': 'restart', 'actions_after_crash': 'destroy', 'PV_bootloader': '', - 'PV_kernel': instance.datamodel['kernel_id'], - 'PV_ramdisk': instance.datamodel['ramdisk_id'], - 'PV_args': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', 'PV_bootloader_args': '', 'PV_legacy_args': '', 'HVM_boot_policy': '', @@ -106,8 +140,78 @@ class XenAPIConnection(object): 'user_version': '0', 'other_config': {}, } - vm = yield self._conn.xenapi.VM.create(rec) - #yield self._conn.xenapi.VM.start(vm, False, False) + logging.debug('Created VM %s...', instance.name) + vm_ref = self._conn.xenapi.VM.create(rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + return vm_ref + + + def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... 
', vm_ref, vdi_ref) + vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + return vbd_ref + + + def _create_vif(self, vm_ref, network_ref, mac_address): + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = self._conn.xenapi.VIF.create(vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + return vif_ref + + + def _find_network_with_bridge(self, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = self._conn.xenapi.network.get_all_records_where(expr) + if len(networks) == 1: + return networks.keys()[0] + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + + def fetch_image(self, image, user, project, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used).""" + + url = images.image_url(image) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + return self._call_plugin('objectstore', fn, args) def reboot(self, instance): @@ -125,7 +229,7 @@ class XenAPIConnection(object): def get_info(self, instance_id): vm = self.lookup(instance_id) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) return {'state': power_state_from_xenapi[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, @@ -143,10 +247,42 @@ class XenAPIConnection(object): else: return vms[0] - power_state_from_xenapi = { - 'Halted' : power_state.RUNNING, #FIXME - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED - } + + def _call_plugin(self, plugin, fn, args): + return _unwrap_plugin_exceptions( + self._conn.xenapi.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) + + + def _get_xenapi_host(self): + return self._conn.xenapi.session.get_this_host(self._conn.handle) + + +power_state_from_xenapi = { + 'Halted': power_state.SHUTDOWN, + 'Running': power_state.RUNNING, + 'Paused': power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed': power_state.CRASHED +} + + +def _unwrap_plugin_exceptions(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, exn: + logging.debug("Got exception: %s", exn) + if (len(exn.details) == 4 and + exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and + exn.details[2] == 'Failure'): + params = None + try: + params = eval(exn.details[3]) + except: + raise exn + raise XenAPI.Failure(params) + else: + raise + except xmlrpclib.ProtocolError, exn: + logging.debug("Got exception: %s", exn) + raise diff --git a/nova/volume/service.py b/nova/volume/service.py index
e12f675a7..66163a812 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS. Currently uses Ata-over-Ethernet. """ -import glob import logging import os -import shutil -import socket -import tempfile from twisted.internet import defer @@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this service') flags.DEFINE_integer('first_shelf_id', utils.last_octet(utils.get_my_ip()) * 10, 'AoE starting shelf_id for this service') @@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id', flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', +flags.DEFINE_integer('blades_per_shelf', 16, - 'Number of AoE slots per shelf') + 'Number of AoE blades per shelf') flags.DEFINE_string('storage_availability_zone', 'nova', 'availability zone of this service') @@ -69,7 +62,7 @@ flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -class NoMoreVolumes(exception.Error): +class NoMoreBlades(exception.Error): pass def get_volume(volume_id): @@ -77,8 +70,9 @@ def get_volume(volume_id): volume_class = Volume if FLAGS.fake_storage: volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) + vol = volume_class.lookup(volume_id) + if vol: + return vol raise exception.Error("Volume does not exist") class VolumeService(service.Service): @@ -91,18 +85,9 @@ class VolumeService(service.Service): super(VolumeService, self).__init__() self.volume_class = Volume if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() self.volume_class = FakeVolume self._init_volume_group() - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) def create_volume(self, size, user_id, project_id): @@ -113,8 +98,6 @@ class VolumeService(service.Service): """ logging.debug("Creating volume of size: %s" % (size)) vol = yield self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) logging.debug("restarting exports") yield self._restart_exports() defer.returnValue(vol['volume_id']) @@ -134,21 +117,19 @@ class VolumeService(service.Service): def delete_volume(self, volume_id): logging.debug("Deleting volume with id of: %s" % (volume_id)) vol = get_volume(volume_id) - if vol['status'] == "attached": + if vol['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: + if vol['node_name'] != FLAGS.node_name: raise exception.Error("Volume is not local to this node") yield vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) defer.returnValue(True) @defer.inlineCallbacks def _restart_exports(self): if FLAGS.fake_storage: return - yield process.simple_execute("sudo vblade-persist auto all") - # NOTE(vish): this 
command sometimes sends output to stderr for warnings + # NOTE(vish): these commands sometimes send output to stderr for warnings + yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) yield process.simple_execute("sudo vblade-persist start all", error_ok=1) @defer.inlineCallbacks @@ -172,14 +153,15 @@ class Volume(datastore.BasicModel): return self.volume_id def default_state(self): - return {"volume_id": self.volume_id} + return {"volume_id": self.volume_id, + "node_name": "unassigned"} @classmethod @defer.inlineCallbacks def create(cls, size, user_id, project_id): volume_id = utils.generate_uid('vol') vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name + vol['node_name'] = FLAGS.node_name vol['size'] = size vol['user_id'] = user_id vol['project_id'] = project_id @@ -225,14 +207,31 @@ class Volume(datastore.BasicModel): self['attach_status'] = "detached" self.save() + def save(self): + is_new = self.is_new_record() + super(Volume, self).save() + if is_new: + redis = datastore.Redis.instance() + key = self.__devices_key + # TODO(vish): these should be added by admin commands + more = redis.scard(self._redis_association_name("node", + self['node_name'])) + if (not redis.exists(key) and not more): + for shelf_id in range(FLAGS.first_shelf_id, + FLAGS.last_shelf_id + 1): + for blade_id in range(FLAGS.blades_per_shelf): + redis.sadd(key, "%s.%s" % (shelf_id, blade_id)) + self.associate_with("node", self['node_name']) + @defer.inlineCallbacks def destroy(self): - try: - yield self._remove_export() - except Exception as ex: - logging.debug("Ingnoring failure to remove export %s" % ex) - pass + yield self._remove_export() yield self._delete_lv() + self.unassociate_with("node", self['node_name']) + if self.get('shelf_id', None) and self.get('blade_id', None): + redis = datastore.Redis.instance() + key = self.__devices_key + redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id'])) super(Volume, self).destroy() @defer.inlineCallbacks @@ -244,66 +243,72 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], - FLAGS.volume_group)) + FLAGS.volume_group), + error_ok=1) @defer.inlineCallbacks def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) + + @property + def __devices_key(self): + return 'volume_devices:%s' % FLAGS.node_name @defer.inlineCallbacks def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() + redis = datastore.Redis.instance() + key = self.__devices_key + device = redis.spop(key) + if not device: + raise NoMoreBlades() + (shelf_id, blade_id) = device.split('.') self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) self['shelf_id'] = shelf_id self['blade_id'] = blade_id self.save() - yield self._exec_export() + yield self._exec_setup_export() @defer.inlineCallbacks - def _exec_export(self): + def _exec_setup_export(self): yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], self['blade_id'], FLAGS.aoe_eth_dev, FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _remove_export(self): + if not self.get('shelf_id', None) or not self.get('blade_id', None): + defer.returnValue(False) + yield self._exec_remove_export() + defer.returnValue(True) + + @defer.inlineCallbacks + def _exec_remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" %
diff --git a/nova/wsgi.py b/nova/wsgi.py
new file mode 100644
index 000000000..4fd6e59e3
--- /dev/null
+++ b/nova/wsgi.py
@@ -0,0 +1,173 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility methods for working with WSGI servers
+"""
+
+import logging
+import sys
+
+import eventlet
+import eventlet.wsgi
+eventlet.patcher.monkey_patch(all=False, socket=True)
+import routes
+import routes.middleware
+
+
+logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+
+
+def run_server(application, port):
+    """Run a WSGI server with the given application."""
+    sock = eventlet.listen(('0.0.0.0', port))
+    eventlet.wsgi.server(sock, application)
+
+
+class Application(object):
+    """Base WSGI application wrapper. Subclasses need to implement __call__."""
+
+    def __call__(self, environ, start_response):
+        r"""Subclasses will probably want to implement __call__ like this:
+
+        @webob.dec.wsgify
+        def __call__(self, req):
+          # Any of the following objects work as responses:
+
+          # Option 1: simple string
+          res = 'message\n'
+
+          # Option 2: a nicely formatted HTTP exception page
+          res = exc.HTTPForbidden(detail='Nice try')
+
+          # Option 3: a webob Response object (in case you need to play with
+          # headers, or you want to be treated like an iterable, or or or)
+          res = Response();
+          res.app_iter = open('somefile')
+
+          # Option 4: any wsgi app to be run next
+          res = self.application
+
+          # Option 5: you can get a Response object for a wsgi app, too, to
+          # play with headers etc
+          res = req.get_response(self.application)
+
+          # You can then just return your response...
+          return res
+          # ... or set req.response and return None.
+          req.response = res
+
+        See the end of http://pythonpaste.org/webob/modules/dec.html
+        for more info.
+        """
+        raise NotImplementedError("You must implement __call__")
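To make the contract in that docstring concrete, here is a minimal sketch of a subclass (the Echo class is hypothetical and not part of this commit; it assumes WebOb is installed, as the docstring itself does):

    import webob.dec
    import webob.exc

    from nova import wsgi


    class Echo(wsgi.Application):
        """Toy app: echo the requested path, exercising options 1 and 2."""

        @webob.dec.wsgify
        def __call__(self, req):
            if req.path == '/forbidden':
                # Option 2: a nicely formatted HTTP exception page.
                return webob.exc.HTTPForbidden(detail='Nice try')
            # Option 1: a simple string.
            return 'You asked for %s\n' % req.path

    # wsgi.run_server(Echo(), 8080)  # would serve on 0.0.0.0:8080 via eventlet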
+
+
+class Middleware(Application): # pylint: disable-msg=W0223
+    """Base WSGI middleware wrapper. These classes require an
+    application to be initialized that will be called next."""
+
+    def __init__(self, application): # pylint: disable-msg=W0231
+        self.application = application
+
+
+class Debug(Middleware):
+    """Helper class that can be inserted into any WSGI application chain
+    to get information about the request and response."""
+
+    def __call__(self, environ, start_response):
+        for key, value in environ.items():
+            print key, "=", value
+        print
+        wrapper = debug_start_response(start_response)
+        return debug_print_body(self.application(environ, wrapper))
+
+
+def debug_start_response(start_response):
+    """Wrap the start_response to capture when called."""
+
+    def wrapper(status, headers, exc_info=None):
+        """Print out all headers when start_response is called."""
+        print status
+        for (key, value) in headers:
+            print key, "=", value
+        print
+        start_response(status, headers, exc_info)
+
+    return wrapper
+
+
+def debug_print_body(body):
+    """Print the body of the response as it is sent back."""
+
+    class Wrapper(object):
+        """Iterate through all the body parts and print before returning."""
+
+        def __iter__(self):
+            for part in body:
+                sys.stdout.write(part)
+                sys.stdout.flush()
+                yield part
+            print
+
+    return Wrapper()
+
+
+class ParsedRoutes(Middleware):
+    """Process parsed routes from routes.middleware.RoutesMiddleware
+    and call either the controller if found or the default application
+    otherwise."""
+
+    def __call__(self, environ, start_response):
+        if environ['routes.route'] is None:
+            return self.application(environ, start_response)
+        app = environ['wsgiorg.routing_args'][1]['controller']
+        return app(environ, start_response)
+
+
+class Router(Middleware): # pylint: disable-msg=R0921
+    """Wrapper to help set up routes.middleware.RoutesMiddleware."""
+
+    def __init__(self, application):
+        self.map = routes.Mapper()
+        self._build_map()
+        application = ParsedRoutes(application)
+        application = routes.middleware.RoutesMiddleware(application, self.map)
+        super(Router, self).__init__(application)
+
+    def __call__(self, environ, start_response):
+        return self.application(environ, start_response)
+
+    def _build_map(self):
+        """Method to create new connections for the routing map."""
+        raise NotImplementedError("You must implement _build_map")
+
+    def _connect(self, *args, **kwargs):
+        """Wrapper for the map.connect method."""
+        self.map.connect(*args, **kwargs)
+
+
+def route_args(application):
+    """Decorator to make grabbing routing args more convenient."""
+
+    def wrapper(self, req):
+        """Call application with req and parsed routing args from the request."""
+        return application(self, req, req.environ['wsgiorg.routing_args'][1])
+
+    return wrapper
diff --git a/plugins/xenapi/README b/plugins/xenapi/README
new file mode 100644
index 000000000..fbd471035
--- /dev/null
+++ b/plugins/xenapi/README
@@ -0,0 +1,6 @@
+This directory contains files that are required for the XenAPI support. They
+should be installed in the XenServer / Xen Cloud Platform domain 0.
+
+Also, you need to
+
+chmod u+x /etc/xapi.d/plugins/objectstore
diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore
new file mode 100644
index 000000000..271e7337f
--- /dev/null
+++ b/plugins/xenapi/etc/xapi.d/plugins/objectstore
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for fetching images from nova-objectstore.
+#
+
+import base64
+import errno
+import hmac
+import os
+import os.path
+import sha
+import time
+import urlparse
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging('objectstore')
+
+
+KERNEL_DIR = '/boot/guest'
+
+DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024
+SECTOR_SIZE = 512
+MBR_SIZE_SECTORS = 63
+MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+
+
+def get_vdi(session, args):
+    src_url = exists(args, 'src_url')
+    username = exists(args, 'username')
+    password = exists(args, 'password')
+    add_partition = validate_bool(args, 'add_partition', 'false')
+
+    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
+
+    sr = find_sr(session)
+    if sr is None:
+        raise Exception('Cannot find SR to write VDI to')
+
+    virtual_size = \
+        get_content_length(proto, netloc, url_path, username, password)
+    if virtual_size < 0:
+        raise Exception('Cannot get VDI size')
+
+    vdi_size = virtual_size
+    if add_partition:
+        # Make room for MBR.
+        vdi_size += MBR_SIZE_BYTES
+
+    vdi = create_vdi(session, sr, src_url, vdi_size, False)
+    with_vdi_in_dom0(session, vdi, False,
+                     lambda dev: get_vdi_(proto, netloc, url_path,
+                                          username, password, add_partition,
+                                          virtual_size, '/dev/%s' % dev))
+    return session.xenapi.VDI.get_uuid(vdi)
+
+
+def get_vdi_(proto, netloc, url_path, username, password, add_partition,
+             virtual_size, dest):
+
+    if add_partition:
+        write_partition(virtual_size, dest)
+
+    offset = add_partition and MBR_SIZE_BYTES or 0
+    get(proto, netloc, url_path, username, password, dest, offset)
+
+
+def write_partition(virtual_size, dest):
+    mbr_last = MBR_SIZE_SECTORS - 1
+    primary_first = MBR_SIZE_SECTORS
+    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+    logging.debug('Writing partition table %d %d to %s...',
+                  primary_first, primary_last, dest)
+
+    result = os.system('parted --script %s mklabel msdos' % dest)
+    if result != 0:
+        raise Exception('Failed to mklabel')
+    result = os.system('parted --script %s mkpart primary %ds %ds' %
+                       (dest, primary_first, primary_last))
+    if result != 0:
+        raise Exception('Failed to mkpart')
+
+    logging.debug('Writing partition table %s done.', dest)
+
+
+def find_sr(session):
+    host = get_this_host(session)
+    srs = session.xenapi.SR.get_all()
+    for sr in srs:
+        sr_rec = session.xenapi.SR.get_record(sr)
+        if not ('i18n-key' in sr_rec['other_config'] and
+                sr_rec['other_config']['i18n-key'] == 'local-storage'):
+            continue
+        for pbd in sr_rec['PBDs']:
+            pbd_rec = session.xenapi.PBD.get_record(pbd)
+            if pbd_rec['host'] == host:
+                return sr
+    return None
+
+
+def get_kernel(session, args):
+    src_url = exists(args, 'src_url')
+    username = exists(args, 'username')
+    password = exists(args, 'password')
+
+    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
+
+    dest = os.path.join(KERNEL_DIR, url_path[1:])
+
+    # Paranoid check against people using ../ to do rude things.
+    if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR:
+        raise Exception('Illegal destination %s %s', (url_path, dest))
+
+    dirname = os.path.dirname(dest)
+    try:
+        os.makedirs(dirname)
+    except os.error, e:
+        if e.errno != errno.EEXIST:
+            raise
+        if not os.path.isdir(dirname):
+            raise Exception('Cannot make directory %s', dirname)
+
+    try:
+        os.remove(dest)
+    except:
+        pass
+
+    get(proto, netloc, url_path, username, password, dest, 0)
+
+    return dest
+
+
+def get_content_length(proto, netloc, url_path, username, password):
+    headers = make_headers('HEAD', url_path, username, password)
+    return with_http_connection(
+        proto, netloc,
+        lambda conn: get_content_length_(url_path, headers, conn))
+
+
+def get_content_length_(url_path, headers, conn):
+    conn.request('HEAD', url_path, None, headers)
+    response = conn.getresponse()
+    if response.status != 200:
+        raise Exception('%d %s' % (response.status, response.reason))
+
+    return long(response.getheader('Content-Length', -1))
+
+
+def get(proto, netloc, url_path, username, password, dest, offset):
+    headers = make_headers('GET', url_path, username, password)
+    download(proto, netloc, url_path, headers, dest, offset)
+
+
+def make_headers(verb, url_path, username, password):
+    headers = {}
+    headers['Date'] = \
+        time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+    headers['Authorization'] = \
+        'AWS %s:%s' % (username,
+                       s3_authorization(verb, url_path, password, headers))
+    return headers
+
+
+def s3_authorization(verb, path, password, headers):
+    sha1 = hmac.new(password, digestmod=sha)
+    sha1.update(plaintext(verb, path, headers))
+    return base64.encodestring(sha1.digest()).strip()
+
+
+def plaintext(verb, path, headers):
+    return '%s\n\n\n%s\n%s' % (verb,
+                               "\n".join([headers[h] for h in headers]),
+                               path)
+
+
+def download(proto, netloc, url_path, headers, dest, offset):
+    with_http_connection(
+        proto, netloc,
+        lambda conn: download_(url_path, dest, offset, headers, conn))
+
+
+def download_(url_path, dest, offset, headers, conn):
+    conn.request('GET', url_path, None, headers)
+    response = conn.getresponse()
+    if response.status != 200:
+        raise Exception('%d %s' % (response.status, response.reason))
+
+    length = response.getheader('Content-Length', -1)
+
+    with_file(
+        dest, 'a',
+        lambda dest_file: download_all(response, length, dest_file, offset))
+
+
+def download_all(response, length, dest_file, offset):
+    dest_file.seek(offset)
+    i = 0
+    while True:
+        buf = response.read(DOWNLOAD_CHUNK_SIZE)
+        if buf:
+            dest_file.write(buf)
+        else:
+            return
+        i += len(buf)
+        if length != -1 and i >= length:
+            return
+
+
+if __name__ == '__main__':
+    XenAPIPlugin.dispatch({'get_vdi': get_vdi,
+                           'get_kernel': get_kernel})
diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
new file mode 100755
index 000000000..2d323a016
--- /dev/null
+++ b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# Helper functions for the Nova xapi plugins. In time, this will merge
+# with the pluginlib.py shipped with xapi, but for now, that file is not
+# very stable, so it's easiest just to have a copy of all the functions
+# that we need.
+#
+
+import httplib
+import logging
+import logging.handlers
+import re
+import time
+
+
+##### Logging setup
+
+def configure_logging(name):
+    log = logging.getLogger()
+    log.setLevel(logging.DEBUG)
+    sysh = logging.handlers.SysLogHandler('/dev/log')
+    sysh.setLevel(logging.DEBUG)
+    formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name)
+    sysh.setFormatter(formatter)
+    log.addHandler(sysh)
+
+
+##### Exceptions
+
+class PluginError(Exception):
+    """Base Exception class for all plugin errors."""
+    def __init__(self, *args):
+        Exception.__init__(self, *args)
+
+class ArgumentError(PluginError):
+    """Raised when required arguments are missing, argument values are invalid,
+    or incompatible arguments are given.
+    """
+    def __init__(self, *args):
+        PluginError.__init__(self, *args)
+
+
+##### Helpers
+
+def ignore_failure(func, *args, **kwargs):
+    try:
+        return func(*args, **kwargs)
+    except XenAPI.Failure, e:
+        logging.error('Ignoring XenAPI.Failure %s', e)
+        return None
+
+
+##### Argument validation
+
+ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
+
+def validate_exists(args, key, default=None):
+    """Validates that a string argument to an RPC method call is given, and
+    matches the shell-safe regex, with an optional default value in case it
+    does not exist.
+
+    Returns the string.
+    """
+    if key in args:
+        if len(args[key]) == 0:
+            raise ArgumentError('Argument %r value %r is too short.' % (key, args[key]))
+        if not ARGUMENT_PATTERN.match(args[key]):
+            raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key]))
+        if args[key][0] == '-':
+            raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key]))
+        return args[key]
+    elif default is not None:
+        return default
+    else:
+        raise ArgumentError('Argument %s is required.' % key)
+
+def validate_bool(args, key, default=None):
+    """Validates that a string argument to an RPC method call is a boolean string,
+    with an optional default value in case it does not exist.
+
+    Returns the python boolean value.
+    """
+    value = validate_exists(args, key, default)
+    if value.lower() == 'true':
+        return True
+    elif value.lower() == 'false':
+        return False
+    else:
+        raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
+
+def exists(args, key):
+    """Validates that a freeform string argument to an RPC method call is given.
+    Returns the string.
+    """
+    if key in args:
+        return args[key]
+    else:
+        raise ArgumentError('Argument %s is required.' % key)
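As a quick usage sketch of the three validators above (the argument names and values here are made up; a real XenAPI plugin receives them as its string args dict):

    args = {'vdi_name': 'ami-1234', 'add_partition': 'true'}

    assert validate_bool(args, 'add_partition', 'false') is True
    assert validate_bool(args, 'use_ramdisk', 'false') is False  # default used
    assert validate_exists(args, 'vdi_name') == 'ami-1234'
    assert exists(args, 'vdi_name') == 'ami-1234'   # freeform, no regex check
    # validate_exists(args, 'password') would raise ArgumentError: no default.

Note that validate_exists enforces the shell-safe ARGUMENT_PATTERN, so values containing slashes or spaces (such as URLs) have to go through exists() instead.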
+
+def optional(args, key):
+    """If the given key is in args, return the corresponding value, otherwise
+    return None"""
+    return key in args and args[key] or None
+
+
+def get_this_host(session):
+    return session.xenapi.session.get_this_host(session.handle)
+
+
+def get_domain_0(session):
+    this_host_ref = get_this_host(session)
+    expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
+    return session.xenapi.VM.get_all_records_where(expr).keys()[0]
+
+
+def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
+    vdi_ref = session.xenapi.VDI.create(
+        { 'name_label': name_label,
+          'name_description': '',
+          'SR': sr_ref,
+          'virtual_size': str(virtual_size),
+          'type': 'User',
+          'sharable': False,
+          'read_only': read_only,
+          'xenstore_data': {},
+          'other_config': {},
+          'sm_config': {},
+          'tags': [] })
+    logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
+                  virtual_size, read_only, sr_ref)
+    return vdi_ref
+
+
+def with_vdi_in_dom0(session, vdi, read_only, f):
+    dom0 = get_domain_0(session)
+    vbd_rec = {}
+    vbd_rec['VM'] = dom0
+    vbd_rec['VDI'] = vdi
+    vbd_rec['userdevice'] = 'autodetect'
+    vbd_rec['bootable'] = False
+    vbd_rec['mode'] = read_only and 'RO' or 'RW'
+    vbd_rec['type'] = 'disk'
+    vbd_rec['unpluggable'] = True
+    vbd_rec['empty'] = False
+    vbd_rec['other_config'] = {}
+    vbd_rec['qos_algorithm_type'] = ''
+    vbd_rec['qos_algorithm_params'] = {}
+    vbd_rec['qos_supported_algorithms'] = []
+    logging.debug('Creating VBD for VDI %s ... ', vdi)
+    vbd = session.xenapi.VBD.create(vbd_rec)
+    logging.debug('Creating VBD for VDI %s done.', vdi)
+    try:
+        logging.debug('Plugging VBD %s ... ', vbd)
+        session.xenapi.VBD.plug(vbd)
+        logging.debug('Plugging VBD %s done.', vbd)
+        return f(session.xenapi.VBD.get_device(vbd))
+    finally:
+        logging.debug('Destroying VBD for VDI %s ... ', vdi)
+        vbd_unplug_with_retry(session, vbd)
+        ignore_failure(session.xenapi.VBD.destroy, vbd)
+        logging.debug('Destroying VBD for VDI %s done.', vdi)
+
+
+def vbd_unplug_with_retry(session, vbd):
+    """Call VBD.unplug on the given VBD, with a retry if we get
+    DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
+    seeing the device still in use, even when all processes using the device
+    should be dead."""
+    while True:
+        try:
+            session.xenapi.VBD.unplug(vbd)
+            logging.debug('VBD.unplug successful first time.')
+            return
+        except XenAPI.Failure, e:
+            if (len(e.details) > 0 and
+                e.details[0] == 'DEVICE_DETACH_REJECTED'):
+                logging.debug('VBD.unplug rejected: retrying...')
+                time.sleep(1)
+            elif (len(e.details) > 0 and
+                  e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+                logging.debug('VBD.unplug successful eventually.')
+                return
+            else:
+                logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e)
+                return
+
+
+def with_http_connection(proto, netloc, f):
+    conn = (proto == 'https' and
+            httplib.HTTPSConnection(netloc) or
+            httplib.HTTPConnection(netloc))
+    try:
+        return f(conn)
+    finally:
+        conn.close()
+
+
+def with_file(dest_path, mode, f):
+    dest = open(dest_path, mode)
+    try:
+        return f(dest)
+    finally:
+        dest.close()
@@ -1,6 +1,19 @@
+[Messages Control]
+disable-msg=C0103
+
 [Basic]
+# Variables can be 1 to 31 characters long, with
+# lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
 method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+[MESSAGES CONTROL]
+# TODOs in code comments are fine...
+disable-msg=W0511
+
 [Design]
 max-public-methods=100
 min-public-methods=0
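To see what those pylint naming patterns actually admit, a quick sketch exercising the variable regex with Python's re module (the names below are hypothetical):

    import re

    # variable-rgx from the hunk above: lowercase with underscores,
    # 1 to 31 characters total.
    VARIABLE_RGX = re.compile(r'[a-z_][a-z0-9_]{0,30}$')

    assert VARIABLE_RGX.match('volume_id')      # lowercase + underscores: ok
    assert not VARIABLE_RGX.match('VolumeId')   # CamelCase: rejected
    assert not VARIABLE_RGX.match('v' * 40)     # over 31 characters: rejected

method-rgx works the same way but requires at least three characters.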
diff --git a/run_tests.py b/run_tests.py
index 5a8966f02..d90ac8175 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -54,10 +54,12 @@ from nova.tests.auth_unittest import *
 from nova.tests.api_unittest import *
 from nova.tests.cloud_unittest import *
 from nova.tests.compute_unittest import *
+from nova.tests.flags_unittest import *
 from nova.tests.model_unittest import *
 from nova.tests.network_unittest import *
 from nova.tests.objectstore_unittest import *
 from nova.tests.process_unittest import *
+from nova.tests.rpc_unittest import *
 from nova.tests.validator_unittest import *
 from nova.tests.volume_unittest import *
@@ -68,7 +70,8 @@
 flags.DEFINE_bool('flush_db', True,
                   'Flush the database before running fake tests')
 flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
-                    'Path to where to pipe STDERR during test runs. Default = "run_tests.err.log"')
+                    'Path to where to pipe STDERR during test runs. '
+                    'Default = "run_tests.err.log"')
 
 if __name__ == '__main__':
     OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
diff --git a/run_tests.sh b/run_tests.sh
index 1bf3d1a79..85d7c8834 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -4,10 +4,9 @@ venv=.nova-venv
 with_venv=tools/with_venv.sh
 
 if [ -e ${venv} ]; then
-  ${with_venv} python run_tests.py
+  ${with_venv} python run_tests.py $@
 else
-  echo "You need to install the Nova virtualenv before you can run this."
-  echo ""
-  echo "Please run tools/install_venv.py"
-  exit 1
+  echo "No virtual environment found...creating one"
+  python tools/install_venv.py
+  ${with_venv} python run_tests.py $@
 fi
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 0b35fc8e9..e1a270638 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,3 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
 """
 Installation script for Nova's development virtualenv
 """
@@ -7,20 +27,20 @@
 import subprocess
 import sys
 
-ROOT = os.path.dirname(os.path.dirname(__file__))
+ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 VENV = os.path.join(ROOT, '.nova-venv')
 PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
 TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
 
-
 def die(message, *args):
     print >>sys.stderr, message % args
     sys.exit(1)
 
 def run_command(cmd, redirect_output=True, error_ok=False):
-    # Useful for debugging:
-    #print >>sys.stderr, ' '.join(cmd)
+    """Runs a command in an out-of-process shell, returning the
+    output of that command
+    """
     if redirect_output:
         stdout = subprocess.PIPE
     else:
@@ -33,32 +53,44 @@
     return output
 
+HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip())
+HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip())
+
+
 def check_dependencies():
-    """Make sure pip and virtualenv are on the path."""
-    print 'Checking for pip...',
-    if not run_command(['which', 'pip']).strip():
-        die('ERROR: pip not found.\n\nNova development requires pip,'
-            ' please install it using your favorite package management tool')
-    print 'done.'
+    """Make sure virtualenv is in the path."""
     print 'Checking for virtualenv...',
-    if not run_command(['which', 'virtualenv']).strip():
-        die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
-            ' please install it using your favorite package management tool')
+    if not HAS_VIRTUALENV:
+        print 'not found.'
+        # Try installing it via easy_install...
+        if HAS_EASY_INSTALL:
+            print 'Installing virtualenv via easy_install...',
+            if not run_command(['which', 'easy_install']):
+                die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
+                    ' please install it using your favorite package management tool')
+            print 'done.'
     print 'done.'
 
 def create_virtualenv(venv=VENV):
+    """Creates the virtual environment and installs PIP only into the
+    virtual environment
+    """
     print 'Creating venv...',
     run_command(['virtualenv', '-q', '--no-site-packages', VENV])
     print 'done.'
+    print 'Installing pip in virtualenv...',
+    if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
+        die("Failed to install pip.")
+    print 'done.'
 
 def install_dependencies(venv=VENV):
    print 'Installing dependencies with pip (this can take a while)...'
-    run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
+    run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
                 redirect_output=False)
-    run_command(['pip', 'install', '-E', venv, TWISTED_NOVA],
+    run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
                 redirect_output=False)
diff --git a/tools/pip-requires b/tools/pip-requires
index 4eb47ca2b..c173d6221 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,9 +1,12 @@
+pep8==0.5.0
+pylint==0.21.1
 IPy==0.70
 M2Crypto==0.20.2
 amqplib==0.6.1
 anyjson==0.2.4
 boto==2.0b1
 carrot==0.10.5
+eventlet==0.9.10
 lockfile==0.8
 python-daemon==1.5.5
 python-gflags==1.3
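Since the requirement pins above follow a strict name==version form, they are easy to consume programmatically; a minimal sketch (the parse_requires helper is hypothetical, not part of this commit):

    # Read tools/pip-requires into a {package: version} dict.
    def parse_requires(path='tools/pip-requires'):
        pins = {}
        for line in open(path):
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            (name, _, version) = line.partition('==')
            pins[name] = version
        return pins

    # e.g. parse_requires()['eventlet'] -> '0.9.10'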
