summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
Diffstat (limited to 'nova')
-rw-r--r--nova/auth/ldapdriver.py481
-rw-r--r--nova/auth/manager.py811
-rw-r--r--nova/auth/rbac.py2
-rw-r--r--nova/auth/signer.py8
-rw-r--r--nova/auth/users.py974
-rw-r--r--nova/cloudpipe/api.py2
-rw-r--r--nova/cloudpipe/pipelib.py4
-rw-r--r--nova/compute/disk.py7
-rw-r--r--nova/compute/linux_net.py2
-rw-r--r--nova/compute/model.py74
-rw-r--r--nova/compute/network.py17
-rw-r--r--nova/compute/service.py (renamed from nova/compute/node.py)39
-rw-r--r--nova/datastore.py29
-rw-r--r--nova/endpoint/admin.py14
-rwxr-xr-xnova/endpoint/api.py2
-rw-r--r--nova/endpoint/cloud.py27
-rw-r--r--nova/endpoint/rackspace.py7
-rw-r--r--nova/exception.py6
-rw-r--r--nova/flags.py7
-rw-r--r--nova/network/__init__.py32
-rw-r--r--nova/network/service.py35
-rw-r--r--nova/objectstore/bucket.py4
-rw-r--r--nova/objectstore/handler.py52
-rw-r--r--nova/service.py103
-rw-r--r--nova/test.py8
-rw-r--r--nova/tests/access_unittest.py6
-rw-r--r--nova/tests/api_unittest.py31
-rw-r--r--nova/tests/auth_unittest.py (renamed from nova/tests/users_unittest.py)101
-rw-r--r--nova/tests/cloud_unittest.py33
-rw-r--r--nova/tests/compute_unittest.py (renamed from nova/tests/node_unittest.py)43
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/future_unittest.py75
-rw-r--r--nova/tests/model_unittest.py99
-rw-r--r--nova/tests/network_unittest.py95
-rw-r--r--nova/tests/objectstore_unittest.py184
-rw-r--r--nova/tests/real_flags.py1
-rw-r--r--nova/tests/volume_unittest.py115
-rw-r--r--nova/twistd.py33
-rw-r--r--nova/utils.py9
-rw-r--r--nova/virt/images.py28
-rw-r--r--nova/virt/libvirt_conn.py8
-rw-r--r--nova/volume/service.py (renamed from nova/volume/storage.py)63
42 files changed, 2266 insertions, 1407 deletions
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
new file mode 100644
index 000000000..1591c88e9
--- /dev/null
+++ b/nova/auth/ldapdriver.py
@@ -0,0 +1,481 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Auth driver for ldap. Includes FakeLdapDriver.
+
+It should be easy to create a replacement for this driver supporting
+other backends by creating another class that exposes the same
+public methods.
+"""
+
+import logging
+import sys
+
+from nova import exception
+from nova import flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('ldap_url', 'ldap://localhost',
+ 'Point this at your ldap server')
+flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
+flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
+ 'DN of admin user')
+flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
+flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
+ 'OU for Users')
+flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Projects')
+flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Roles')
+
+# NOTE(vish): mapping with these flags is necessary because we're going
+# to tie in to an existing ldap schema
+flags.DEFINE_string('ldap_cloudadmin',
+ 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
+flags.DEFINE_string('ldap_itsec',
+ 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
+flags.DEFINE_string('ldap_sysadmin',
+ 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
+flags.DEFINE_string('ldap_netadmin',
+ 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
+flags.DEFINE_string('ldap_developer',
+ 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+
+
+# TODO(vish): make an abstract base class with the same public methods
+# to define a set interface for AuthDrivers. I'm delaying
+# creating this now because I'm expecting an auth refactor
+# in which we may want to change the interface a bit more.
+class LdapDriver(object):
+ """Ldap Auth driver
+
+ Defines enter and exit and therefore supports the with/as syntax.
+ """
+ def __init__(self):
+ """Imports the LDAP module"""
+ self.ldap = __import__('ldap')
+
+ def __enter__(self):
+ """Creates the connection to LDAP"""
+ self.conn = self.ldap.initialize(FLAGS.ldap_url)
+ self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Destroys the connection to LDAP"""
+ self.conn.unbind_s()
+ return False
+
+ def get_user(self, uid):
+ """Retrieve user by id"""
+ attr = self.__find_object(self.__uid_to_dn(uid),
+ '(objectclass=novaUser)')
+ return self.__to_user(attr)
+
+    def get_user_from_access_key(self, access):
+        """Retrieve user by access key.
+
+        Returns the user dict built by __to_user, or None when no entry
+        under ldap_user_subtree has a matching accessKey attribute.
+
+        NOTE(review): 'access' is interpolated into the LDAP search filter
+        without escaping; a key containing filter metacharacters such as
+        '*' or '(' would alter the query. Consider escaping with
+        ldap.filter.escape_filter_chars — TODO confirm key charset.
+        """
+        query = '(accessKey=%s)' % access
+        dn = FLAGS.ldap_user_subtree
+        return self.__to_user(self.__find_object(dn, query))
+
+ def get_key_pair(self, uid, key_name):
+ """Retrieve key pair by uid and key name"""
+ dn = 'cn=%s,%s' % (key_name,
+ self.__uid_to_dn(uid))
+ attr = self.__find_object(dn, '(objectclass=novaKeyPair)')
+ return self.__to_key_pair(uid, attr)
+
+ def get_project(self, pid):
+ """Retrieve project by id"""
+ dn = 'cn=%s,%s' % (pid,
+ FLAGS.ldap_project_subtree)
+ attr = self.__find_object(dn, '(objectclass=novaProject)')
+ return self.__to_project(attr)
+
+ def get_users(self):
+ """Retrieve list of users"""
+ attrs = self.__find_objects(FLAGS.ldap_user_subtree,
+ '(objectclass=novaUser)')
+ return [self.__to_user(attr) for attr in attrs]
+
+ def get_key_pairs(self, uid):
+ """Retrieve list of key pairs"""
+ attrs = self.__find_objects(self.__uid_to_dn(uid),
+ '(objectclass=novaKeyPair)')
+ return [self.__to_key_pair(uid, attr) for attr in attrs]
+
+ def get_projects(self):
+ """Retrieve list of projects"""
+ attrs = self.__find_objects(FLAGS.ldap_project_subtree,
+ '(objectclass=novaProject)')
+ return [self.__to_project(attr) for attr in attrs]
+
+ def create_user(self, name, access_key, secret_key, is_admin):
+ """Create a user"""
+ if self.__user_exists(name):
+ raise exception.Duplicate("LDAP user %s already exists" % name)
+ attr = [
+ ('objectclass', ['person',
+ 'organizationalPerson',
+ 'inetOrgPerson',
+ 'novaUser']),
+ ('ou', [FLAGS.ldap_user_unit]),
+ ('uid', [name]),
+ ('sn', [name]),
+ ('cn', [name]),
+ ('secretKey', [secret_key]),
+ ('accessKey', [access_key]),
+ ('isAdmin', [str(is_admin).upper()]),
+ ]
+ self.conn.add_s(self.__uid_to_dn(name), attr)
+ return self.__to_user(dict(attr))
+
+ def create_key_pair(self, uid, key_name, public_key, fingerprint):
+ """Create a key pair"""
+ # TODO(vish): possibly refactor this to store keys in their own ou
+ # and put dn reference in the user object
+ attr = [
+ ('objectclass', ['novaKeyPair']),
+ ('cn', [key_name]),
+ ('sshPublicKey', [public_key]),
+ ('keyFingerprint', [fingerprint]),
+ ]
+ self.conn.add_s('cn=%s,%s' % (key_name,
+ self.__uid_to_dn(uid)),
+ attr)
+ return self.__to_key_pair(uid, dict(attr))
+
+ def create_project(self, name, manager_uid,
+ description=None, member_uids=None):
+ """Create a project"""
+ if self.__project_exists(name):
+ raise exception.Duplicate("Project can't be created because "
+ "project %s already exists" % name)
+ if not self.__user_exists(manager_uid):
+ raise exception.NotFound("Project can't be created because "
+ "manager %s doesn't exist" % manager_uid)
+ manager_dn = self.__uid_to_dn(manager_uid)
+ # description is a required attribute
+ if description is None:
+ description = name
+ members = []
+ if member_uids != None:
+ for member_uid in member_uids:
+ if not self.__user_exists(member_uid):
+ raise exception.NotFound("Project can't be created "
+ "because user %s doesn't exist" % member_uid)
+ members.append(self.__uid_to_dn(member_uid))
+ # always add the manager as a member because members is required
+ if not manager_dn in members:
+ members.append(manager_dn)
+ attr = [
+ ('objectclass', ['novaProject']),
+ ('cn', [name]),
+ ('description', [description]),
+ ('projectManager', [manager_dn]),
+ ('member', members)
+ ]
+ self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
+ return self.__to_project(dict(attr))
+
+ def add_to_project(self, uid, project_id):
+ """Add user to project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__add_to_group(uid, dn)
+
+ def remove_from_project(self, uid, project_id):
+ """Remove user from project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__remove_from_group(uid, dn)
+
+ def is_in_project(self, uid, project_id):
+ """Check if user is in project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__is_in_group(uid, dn)
+
+ def has_role(self, uid, role, project_id=None):
+ """Check if user has role
+
+ If project is specified, it checks for local role, otherwise it
+ checks for global role
+ """
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.__is_in_group(uid, role_dn)
+
+    def add_role(self, uid, role, project_id=None):
+        """Add role for user (or user and project).
+
+        If the role group does not exist yet it is created with uid as
+        its first member; otherwise uid is added to the existing group.
+        """
+        role_dn = self.__role_to_dn(role, project_id)
+        if not self.__group_exists(role_dn):
+            # create the role if it doesn't exist
+            description = '%s role for %s' % (role, project_id)
+            self.__create_group(role_dn, role, uid, description)
+        else:
+            # NOTE(review): only this branch has an explicit return, but
+            # __add_to_group itself returns None, so both paths yield None;
+            # the asymmetry is cosmetic, not behavioral.
+            return self.__add_to_group(uid, role_dn)
+
+ def remove_role(self, uid, role, project_id=None):
+ """Remove role for user (or user and project)"""
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.__remove_from_group(uid, role_dn)
+
+ def delete_user(self, uid):
+ """Delete a user"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s doesn't exist" % uid)
+ self.__delete_key_pairs(uid)
+ self.__remove_from_all(uid)
+ self.conn.delete_s('uid=%s,%s' % (uid,
+ FLAGS.ldap_user_subtree))
+
+ def delete_key_pair(self, uid, key_name):
+ """Delete a key pair"""
+ if not self.__key_pair_exists(uid, key_name):
+ raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
+ (key_name, uid))
+ self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
+ FLAGS.ldap_user_subtree))
+
+ def delete_project(self, name):
+ """Delete a project"""
+ project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
+ self.__delete_roles(project_dn)
+ self.__delete_group(project_dn)
+
+ def __user_exists(self, name):
+ """Check if user exists"""
+ return self.get_user(name) != None
+
+ def __key_pair_exists(self, uid, key_name):
+ """Check if key pair exists"""
+ return self.get_user(uid) != None
+ return self.get_key_pair(uid, key_name) != None
+
+ def __project_exists(self, name):
+ """Check if project exists"""
+ return self.get_project(name) != None
+
+ def __find_object(self, dn, query = None):
+ """Find an object by dn and query"""
+ objects = self.__find_objects(dn, query)
+ if len(objects) == 0:
+ return None
+ return objects[0]
+
+ def __find_dns(self, dn, query=None):
+ """Find dns by query"""
+ try:
+ res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ except self.ldap.NO_SUCH_OBJECT:
+ return []
+ # just return the DNs
+ return [dn for dn, attributes in res]
+
+ def __find_objects(self, dn, query = None):
+ """Find objects by query"""
+ try:
+ res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ except self.ldap.NO_SUCH_OBJECT:
+ return []
+ # just return the attributes
+ return [attributes for dn, attributes in res]
+
+ def __find_role_dns(self, tree):
+ """Find dns of role objects in given tree"""
+ return self.__find_dns(tree,
+ '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
+
+ def __find_group_dns_with_member(self, tree, uid):
+ """Find dns of group objects in a given tree that contain member"""
+ dns = self.__find_dns(tree,
+ '(&(objectclass=groupOfNames)(member=%s))' %
+ self.__uid_to_dn(uid))
+ return dns
+
+ def __group_exists(self, dn):
+ """Check if group exists"""
+ return self.__find_object(dn, '(objectclass=groupOfNames)') != None
+
+ def __delete_key_pairs(self, uid):
+ """Delete all key pairs for user"""
+ keys = self.get_key_pairs(uid)
+ if keys != None:
+ for key in keys:
+ self.delete_key_pair(uid, key['name'])
+
+ def __role_to_dn(self, role, project_id=None):
+ """Convert role to corresponding dn"""
+ if project_id == None:
+ return FLAGS.__getitem__("ldap_%s" % role).value
+ else:
+ return 'cn=%s,cn=%s,%s' % (role,
+ project_id,
+ FLAGS.ldap_project_subtree)
+
+ def __create_group(self, group_dn, name, uid,
+ description, member_uids = None):
+ """Create a group"""
+ if self.__group_exists(group_dn):
+ raise exception.Duplicate("Group can't be created because "
+ "group %s already exists" % name)
+ members = []
+ if member_uids != None:
+ for member_uid in member_uids:
+ if not self.__user_exists(member_uid):
+ raise exception.NotFound("Group can't be created "
+ "because user %s doesn't exist" % member_uid)
+ members.append(self.__uid_to_dn(member_uid))
+ dn = self.__uid_to_dn(uid)
+ if not dn in members:
+ members.append(dn)
+ attr = [
+ ('objectclass', ['groupOfNames']),
+ ('cn', [name]),
+ ('description', [description]),
+ ('member', members)
+ ]
+ self.conn.add_s(group_dn, attr)
+
+ def __is_in_group(self, uid, group_dn):
+ """Check if user is in group"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be searched in group "
+ "becuase the user doesn't exist" % (uid,))
+ if not self.__group_exists(group_dn):
+ return False
+ res = self.__find_object(group_dn,
+ '(member=%s)' % self.__uid_to_dn(uid))
+ return res != None
+
+ def __add_to_group(self, uid, group_dn):
+ """Add user to group"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be added to the group "
+ "becuase the user doesn't exist" % (uid,))
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
+ if self.__is_in_group(uid, group_dn):
+ raise exception.Duplicate("User %s is already a member of "
+ "the group %s" % (uid, group_dn))
+ attr = [
+ (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
+ ]
+ self.conn.modify_s(group_dn, attr)
+
+ def __remove_from_group(self, uid, group_dn):
+ """Remove user from group"""
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be removed from the "
+ "group because the user doesn't exist" % (uid,))
+ if not self.__is_in_group(uid, group_dn):
+ raise exception.NotFound("User %s is not a member of the group" %
+ (uid,))
+ self.__safe_remove_from_group(uid, group_dn)
+
+    def __safe_remove_from_group(self, uid, group_dn):
+        """Remove user from group, deleting group if user is last member"""
+        # FIXME(vish): what if deleted user is a project manager?
+        attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
+        try:
+            self.conn.modify_s(group_dn, attr)
+        except self.ldap.OBJECT_CLASS_VIOLATION:
+            # NOTE(review): presumably groupOfNames requires at least one
+            # member, so removing the final member violates the schema;
+            # the now-empty group is deleted instead — verify against the
+            # deployed LDAP schema.
+            logging.debug("Attempted to remove the last member of a group. "
+                          "Deleting the group at %s instead." % group_dn )
+            self.__delete_group(group_dn)
+
+ def __remove_from_all(self, uid):
+ """Remove user from all roles and projects"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be removed from all "
+ "because the user doesn't exist" % (uid,))
+ dn = self.__uid_to_dn(uid)
+ role_dns = self.__find_group_dns_with_member(
+ FLAGS.role_project_subtree, uid)
+ for role_dn in role_dns:
+ self.__safe_remove_from_group(uid, role_dn)
+ project_dns = self.__find_group_dns_with_member(
+ FLAGS.ldap_project_subtree, uid)
+ for project_dn in project_dns:
+ self.__safe_remove_from_group(uid, role_dn)
+
+ def __delete_group(self, group_dn):
+ """Delete Group"""
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
+ self.conn.delete_s(group_dn)
+
+ def __delete_roles(self, project_dn):
+ """Delete all roles for project"""
+ for role_dn in self.__find_role_dns(project_dn):
+ self.__delete_group(role_dn)
+
+ def __to_user(self, attr):
+ """Convert ldap attributes to User object"""
+ if attr == None:
+ return None
+ return {
+ 'id': attr['uid'][0],
+ 'name': attr['cn'][0],
+ 'access': attr['accessKey'][0],
+ 'secret': attr['secretKey'][0],
+ 'admin': (attr['isAdmin'][0] == 'TRUE')
+ }
+
+ def __to_key_pair(self, owner, attr):
+ """Convert ldap attributes to KeyPair object"""
+ if attr == None:
+ return None
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'owner_id': owner,
+ 'public_key': attr['sshPublicKey'][0],
+ 'fingerprint': attr['keyFingerprint'][0],
+ }
+
+ def __to_project(self, attr):
+ """Convert ldap attributes to Project object"""
+ if attr == None:
+ return None
+ member_dns = attr.get('member', [])
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
+ 'description': attr.get('description', [None])[0],
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]
+ }
+
+ def __dn_to_uid(self, dn):
+ """Convert user dn to uid"""
+ return dn.split(',')[0].split('=')[1]
+
+ def __uid_to_dn(self, dn):
+ """Convert uid to dn"""
+ return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
+
+
+class FakeLdapDriver(LdapDriver):
+ """Fake Ldap Auth driver"""
+ def __init__(self):
+ __import__('nova.auth.fakeldap')
+ self.ldap = sys.modules['nova.auth.fakeldap']
+
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
new file mode 100644
index 000000000..66027f6c2
--- /dev/null
+++ b/nova/auth/manager.py
@@ -0,0 +1,811 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Nova authentication management
+"""
+
+import logging
+import os
+import shutil
+import string
+import sys
+import tempfile
+import uuid
+import zipfile
+
+from nova import crypto
+from nova import datastore
+from nova import exception
+from nova import flags
+from nova import objectstore # for flags
+from nova import utils
+from nova.auth import ldapdriver # for flags
+from nova.auth import signer
+
+FLAGS = flags.FLAGS
+
+# NOTE(vish): a user with one of these roles will be a superuser and
+# have access to all api commands
+flags.DEFINE_list('superuser_roles', ['cloudadmin'],
+ 'Roles that ignore rbac checking completely')
+
+# NOTE(vish): a user with one of these roles will have it for every
+# project, even if he or she is not a member of the project
+flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
+ 'Roles that apply to all projects')
+
+
+flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
+flags.DEFINE_string('credentials_template',
+ utils.abspath('auth/novarc.template'),
+ 'Template for creating users rc file')
+flags.DEFINE_string('vpn_client_template',
+ utils.abspath('cloudpipe/client.ovpn.template'),
+ 'Template for creating users vpn file')
+flags.DEFINE_string('credential_key_file', 'pk.pem',
+ 'Filename of private key in credentials zip')
+flags.DEFINE_string('credential_cert_file', 'cert.pem',
+ 'Filename of certificate in credentials zip')
+flags.DEFINE_string('credential_rc_file', 'novarc',
+ 'Filename of rc in credentials zip')
+
+flags.DEFINE_integer('vpn_start_port', 1000,
+ 'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+ 'End port for the cloudpipe VPN servers')
+
+flags.DEFINE_string('credential_cert_subject',
+ '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
+ 'OU=NovaDev/CN=%s-%s',
+ 'Subject for certificate for users')
+
+flags.DEFINE_string('vpn_ip', '127.0.0.1',
+ 'Public IP for the cloudpipe VPN servers')
+
+flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
+ 'Driver that auth manager uses')
+
+class AuthBase(object):
+ """Base class for objects relating to auth
+
+ Objects derived from this class should be stupid data objects with
+ an id member. They may optionally contain methods that delegate to
+ AuthManager, but should not implement logic themselves.
+ """
+ @classmethod
+ def safe_id(cls, obj):
+ """Safe get object id
+
+ This method will return the id of the object if the object
+ is of this class, otherwise it will return the original object.
+ This allows methods to accept objects or ids as paramaters.
+
+ """
+ if isinstance(obj, cls):
+ return obj.id
+ else:
+ return obj
+
+
+class User(AuthBase):
+ """Object representing a user"""
+ def __init__(self, id, name, access, secret, admin):
+ self.id = id
+ self.name = name
+ self.access = access
+ self.secret = secret
+ self.admin = admin
+
+ def is_superuser(self):
+ return AuthManager().is_superuser(self)
+
+ def is_admin(self):
+ return AuthManager().is_admin(self)
+
+ def has_role(self, role):
+ return AuthManager().has_role(self, role)
+
+ def add_role(self, role):
+ return AuthManager().add_role(self, role)
+
+ def remove_role(self, role):
+ return AuthManager().remove_role(self, role)
+
+ def is_project_member(self, project):
+ return AuthManager().is_project_member(self, project)
+
+ def is_project_manager(self, project):
+ return AuthManager().is_project_manager(self, project)
+
+ def generate_key_pair(self, name):
+ return AuthManager().generate_key_pair(self.id, name)
+
+ def create_key_pair(self, name, public_key, fingerprint):
+ return AuthManager().create_key_pair(self.id,
+ name,
+ public_key,
+ fingerprint)
+
+ def get_key_pair(self, name):
+ return AuthManager().get_key_pair(self.id, name)
+
+ def delete_key_pair(self, name):
+ return AuthManager().delete_key_pair(self.id, name)
+
+ def get_key_pairs(self):
+ return AuthManager().get_key_pairs(self.id)
+
+ def __repr__(self):
+ return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
+ self.name,
+ self.access,
+ self.secret,
+ self.admin)
+
+
+class KeyPair(AuthBase):
+ """Represents an ssh key returned from the datastore
+
+ Even though this object is named KeyPair, only the public key and
+ fingerprint is stored. The user's private key is not saved.
+ """
+ def __init__(self, id, name, owner_id, public_key, fingerprint):
+ self.id = id
+ self.name = name
+ self.owner_id = owner_id
+ self.public_key = public_key
+ self.fingerprint = fingerprint
+
+ def __repr__(self):
+ return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id,
+ self.name,
+ self.owner_id,
+ self.public_key,
+ self.fingerprint)
+
+
+class Project(AuthBase):
+ """Represents a Project returned from the datastore"""
+ def __init__(self, id, name, project_manager_id, description, member_ids):
+ self.id = id
+ self.name = name
+ self.project_manager_id = project_manager_id
+ self.description = description
+ self.member_ids = member_ids
+
+ @property
+ def project_manager(self):
+ return AuthManager().get_user(self.project_manager_id)
+
+ @property
+ def vpn_ip(self):
+ ip, port = AuthManager().get_project_vpn_data(self)
+ return ip
+
+ @property
+ def vpn_port(self):
+ ip, port = AuthManager().get_project_vpn_data(self)
+ return port
+
+ def has_manager(self, user):
+ return AuthManager().is_project_manager(user, self)
+
+ def has_member(self, user):
+ return AuthManager().is_project_member(user, self)
+
+ def add_role(self, user, role):
+ return AuthManager().add_role(user, role, self)
+
+ def remove_role(self, user, role):
+ return AuthManager().remove_role(user, role, self)
+
+ def has_role(self, user, role):
+ return AuthManager().has_role(user, role, self)
+
+ def get_credentials(self, user):
+ return AuthManager().get_credentials(user, self)
+
+ def __repr__(self):
+ return "Project('%s', '%s', '%s', '%s', %s)" % (self.id,
+ self.name,
+ self.project_manager_id,
+ self.description,
+ self.member_ids)
+
+
+class NoMorePorts(exception.Error):
+ pass
+
+
+class Vpn(datastore.BasicModel):
+ """Manages vpn ips and ports for projects"""
+ def __init__(self, project_id):
+ self.project_id = project_id
+ super(Vpn, self).__init__()
+
+ @property
+ def identifier(self):
+ """Identifier used for key in redis"""
+ return self.project_id
+
+ @classmethod
+ def create(cls, project_id):
+ """Creates a vpn for project
+
+ This method finds a free ip and port and stores the associated
+ values in the datastore.
+ """
+ # TODO(vish): get list of vpn ips from redis
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ vpn = cls(project_id)
+ # save ip for project
+ vpn['project'] = project_id
+ vpn['ip'] = FLAGS.vpn_ip
+ vpn['port'] = port
+ vpn.save()
+ return vpn
+
+ @classmethod
+ def find_free_port_for_ip(cls, ip):
+ """Finds a free port for a given ip from the redis set"""
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
+ redis = datastore.Redis.instance()
+ key = 'ip:%s:ports' % ip
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ if (not redis.exists(key) and
+ not redis.exists(cls._redis_association_name('ip', ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(key, i)
+
+ port = redis.spop(key)
+ if not port:
+ raise NoMorePorts()
+ return port
+
+ @classmethod
+ def num_ports_for_ip(cls, ip):
+ """Calculates the number of free ports for a given ip"""
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+ @property
+ def ip(self):
+ """The ip assigned to the project"""
+ return self['ip']
+
+ @property
+ def port(self):
+ """The port assigned to the project"""
+ return int(self['port'])
+
+ def save(self):
+ """Saves the association to the given ip"""
+ self.associate_with('ip', self.ip)
+ super(Vpn, self).save()
+
+ def destroy(self):
+ """Cleans up datastore and adds port back to pool"""
+ self.unassociate_with('ip', self.ip)
+ datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+ super(Vpn, self).destroy()
+
+
+class AuthManager(object):
+ """Manager Singleton for dealing with Users, Projects, and Keypairs
+
+ Methods accept objects or ids.
+
+ AuthManager uses a driver object to make requests to the data backend.
+ See ldapdriver for reference.
+
+ AuthManager also manages associated data related to Auth objects that
+ need to be more accessible, such as vpn ips and ports.
+ """
+ _instance=None
+ def __new__(cls, *args, **kwargs):
+ """Returns the AuthManager singleton with driver set
+
+ __init__ is run every time AuthManager() is called, so we need to do
+ any constructor related stuff here. The driver that is specified
+ in the flagfile is loaded here.
+ """
+ if not cls._instance:
+ cls._instance = super(AuthManager, cls).__new__(
+ cls, *args, **kwargs)
+ mod_str, sep, driver_str = FLAGS.auth_driver.rpartition('.')
+ try:
+ __import__(mod_str)
+ cls._instance.driver = getattr(sys.modules[mod_str],
+ driver_str)
+ except (ImportError, AttributeError):
+ raise exception.Error('Auth driver %s cannot be found'
+ % FLAGS.auth_driver)
+ return cls._instance
+
+ def authenticate(self, access, signature, params, verb='GET',
+ server_string='127.0.0.1:8773', path='/',
+ check_type='ec2', headers=None):
+ """Authenticates AWS request using access key and signature
+
+ If the project is not specified, attempts to authenticate to
+ a project with the same name as the user. This way, older tools
+ that have no project knowledge will still work.
+
+ @type access: str
+ @param access: Access key for user in the form "access:project".
+
+ @type signature: str
+ @param signature: Signature of the request.
+
+ @type params: list of str
+ @param params: Web paramaters used for the signature.
+
+ @type verb: str
+ @param verb: Web request verb ('GET' or 'POST').
+
+ @type server_string: str
+ @param server_string: Web request server string.
+
+ @type path: str
+ @param path: Web request path.
+
+ @type check_type: str
+ @param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
+ S3. Any other value will cause signature not to be
+ checked.
+
+ @type headers: list
+ @param headers: HTTP headers passed with the request (only needed for
+ s3 signature checks)
+
+ @rtype: tuple (User, Project)
+ @return: User and project that the request represents.
+ """
+ # TODO(vish): check for valid timestamp
+ (access_key, sep, project_id) = access.partition(':')
+
+ logging.info('Looking up user: %r', access_key)
+ user = self.get_user_from_access_key(access_key)
+ logging.info('user: %r', user)
+ if user == None:
+ raise exception.NotFound('No user found for access key %s' %
+ access_key)
+
+ # NOTE(vish): if we stop using project name as id we need better
+ # logic to find a default project for user
+ if project_id is '':
+ project_id = user.name
+
+ project = self.get_project(project_id)
+ if project == None:
+ raise exception.NotFound('No project called %s could be found' %
+ project_id)
+ if not self.is_admin(user) and not self.is_project_member(user,
+ project):
+ raise exception.NotFound('User %s is not a member of project %s' %
+ (user.id, project.id))
+ if check_type == 's3':
+ expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path)
+ logging.debug('user.secret: %s', user.secret)
+ logging.debug('expected_signature: %s', expected_signature)
+ logging.debug('signature: %s', signature)
+ if signature != expected_signature:
+ raise exception.NotAuthorized('Signature does not match')
+ elif check_type == 'ec2':
+ # NOTE(vish): hmac can't handle unicode, so encode ensures that
+ # secret isn't unicode
+ expected_signature = signer.Signer(user.secret.encode()).generate(
+ params, verb, server_string, path)
+ logging.debug('user.secret: %s', user.secret)
+ logging.debug('expected_signature: %s', expected_signature)
+ logging.debug('signature: %s', signature)
+ if signature != expected_signature:
+ raise exception.NotAuthorized('Signature does not match')
+ return (user, project)
+
+ def is_superuser(self, user):
+ """Checks for superuser status, allowing user to bypass rbac
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @rtype: bool
+ @return: True for superuser.
+ """
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ # NOTE(vish): admin flag on user represents superuser
+ if user.admin:
+ return True
+ for role in FLAGS.superuser_roles:
+ if self.has_role(user, role):
+ return True
+
+ def is_admin(self, user):
+ """Checks for admin status, allowing user to access all projects
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @rtype: bool
+ @return: True for admin.
+ """
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if self.is_superuser(user):
+ return True
+ for role in FLAGS.global_roles:
+ if self.has_role(user, role):
+ return True
+
+ def has_role(self, user, role, project=None):
+ """Checks existence of role for user
+
+ If project is not specified, checks for a global role. If project
+ is specified, checks for the union of the global role and the
+ project role.
+
+ Role 'projectmanager' only works for projects and simply checks to
+ see if the user is the project_manager of the specified project. It
+ is the same as calling is_project_manager(user, project).
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @type role: str
+ @param role: Role to check.
+
+ @type project: Project or project_id
+ @param project: Project in which to look for local role.
+
+ @rtype: bool
+ @return: True if the user has the role.
+ """
+ with self.driver() as drv:
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error("Must specify project")
+ return self.is_project_manager(user, project)
+
+ global_role = drv.has_role(User.safe_id(user),
+ role,
+ None)
+ if not global_role:
+ return global_role
+
+ if not project or role in FLAGS.global_roles:
+ return global_role
+
+ return drv.has_role(User.safe_id(user),
+ role,
+ Project.safe_id(project))
+
+ def add_role(self, user, role, project=None):
+ """Adds role for user
+
+ If project is not specified, adds a global role. If project
+ is specified, adds a local role.
+
+ The 'projectmanager' role is special and can't be added or removed.
+
+ @type user: User or uid
+ @param user: User to which to add role.
+
+ @type role: str
+ @param role: Role to add.
+
+ @type project: Project or project_id
+ @param project: Project in which to add local role.
+ """
+ with self.driver() as drv:
+ drv.add_role(User.safe_id(user), role, Project.safe_id(project))
+
+ def remove_role(self, user, role, project=None):
+ """Removes role for user
+
+ If project is not specified, removes a global role. If project
+ is specified, removes a local role.
+
+ The 'projectmanager' role is special and can't be added or removed.
+
+ @type user: User or uid
+ @param user: User from which to remove role.
+
+ @type role: str
+ @param role: Role to remove.
+
+ @type project: Project or project_id
+ @param project: Project in which to remove local role.
+ """
+ with self.driver() as drv:
+ drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+
+ def get_project(self, pid):
+ """Get project object by id"""
+ with self.driver() as drv:
+ project_dict = drv.get_project(pid)
+ if project_dict:
+ return Project(**project_dict)
+
+ def get_projects(self):
+ """Retrieves list of all projects"""
+ with self.driver() as drv:
+ project_list = drv.get_projects()
+ if not project_list:
+ return []
+ return [Project(**project_dict) for project_dict in project_list]
+
+ def create_project(self, name, manager_user,
+ description=None, member_users=None):
+ """Create a project
+
+ @type name: str
+ @param name: Name of the project to create. The name will also be
+ used as the project id.
+
+ @type manager_user: User or uid
+ @param manager_user: This user will be the project manager.
+
+ @type description: str
+        @param description: Description of the project. If no description is
+ specified, the name of the project will be used.
+
+ @type member_users: list of User or uid
+        @param member_users: Initial project members. The project manager will always be
+ added as a member, even if he isn't specified in this list.
+
+ @rtype: Project
+ @return: The new project.
+ """
+ if member_users:
+ member_users = [User.safe_id(u) for u in member_users]
+ with self.driver() as drv:
+ project_dict = drv.create_project(name,
+ User.safe_id(manager_user),
+ description,
+ member_users)
+ if project_dict:
+ if FLAGS.use_vpn:
+ Vpn.create(project_dict['id'])
+ return Project(**project_dict)
+
+ def add_to_project(self, user, project):
+ """Add user to project"""
+ with self.driver() as drv:
+ return drv.add_to_project(User.safe_id(user),
+ Project.safe_id(project))
+
+ def is_project_manager(self, user, project):
+ """Checks if user is project manager"""
+ if not isinstance(project, Project):
+ project = self.get_project(project)
+ return User.safe_id(user) == project.project_manager_id
+
+ def is_project_member(self, user, project):
+ """Checks to see if user is a member of project"""
+ if not isinstance(project, Project):
+ project = self.get_project(project)
+ return User.safe_id(user) in project.member_ids
+
+ def remove_from_project(self, user, project):
+ """Removes a user from a project"""
+ with self.driver() as drv:
+ return drv.remove_from_project(User.safe_id(user),
+ Project.safe_id(project))
+
+ def get_project_vpn_data(self, project):
+ """Gets vpn ip and port for project
+
+ @type project: Project or project_id
+ @param project: Project from which to get associated vpn data
+
+        @rtype: tuple of (str, str)
+ @return: A tuple containing (ip, port) or None, None if vpn has
+            not been allocated for the project.
+ """
+ vpn = Vpn.lookup(Project.safe_id(project))
+ if not vpn:
+ return None, None
+ return (vpn.ip, vpn.port)
+
+ def delete_project(self, project):
+ """Deletes a project"""
+ with self.driver() as drv:
+ return drv.delete_project(Project.safe_id(project))
+
+ def get_user(self, uid):
+ """Retrieves a user by id"""
+ with self.driver() as drv:
+ user_dict = drv.get_user(uid)
+ if user_dict:
+ return User(**user_dict)
+
+ def get_user_from_access_key(self, access_key):
+ """Retrieves a user by access key"""
+ with self.driver() as drv:
+ user_dict = drv.get_user_from_access_key(access_key)
+ if user_dict:
+ return User(**user_dict)
+
+ def get_users(self):
+ """Retrieves a list of all users"""
+ with self.driver() as drv:
+ user_list = drv.get_users()
+ if not user_list:
+ return []
+ return [User(**user_dict) for user_dict in user_list]
+
+ def create_user(self, name, access=None, secret=None, admin=False):
+ """Creates a user
+
+ @type name: str
+ @param name: Name of the user to create.
+
+ @type access: str
+ @param access: Access Key (defaults to a random uuid)
+
+ @type secret: str
+ @param secret: Secret Key (defaults to a random uuid)
+
+ @type admin: bool
+ @param admin: Whether to set the admin flag. The admin flag gives
+        superuser status regardless of roles specified for the user.
+
+        @note: Unlike the deprecated UserManager.create_user, this method does
+        not automatically create a matching project for the user.
+
+ @rtype: User
+ @return: The new user.
+ """
+ if access == None: access = str(uuid.uuid4())
+ if secret == None: secret = str(uuid.uuid4())
+ with self.driver() as drv:
+ user_dict = drv.create_user(name, access, secret, admin)
+ if user_dict:
+ return User(**user_dict)
+
+ def delete_user(self, user):
+ """Deletes a user"""
+ with self.driver() as drv:
+ drv.delete_user(User.safe_id(user))
+
+ def generate_key_pair(self, user, key_name):
+ """Generates a key pair for a user
+
+ Generates a public and private key, stores the public key using the
+ key_name, and returns the private key and fingerprint.
+
+ @type user: User or uid
+ @param user: User for which to create key pair.
+
+ @type key_name: str
+ @param key_name: Name to use for the generated KeyPair.
+
+ @rtype: tuple (private_key, fingerprint)
+ @return: A tuple containing the private_key and fingerprint.
+ """
+ # NOTE(vish): generating key pair is slow so check for legal
+ # creation before creating keypair
+ uid = User.safe_id(user)
+ with self.driver() as drv:
+ if not drv.get_user(uid):
+ raise exception.NotFound("User %s doesn't exist" % user)
+ if drv.get_key_pair(uid, key_name):
+ raise exception.Duplicate("The keypair %s already exists"
+ % key_name)
+ private_key, public_key, fingerprint = crypto.generate_key_pair()
+ self.create_key_pair(uid, key_name, public_key, fingerprint)
+ return private_key, fingerprint
+
+ def create_key_pair(self, user, key_name, public_key, fingerprint):
+ """Creates a key pair for user"""
+ with self.driver() as drv:
+ kp_dict = drv.create_key_pair(User.safe_id(user),
+ key_name,
+ public_key,
+ fingerprint)
+ if kp_dict:
+ return KeyPair(**kp_dict)
+
+ def get_key_pair(self, user, key_name):
+ """Retrieves a key pair for user"""
+ with self.driver() as drv:
+ kp_dict = drv.get_key_pair(User.safe_id(user), key_name)
+ if kp_dict:
+ return KeyPair(**kp_dict)
+
+ def get_key_pairs(self, user):
+ """Retrieves all key pairs for user"""
+ with self.driver() as drv:
+ kp_list = drv.get_key_pairs(User.safe_id(user))
+ if not kp_list:
+ return []
+ return [KeyPair(**kp_dict) for kp_dict in kp_list]
+
+ def delete_key_pair(self, user, key_name):
+ """Deletes a key pair for user"""
+ with self.driver() as drv:
+ drv.delete_key_pair(User.safe_id(user), key_name)
+
+ def get_credentials(self, user, project=None):
+ """Get credential zip for user in project"""
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if project is None:
+ project = user.id
+ pid = Project.safe_id(project)
+ rc = self.__generate_rc(user.access, user.secret, pid)
+ private_key, signed_cert = self._generate_x509_cert(user.id, pid)
+
+ vpn = Vpn.lookup(pid)
+ if not vpn:
+ raise exception.Error("No vpn data allocated for project %s" %
+ project.name)
+ configfile = open(FLAGS.vpn_client_template,"r")
+ s = string.Template(configfile.read())
+ configfile.close()
+ config = s.substitute(keyfile=FLAGS.credential_key_file,
+ certfile=FLAGS.credential_cert_file,
+ ip=vpn.ip,
+ port=vpn.port)
+
+ tmpdir = tempfile.mkdtemp()
+ zf = os.path.join(tmpdir, "temp.zip")
+ zippy = zipfile.ZipFile(zf, 'w')
+ zippy.writestr(FLAGS.credential_rc_file, rc)
+ zippy.writestr(FLAGS.credential_key_file, private_key)
+ zippy.writestr(FLAGS.credential_cert_file, signed_cert)
+ zippy.writestr("nebula-client.conf", config)
+ zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
+ zippy.close()
+ with open(zf, 'rb') as f:
+ buffer = f.read()
+
+ shutil.rmtree(tmpdir)
+ return buffer
+
+ def __generate_rc(self, access, secret, pid):
+ """Generate rc file for user"""
+ rc = open(FLAGS.credentials_template).read()
+ rc = rc % { 'access': access,
+ 'project': pid,
+ 'secret': secret,
+ 'ec2': FLAGS.ec2_url,
+ 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
+ 'nova': FLAGS.ca_file,
+ 'cert': FLAGS.credential_cert_file,
+ 'key': FLAGS.credential_key_file,
+ }
+ return rc
+
+ def _generate_x509_cert(self, uid, pid):
+ """Generate x509 cert for user"""
+ (private_key, csr) = crypto.generate_x509_cert(
+ self.__cert_subject(uid))
+ # TODO(joshua): This should be async call back to the cloud controller
+ signed_cert = crypto.sign_csr(csr, pid)
+ return (private_key, signed_cert)
+
+ def __cert_subject(self, uid):
+ """Helper to generate cert subject"""
+ return FLAGS.credential_cert_subject % (uid, utils.isotime())
diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py
index 9e2bb830c..7fab9419f 100644
--- a/nova/auth/rbac.py
+++ b/nova/auth/rbac.py
@@ -17,7 +17,7 @@
# under the License.
from nova import exception
-from nova.auth import users
+from nova.auth import manager
def allow(*roles):
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 83831bfac..7d7471575 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,6 +48,7 @@ import hashlib
import hmac
import logging
import urllib
+import boto.utils
from nova.exception import Error
@@ -59,6 +60,13 @@ class Signer(object):
if hashlib.sha256:
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
+ def s3_authorization(self, headers, verb, path):
+ c_string = boto.utils.canonical_string(verb, path, headers)
+ hmac = self.hmac.copy()
+ hmac.update(c_string)
+ b64_hmac = base64.encodestring(hmac.digest()).strip()
+ return b64_hmac
+
def generate(self, params, verb, server_string, path):
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
diff --git a/nova/auth/users.py b/nova/auth/users.py
deleted file mode 100644
index fc08dc34d..000000000
--- a/nova/auth/users.py
+++ /dev/null
@@ -1,974 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Nova users and user management, including RBAC hooks.
-"""
-
-import datetime
-import logging
-import os
-import shutil
-import signer
-import string
-import tempfile
-import uuid
-import zipfile
-
-try:
- import ldap
-except Exception, e:
- import fakeldap as ldap
-
-import fakeldap
-
-# TODO(termie): clean up these imports
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import crypto
-from nova import utils
-from nova import objectstore # for flags
-
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('ldap_url', 'ldap://localhost',
- 'Point this at your ldap server')
-flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
-flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com',
- 'DN of admin user')
-flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
-flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com',
- 'OU for Users')
-flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com',
- 'OU for Projects')
-flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com',
- 'OU for Roles')
-
-# NOTE(vish): mapping with these flags is necessary because we're going
-# to tie in to an existing ldap schema
-flags.DEFINE_string('ldap_cloudadmin',
- 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
-flags.DEFINE_string('ldap_itsec',
- 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
-flags.DEFINE_string('ldap_sysadmin',
- 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
-flags.DEFINE_string('ldap_netadmin',
- 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
-flags.DEFINE_string('ldap_developer',
- 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
-
-# NOTE(vish): a user with one of these roles will be a superuser and
-# have access to all api commands
-flags.DEFINE_list('superuser_roles', ['cloudadmin'],
- 'roles that ignore rbac checking completely')
-
-# NOTE(vish): a user with one of these roles will have it for every
-# project, even if he or she is not a member of the project
-flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
- 'roles that apply to all projects')
-
-flags.DEFINE_string('credentials_template',
- utils.abspath('auth/novarc.template'),
- 'Template for creating users rc file')
-flags.DEFINE_string('vpn_client_template',
- utils.abspath('cloudpipe/client.ovpn.template'),
- 'Template for creating users vpn file')
-flags.DEFINE_string('credential_key_file', 'pk.pem',
- 'Filename of private key in credentials zip')
-flags.DEFINE_string('credential_cert_file', 'cert.pem',
- 'Filename of certificate in credentials zip')
-flags.DEFINE_string('credential_rc_file', 'novarc',
- 'Filename of rc in credentials zip')
-
-flags.DEFINE_integer('vpn_start_port', 1000,
- 'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
- 'End port for the cloudpipe VPN servers')
-
-flags.DEFINE_string('credential_cert_subject',
- '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
- 'OU=NovaDev/CN=%s-%s',
- 'Subject for certificate for users')
-
-flags.DEFINE_string('vpn_ip', '127.0.0.1',
- 'Public IP for the cloudpipe VPN servers')
-
-
-class AuthBase(object):
- @classmethod
- def safe_id(cls, obj):
- """Safe get object id.
-
- This method will return the id of the object if the object
- is of this class, otherwise it will return the original object.
- This allows methods to accept objects or ids as paramaters.
-
- """
- if isinstance(obj, cls):
- return obj.id
- else:
- return obj
-
-
-class User(AuthBase):
- """id and name are currently the same"""
- def __init__(self, id, name, access, secret, admin):
- self.id = id
- self.name = name
- self.access = access
- self.secret = secret
- self.admin = admin
-
- def is_superuser(self):
- """allows user to bypass rbac completely"""
- if self.admin:
- return True
- for role in FLAGS.superuser_roles:
- if self.has_role(role):
- return True
-
- def is_admin(self):
- """allows user to see objects from all projects"""
- if self.is_superuser():
- return True
- for role in FLAGS.global_roles:
- if self.has_role(role):
- return True
-
- def has_role(self, role):
- return UserManager.instance().has_role(self, role)
-
- def add_role(self, role):
- return UserManager.instance().add_role(self, role)
-
- def remove_role(self, role):
- return UserManager.instance().remove_role(self, role)
-
- def is_project_member(self, project):
- return UserManager.instance().is_project_member(self, project)
-
- def is_project_manager(self, project):
- return UserManager.instance().is_project_manager(self, project)
-
- def generate_rc(self, project=None):
- if project is None:
- project = self.id
- rc = open(FLAGS.credentials_template).read()
- rc = rc % { 'access': self.access,
- 'project': project,
- 'secret': self.secret,
- 'ec2': FLAGS.ec2_url,
- 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
- 'nova': FLAGS.ca_file,
- 'cert': FLAGS.credential_cert_file,
- 'key': FLAGS.credential_key_file,
- }
- return rc
-
- def generate_key_pair(self, name):
- return UserManager.instance().generate_key_pair(self.id, name)
-
- def create_key_pair(self, name, public_key, fingerprint):
- return UserManager.instance().create_key_pair(self.id,
- name,
- public_key,
- fingerprint)
-
- def get_key_pair(self, name):
- return UserManager.instance().get_key_pair(self.id, name)
-
- def delete_key_pair(self, name):
- return UserManager.instance().delete_key_pair(self.id, name)
-
- def get_key_pairs(self):
- return UserManager.instance().get_key_pairs(self.id)
-
- def __repr__(self):
- return "User('%s', '%s', '%s', '%s', %s)" % (
- self.id, self.name, self.access, self.secret, self.admin)
-
-
-class KeyPair(AuthBase):
- def __init__(self, id, owner_id, public_key, fingerprint):
- self.id = id
- self.name = id
- self.owner_id = owner_id
- self.public_key = public_key
- self.fingerprint = fingerprint
-
- def delete(self):
- return UserManager.instance().delete_key_pair(self.owner, self.name)
-
- def __repr__(self):
- return "KeyPair('%s', '%s', '%s', '%s')" % (
- self.id, self.owner_id, self.public_key, self.fingerprint)
-
-
-class Group(AuthBase):
- """id and name are currently the same"""
- def __init__(self, id, description = None, member_ids = None):
- self.id = id
- self.name = id
- self.description = description
- self.member_ids = member_ids
-
- def has_member(self, user):
- return User.safe_id(user) in self.member_ids
-
- def __repr__(self):
- return "Group('%s', '%s', %s)" % (
- self.id, self.description, self.member_ids)
-
-
-class Project(Group):
- def __init__(self, id, project_manager_id, description, member_ids):
- self.project_manager_id = project_manager_id
- super(Project, self).__init__(id, description, member_ids)
-
- @property
- def project_manager(self):
- return UserManager.instance().get_user(self.project_manager_id)
-
- def has_manager(self, user):
- return User.safe_id(user) == self.project_manager_id
-
- def add_role(self, user, role):
- return UserManager.instance().add_role(user, role, self)
-
- def remove_role(self, user, role):
- return UserManager.instance().remove_role(user, role, self)
-
- def has_role(self, user, role):
- return UserManager.instance().has_role(user, role, self)
-
- @property
- def vpn_ip(self):
- return Vpn(self.id).ip
-
- @property
- def vpn_port(self):
- return Vpn(self.id).port
-
- def get_credentials(self, user):
- if not isinstance(user, User):
- user = UserManager.instance().get_user(user)
- rc = user.generate_rc(self.id)
- private_key, signed_cert = self.generate_x509_cert(user)
-
- configfile = open(FLAGS.vpn_client_template,"r")
- s = string.Template(configfile.read())
- configfile.close()
- config = s.substitute(keyfile=FLAGS.credential_key_file,
- certfile=FLAGS.credential_cert_file,
- ip=self.vpn_ip,
- port=self.vpn_port)
-
- tmpdir = tempfile.mkdtemp()
- zf = os.path.join(tmpdir, "temp.zip")
- zippy = zipfile.ZipFile(zf, 'w')
- zippy.writestr(FLAGS.credential_rc_file, rc)
- zippy.writestr(FLAGS.credential_key_file, private_key)
- zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- zippy.writestr("nebula-client.conf", config)
- zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id))
- zippy.close()
- with open(zf, 'rb') as f:
- buffer = f.read()
-
- shutil.rmtree(tmpdir)
- return buffer
-
- def generate_x509_cert(self, user):
- return UserManager.instance().generate_x509_cert(user, self)
-
- def __repr__(self):
- return "Project('%s', '%s', '%s', %s)" % (
- self.id, self.project_manager_id,
- self.description, self.member_ids)
-
-
-class NoMorePorts(exception.Error):
- pass
-
-
-class Vpn(datastore.BasicModel):
- def __init__(self, project_id):
- self.project_id = project_id
- super(Vpn, self).__init__()
-
- @property
- def identifier(self):
- return self.project_id
-
- @classmethod
- def create(cls, project_id):
- # TODO(vish): get list of vpn ips from redis
- port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
- vpn = cls(project_id)
- # save ip for project
- vpn['project'] = project_id
- vpn['ip'] = FLAGS.vpn_ip
- vpn['port'] = port
- vpn.save()
- return vpn
-
- @classmethod
- def find_free_port_for_ip(cls, ip):
- # TODO(vish): these redis commands should be generalized and
- # placed into a base class. Conceptually, it is
- # similar to an association, but we are just
- # storing a set of values instead of keys that
- # should be turned into objects.
- redis = datastore.Redis.instance()
- key = 'ip:%s:ports' % ip
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- if (not redis.exists(key) and
- not redis.exists(cls._redis_association_name('ip', ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(key, i)
-
- port = redis.spop(key)
- if not port:
- raise NoMorePorts()
- return port
-
- @classmethod
- def num_ports_for_ip(cls, ip):
- return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
- @property
- def ip(self):
- return self['ip']
-
- @property
- def port(self):
- return int(self['port'])
-
- def save(self):
- self.associate_with('ip', self.ip)
- super(Vpn, self).save()
-
- def destroy(self):
- self.unassociate_with('ip', self.ip)
- datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
- super(Vpn, self).destroy()
-
-
-class UserManager(object):
- def __init__(self):
- if hasattr(self.__class__, '_instance'):
- raise Exception('Attempted to instantiate singleton')
-
- @classmethod
- def instance(cls):
- if not hasattr(cls, '_instance'):
- inst = UserManager()
- cls._instance = inst
- if FLAGS.fake_users:
- try:
- inst.create_user('fake', 'fake', 'fake')
- except: pass
- try:
- inst.create_user('user', 'user', 'user')
- except: pass
- try:
- inst.create_user('admin', 'admin', 'admin', True)
- except: pass
- return cls._instance
-
- def authenticate(self, access, signature, params, verb='GET',
- server_string='127.0.0.1:8773', path='/',
- verify_signature=True):
- # TODO: Check for valid timestamp
- (access_key, sep, project_name) = access.partition(':')
-
- user = self.get_user_from_access_key(access_key)
- if user == None:
- raise exception.NotFound('No user found for access key %s' %
- access_key)
- if project_name is '':
- project_name = user.name
-
- project = self.get_project(project_name)
- if project == None:
- raise exception.NotFound('No project called %s could be found' %
- project_name)
- if not user.is_admin() and not project.has_member(user):
- raise exception.NotFound('User %s is not a member of project %s' %
- (user.id, project.id))
- if verify_signature:
- # NOTE(vish): hmac can't handle unicode, so encode ensures that
- # secret isn't unicode
- expected_signature = signer.Signer(user.secret.encode()).generate(
- params, verb, server_string, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
- if signature != expected_signature:
- raise exception.NotAuthorized('Signature does not match')
- return (user, project)
-
- def has_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- if role == 'projectmanager':
- if not project:
- raise exception.Error("Must specify project")
- return self.is_project_manager(user, project)
-
- global_role = conn.has_role(User.safe_id(user),
- role,
- None)
- if not global_role:
- return global_role
-
- if not project or role in FLAGS.global_roles:
- return global_role
-
- return conn.has_role(User.safe_id(user),
- role,
- Project.safe_id(project))
-
- def add_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- return conn.add_role(User.safe_id(user), role,
- Project.safe_id(project))
-
- def remove_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- return conn.remove_role(User.safe_id(user), role,
- Project.safe_id(project))
-
- def create_project(self, name, manager_user,
- description=None, member_users=None):
- if member_users:
- member_users = [User.safe_id(u) for u in member_users]
- # NOTE(vish): try to associate a vpn ip and port first because
- # if it throws an exception, we save having to
- # create and destroy a project
- Vpn.create(name)
- with LDAPWrapper() as conn:
- return conn.create_project(name,
- User.safe_id(manager_user),
- description,
- member_users)
-
-
- def get_projects(self):
- with LDAPWrapper() as conn:
- return conn.find_projects()
-
-
- def get_project(self, project):
- with LDAPWrapper() as conn:
- return conn.find_project(Project.safe_id(project))
-
- def add_to_project(self, user, project):
- with LDAPWrapper() as conn:
- return conn.add_to_project(User.safe_id(user),
- Project.safe_id(project))
-
- def is_project_manager(self, user, project):
- if not isinstance(project, Project):
- project = self.get_project(project)
- return project.has_manager(user)
-
- def is_project_member(self, user, project):
- if isinstance(project, Project):
- return project.has_member(user)
- else:
- with LDAPWrapper() as conn:
- return conn.is_in_project(User.safe_id(user), project)
-
- def remove_from_project(self, user, project):
- with LDAPWrapper() as conn:
- return conn.remove_from_project(User.safe_id(user),
- Project.safe_id(project))
-
- def delete_project(self, project):
- with LDAPWrapper() as conn:
- return conn.delete_project(Project.safe_id(project))
-
- def get_user(self, uid):
- with LDAPWrapper() as conn:
- return conn.find_user(uid)
-
- def get_user_from_access_key(self, access_key):
- with LDAPWrapper() as conn:
- return conn.find_user_by_access_key(access_key)
-
- def get_users(self):
- with LDAPWrapper() as conn:
- return conn.find_users()
-
- def create_user(self, user, access=None, secret=None,
- admin=False, create_project=True):
- if access == None: access = str(uuid.uuid4())
- if secret == None: secret = str(uuid.uuid4())
- with LDAPWrapper() as conn:
- user = User.safe_id(user)
- result = conn.create_user(user, access, secret, admin)
- if create_project:
- # NOTE(vish): if the project creation fails, we delete
- # the user and return an exception
- try:
- conn.create_project(user, user, user)
- except Exception:
- with LDAPWrapper() as conn:
- conn.delete_user(user)
- raise
- return result
-
- def delete_user(self, user, delete_project=True):
- with LDAPWrapper() as conn:
- user = User.safe_id(user)
- if delete_project:
- try:
- conn.delete_project(user)
- except exception.NotFound:
- pass
- conn.delete_user(user)
-
- def generate_key_pair(self, user, key_name):
- # generating key pair is slow so delay generation
- # until after check
- user = User.safe_id(user)
- with LDAPWrapper() as conn:
- if not conn.user_exists(user):
- raise exception.NotFound("User %s doesn't exist" % user)
- if conn.key_pair_exists(user, key_name):
- raise exception.Duplicate("The keypair %s already exists"
- % key_name)
- private_key, public_key, fingerprint = crypto.generate_key_pair()
- self.create_key_pair(User.safe_id(user), key_name,
- public_key, fingerprint)
- return private_key, fingerprint
-
- def create_key_pair(self, user, key_name, public_key, fingerprint):
- with LDAPWrapper() as conn:
- return conn.create_key_pair(User.safe_id(user), key_name,
- public_key, fingerprint)
-
- def get_key_pair(self, user, key_name):
- with LDAPWrapper() as conn:
- return conn.find_key_pair(User.safe_id(user), key_name)
-
- def get_key_pairs(self, user):
- with LDAPWrapper() as conn:
- return conn.find_key_pairs(User.safe_id(user))
-
- def delete_key_pair(self, user, key_name):
- with LDAPWrapper() as conn:
- conn.delete_key_pair(User.safe_id(user), key_name)
-
- def generate_x509_cert(self, user, project):
- (private_key, csr) = crypto.generate_x509_cert(
- self.__cert_subject(User.safe_id(user)))
- # TODO - This should be async call back to the cloud controller
- signed_cert = crypto.sign_csr(csr, Project.safe_id(project))
- return (private_key, signed_cert)
-
- def __cert_subject(self, uid):
- # FIXME(ja) - this should be pulled from a global configuration
- return FLAGS.credential_cert_subject % (uid, utils.isotime())
-
-
-class LDAPWrapper(object):
- def __init__(self):
- self.user = FLAGS.user_dn
- self.passwd = FLAGS.ldap_password
-
- def __enter__(self):
- self.connect()
- return self
-
- def __exit__(self, type, value, traceback):
- self.conn.unbind_s()
- return False
-
- def connect(self):
- """ connect to ldap as admin user """
- if FLAGS.fake_users:
- self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT
- self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION
- self.conn = fakeldap.initialize(FLAGS.ldap_url)
- else:
- self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT
- self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION
- self.conn = ldap.initialize(FLAGS.ldap_url)
- self.conn.simple_bind_s(self.user, self.passwd)
-
- def find_object(self, dn, query = None):
- objects = self.find_objects(dn, query)
- if len(objects) == 0:
- return None
- return objects[0]
-
- def find_dns(self, dn, query=None):
- try:
- res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except self.NO_SUCH_OBJECT:
- return []
- # just return the DNs
- return [dn for dn, attributes in res]
-
- def find_objects(self, dn, query = None):
- try:
- res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except self.NO_SUCH_OBJECT:
- return []
- # just return the attributes
- return [attributes for dn, attributes in res]
-
- def find_users(self):
- attrs = self.find_objects(FLAGS.user_ldap_subtree,
- '(objectclass=novaUser)')
- return [self.__to_user(attr) for attr in attrs]
-
- def find_key_pairs(self, uid):
- attrs = self.find_objects(self.__uid_to_dn(uid),
- '(objectclass=novaKeyPair)')
- return [self.__to_key_pair(uid, attr) for attr in attrs]
-
- def find_projects(self):
- attrs = self.find_objects(FLAGS.project_ldap_subtree,
- '(objectclass=novaProject)')
- return [self.__to_project(attr) for attr in attrs]
-
- def find_roles(self, tree):
- attrs = self.find_objects(tree,
- '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
- return [self.__to_group(attr) for attr in attrs]
-
- def find_group_dns_with_member(self, tree, uid):
- dns = self.find_dns(tree,
- '(&(objectclass=groupOfNames)(member=%s))' %
- self.__uid_to_dn(uid))
- return dns
-
- def find_user(self, uid):
- attr = self.find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
- return self.__to_user(attr)
-
- def find_key_pair(self, uid, key_name):
- dn = 'cn=%s,%s' % (key_name,
- self.__uid_to_dn(uid))
- attr = self.find_object(dn, '(objectclass=novaKeyPair)')
- return self.__to_key_pair(uid, attr)
-
    def find_group(self, dn):
        """Return the Group at dn, or None if absent.

        Takes the dn directly instead of constructing it from a name.
        """
        attr = self.find_object(dn, '(objectclass=groupOfNames)')
        return self.__to_group(attr)
-
- def find_project(self, name):
- dn = 'cn=%s,%s' % (name,
- FLAGS.project_ldap_subtree)
- attr = self.find_object(dn, '(objectclass=novaProject)')
- return self.__to_project(attr)
-
- def user_exists(self, name):
- return self.find_user(name) != None
-
- def key_pair_exists(self, uid, key_name):
- return self.find_key_pair(uid, key_name) != None
-
- def project_exists(self, name):
- return self.find_project(name) != None
-
- def group_exists(self, dn):
- return self.find_group(dn) != None
-
- def delete_key_pairs(self, uid):
- keys = self.find_key_pairs(uid)
- if keys != None:
- for key in keys:
- self.delete_key_pair(uid, key.name)
-
- def create_user(self, name, access_key, secret_key, is_admin):
- if self.user_exists(name):
- raise exception.Duplicate("LDAP user %s already exists" % name)
- attr = [
- ('objectclass', ['person',
- 'organizationalPerson',
- 'inetOrgPerson',
- 'novaUser']),
- ('ou', [FLAGS.user_unit]),
- ('uid', [name]),
- ('sn', [name]),
- ('cn', [name]),
- ('secretKey', [secret_key]),
- ('accessKey', [access_key]),
- ('isAdmin', [str(is_admin).upper()]),
- ]
- self.conn.add_s(self.__uid_to_dn(name), attr)
- return self.__to_user(dict(attr))
-
- def create_project(self, name, manager_uid,
- description=None, member_uids=None):
- if self.project_exists(name):
- raise exception.Duplicate("Project can't be created because "
- "project %s already exists" % name)
- if not self.user_exists(manager_uid):
- raise exception.NotFound("Project can't be created because "
- "manager %s doesn't exist" % manager_uid)
- manager_dn = self.__uid_to_dn(manager_uid)
- # description is a required attribute
- if description is None:
- description = name
- members = []
- if member_uids != None:
- for member_uid in member_uids:
- if not self.user_exists(member_uid):
- raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist" % member_uid)
- members.append(self.__uid_to_dn(member_uid))
- # always add the manager as a member because members is required
- if not manager_dn in members:
- members.append(manager_dn)
- attr = [
- ('objectclass', ['novaProject']),
- ('cn', [name]),
- ('description', [description]),
- ('projectManager', [manager_dn]),
- ('member', members)
- ]
- self.conn.add_s('cn=%s,%s' % (name, FLAGS.project_ldap_subtree), attr)
- return self.__to_project(dict(attr))
-
- def add_to_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.add_to_group(uid, dn)
-
- def remove_from_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.remove_from_group(uid, dn)
-
- def is_in_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.is_in_group(uid, dn)
-
- def __role_to_dn(self, role, project_id=None):
- if project_id == None:
- return FLAGS.__getitem__("ldap_%s" % role).value
- else:
- return 'cn=%s,cn=%s,%s' % (role,
- project_id,
- FLAGS.project_ldap_subtree)
-
- def __create_group(self, group_dn, name, uid,
- description, member_uids = None):
- if self.group_exists(group_dn):
- raise exception.Duplicate("Group can't be created because "
- "group %s already exists" % name)
- members = []
- if member_uids != None:
- for member_uid in member_uids:
- if not self.user_exists(member_uid):
- raise exception.NotFound("Group can't be created "
- "because user %s doesn't exist" % member_uid)
- members.append(self.__uid_to_dn(member_uid))
- dn = self.__uid_to_dn(uid)
- if not dn in members:
- members.append(dn)
- attr = [
- ('objectclass', ['groupOfNames']),
- ('cn', [name]),
- ('description', [description]),
- ('member', members)
- ]
- self.conn.add_s(group_dn, attr)
- return self.__to_group(dict(attr))
-
- def has_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- return self.is_in_group(uid, role_dn)
-
- def add_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- if not self.group_exists(role_dn):
- # create the role if it doesn't exist
- description = '%s role for %s' % (role, project_id)
- self.__create_group(role_dn, role, uid, description)
- else:
- return self.add_to_group(uid, role_dn)
-
- def remove_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- return self.remove_from_group(uid, role_dn)
-
- def is_in_group(self, uid, group_dn):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group "
- "becuase the user doesn't exist" % (uid,))
- if not self.group_exists(group_dn):
- return False
- res = self.find_object(group_dn,
- '(member=%s)' % self.__uid_to_dn(uid))
- return res != None
-
- def add_to_group(self, uid, group_dn):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group "
- "becuase the user doesn't exist" % (uid,))
- if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- (group_dn,))
- if self.is_in_group(uid, group_dn):
- raise exception.Duplicate("User %s is already a member of "
- "the group %s" % (uid, group_dn))
- attr = [
- (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
- ]
- self.conn.modify_s(group_dn, attr)
-
    def remove_from_group(self, uid, group_dn):
        """Remove uid from the group at group_dn.

        Raises NotFound if the group or the user doesn't exist, or if the
        user is not a member. Delegates the actual modify to
        _safe_remove_from_group so an emptied group is deleted.
        """
        if not self.group_exists(group_dn):
            raise exception.NotFound("The group at dn %s doesn't exist" %
                                     (group_dn,))
        if not self.user_exists(uid):
            raise exception.NotFound("User %s can't be removed from the "
                                     "group because the user doesn't exist" % (uid,))
        if not self.is_in_group(uid, group_dn):
            raise exception.NotFound("User %s is not a member of the group" %
                                     (uid,))
        self._safe_remove_from_group(group_dn, uid)
-
- def _safe_remove_from_group(self, group_dn, uid):
- # FIXME(vish): what if deleted user is a project manager?
- attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
- try:
- self.conn.modify_s(group_dn, attr)
- except self.OBJECT_CLASS_VIOLATION:
- logging.debug("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead." % group_dn )
- self.delete_group(group_dn)
-
- def remove_from_all(self, uid):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all "
- "because the user doesn't exist" % (uid,))
- dn = self.__uid_to_dn(uid)
- role_dns = self.find_group_dns_with_member(
- FLAGS.role_ldap_subtree, uid)
- for role_dn in role_dns:
- self._safe_remove_from_group(role_dn, uid)
- project_dns = self.find_group_dns_with_member(
- FLAGS.project_ldap_subtree, uid)
- for project_dn in project_dns:
- self._safe_remove_from_group(project_dn, uid)
-
- def create_key_pair(self, uid, key_name, public_key, fingerprint):
- """create's a public key in the directory underneath the user"""
- # TODO(vish): possibly refactor this to store keys in their own ou
- # and put dn reference in the user object
- attr = [
- ('objectclass', ['novaKeyPair']),
- ('cn', [key_name]),
- ('sshPublicKey', [public_key]),
- ('keyFingerprint', [fingerprint]),
- ]
- self.conn.add_s('cn=%s,%s' % (key_name,
- self.__uid_to_dn(uid)),
- attr)
- return self.__to_key_pair(uid, dict(attr))
-
- def find_user_by_access_key(self, access):
- query = '(accessKey=%s)' % access
- dn = FLAGS.user_ldap_subtree
- return self.__to_user(self.find_object(dn, query))
-
- def delete_user(self, uid):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s doesn't exist" % uid)
- self.delete_key_pairs(uid)
- self.remove_from_all(uid)
- self.conn.delete_s('uid=%s,%s' % (uid,
- FLAGS.user_ldap_subtree))
-
- def delete_key_pair(self, uid, key_name):
- if not self.key_pair_exists(uid, key_name):
- raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
- (key_name, uid))
- self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
- FLAGS.user_ldap_subtree))
-
    def delete_group(self, group_dn):
        """Delete the group at group_dn, raising NotFound if absent."""
        if not self.group_exists(group_dn):
            raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
        self.conn.delete_s(group_dn)
-
- def delete_roles(self, project_dn):
- roles = self.find_roles(project_dn)
- for role in roles:
- self.delete_group('cn=%s,%s' % (role.id, project_dn))
-
    def delete_project(self, name):
        """Delete a project group along with all of its role groups."""
        project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree)
        self.delete_roles(project_dn)
        self.delete_group(project_dn)
-
- def __to_user(self, attr):
- if attr == None:
- return None
- return User(
- id = attr['uid'][0],
- name = attr['cn'][0],
- access = attr['accessKey'][0],
- secret = attr['secretKey'][0],
- admin = (attr['isAdmin'][0] == 'TRUE')
- )
-
- def __to_key_pair(self, owner, attr):
- if attr == None:
- return None
- return KeyPair(
- id = attr['cn'][0],
- owner_id = owner,
- public_key = attr['sshPublicKey'][0],
- fingerprint = attr['keyFingerprint'][0],
- )
-
- def __to_group(self, attr):
- if attr == None:
- return None
- member_dns = attr.get('member', [])
- return Group(
- id = attr['cn'][0],
- description = attr.get('description', [None])[0],
- member_ids = [self.__dn_to_uid(x) for x in member_dns]
- )
-
- def __to_project(self, attr):
- if attr == None:
- return None
- member_dns = attr.get('member', [])
- return Project(
- id = attr['cn'][0],
- project_manager_id = self.__dn_to_uid(attr['projectManager'][0]),
- description = attr.get('description', [None])[0],
- member_ids = [self.__dn_to_uid(x) for x in member_dns]
- )
-
- def __dn_to_uid(self, dn):
- return dn.split(',')[0].split('=')[1]
-
- def __uid_to_dn(self, dn):
- return 'uid=%s,%s' % (dn, FLAGS.user_ldap_subtree)
diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py
index a5f78a16d..0bffe9aa3 100644
--- a/nova/cloudpipe/api.py
+++ b/nova/cloudpipe/api.py
@@ -25,7 +25,7 @@ import tornado.web
import urllib
from nova import crypto
-from nova.auth import users
+from nova.auth import manager
_log = logging.getLogger("api")
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 63f7ae222..5b0ed3471 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -31,7 +31,7 @@ import zipfile
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
@@ -44,7 +44,7 @@ flags.DEFINE_string('boot_script_template',
class CloudPipe(object):
def __init__(self, cloud_controller):
self.controller = cloud_controller
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
logging.debug( "Launching VPN for %s" % (project_id))
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 08a22556e..1ffcca685 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -40,7 +40,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
formatted as ext2.
In the diagram below, dashes represent drive sectors.
- 0 a b c d e
+ +-----+------. . .-------+------. . .------+
+ | 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partition | local partition |
+-----+------. . .-------+------. . .------+
@@ -64,8 +65,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
last_sector = local_last # e
# create an empty file
- execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, last_sector, sector_size))
+ yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
# make mbr partition
yield execute('parted --script %s mklabel msdos' % outfile)
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index 48e07da66..861ce779b 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -29,7 +29,7 @@ from nova import flags
FLAGS=flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
- '/etc/nova-dhcpbridge.conf',
+ '/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
def execute(cmd, addl_env=None):
diff --git a/nova/compute/model.py b/nova/compute/model.py
index cda188183..212830d3c 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -40,9 +40,11 @@ True
True
"""
+import datetime
import logging
import time
import redis
+import uuid
from nova import datastore
from nova import exception
@@ -228,6 +230,78 @@ class Daemon(datastore.BasicModel):
for x in cls.associated_to("host", hostname):
yield x
class SessionToken(datastore.BasicModel):
    """This is a short-lived auth token that is passed through web requests"""

    def __init__(self, session_token):
        # The token string itself is the datastore identifier.
        self.token = session_token
        self.default_ttl = FLAGS.auth_token_ttl
        super(SessionToken, self).__init__()

    @property
    def identifier(self):
        """BasicModel hook: the token string is the unique id."""
        return self.token

    def default_state(self):
        """Initial record: no user/session yet, expiry = now + default ttl."""
        now = datetime.datetime.utcnow()
        diff = datetime.timedelta(seconds=self.default_ttl)
        expires = now + diff
        return {'user': None, 'session_type': None, 'token': self.token,
                'expiry': expires.strftime(utils.TIME_FORMAT)}

    def save(self):
        """Call into superclass to save object, then save associations"""
        if not self['user']:
            raise exception.Invalid("SessionToken requires a User association")
        success = super(SessionToken, self).save()
        if success:
            self.associate_with("user", self['user'])
        # NOTE(review): reconstructed from a patch dump with collapsed
        # indentation — confirm whether this return belongs inside the
        # "if success" branch.
        return True

    @classmethod
    def lookup(cls, key):
        """Fetch a token by id; expired tokens are destroyed and hidden."""
        token = super(SessionToken, cls).lookup(key)
        if token:
            expires_at = utils.parse_isotime(token['expiry'])
            if datetime.datetime.utcnow() >= expires_at:
                token.destroy()
                return None
        return token

    @classmethod
    def generate(cls, userid, session_type=None):
        """make a new token for the given user"""
        # Loop guards against the (unlikely) uuid4 collision with a live token.
        token = str(uuid.uuid4())
        while cls.lookup(token):
            token = str(uuid.uuid4())
        instance = cls(token)
        instance['user'] = userid
        instance['session_type'] = session_type
        instance.save()
        return instance

    def update_expiry(self, **kwargs):
        """Updates the expiry attribute, but doesn't save.

        kwargs are timedelta arguments (seconds=..., etc.); defaults to the
        token's default ttl when none are given.
        """
        if not kwargs:
            kwargs['seconds'] = self.default_ttl
        time = datetime.datetime.utcnow()
        diff = datetime.timedelta(**kwargs)
        expires = time + diff
        self['expiry'] = expires.strftime(utils.TIME_FORMAT)

    def is_expired(self):
        """Return True if the stored expiry time has passed."""
        now = datetime.datetime.utcnow()
        expires = utils.parse_isotime(self['expiry'])
        return expires <= now

    def ttl(self):
        """number of seconds remaining before expiration"""
        now = datetime.datetime.utcnow()
        expires = utils.parse_isotime(self['expiry'])
        delta = expires - now
        # Combine the days and seconds components of the timedelta.
        return (delta.seconds + (delta.days * 24 * 3600))
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 43011f696..62d892e58 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -29,7 +29,7 @@ from nova import datastore
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import exception as compute_exception
from nova.compute import linux_net
@@ -144,7 +144,7 @@ class Vlan(datastore.BasicModel):
@datastore.absorb_connection_error
def destroy(self):
set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hdel(set_name, self.project)
+ datastore.Redis.instance().hdel(set_name, self.project_id)
def subnet(self):
vlan = int(self.vlan_id)
@@ -210,11 +210,11 @@ class BaseNetwork(datastore.BasicModel):
@property
def user(self):
- return users.UserManager.instance().get_user(self['user_id'])
+ return manager.AuthManager().get_user(self['user_id'])
@property
def project(self):
- return users.UserManager.instance().get_project(self['project_id'])
+ return manager.AuthManager().get_project(self['project_id'])
@property
def _hosts_key(self):
@@ -516,7 +516,7 @@ def get_vlan_for_project(project_id):
if not known_vlans.has_key(vstr):
return Vlan.create(project_id, vnum)
old_project_id = known_vlans[vstr]
- if not users.UserManager.instance().get_project(old_project_id):
+ if not manager.AuthManager().get_project(old_project_id):
vlan = Vlan.lookup(old_project_id)
if vlan:
# NOTE(todd): This doesn't check for vlan id match, because
@@ -529,6 +529,7 @@ def get_vlan_for_project(project_id):
# don't orphan any VLANs. It is basically
# garbage collection for after projects abandoned
# their reference.
+ vlan.destroy()
vlan.project_id = project_id
vlan.save()
return vlan
@@ -542,7 +543,7 @@ def get_network_by_interface(iface, security_group='default'):
def get_network_by_address(address):
logging.debug("Get Network By Address: %s" % address)
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
@@ -582,7 +583,7 @@ def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
# TODO(todd): It looks goofy to get a project from a UserManager.
# Refactor to still use the LDAP backend, but not User specific.
- project = users.UserManager.instance().get_project(project_id)
+ project = manager.AuthManager().get_project(project_id)
if not project:
raise exception.Error("Project %s doesn't exist, uhoh." %
project_id)
@@ -592,5 +593,5 @@ def get_project_network(project_id, security_group='default'):
def restart_nets():
""" Ensure the network for each user is enabled"""
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
get_project_network(project.id).express()
diff --git a/nova/compute/node.py b/nova/compute/service.py
index 533670b12..9b162edc7 100644
--- a/nova/compute/node.py
+++ b/nova/compute/service.py
@@ -17,31 +17,34 @@
# under the License.
"""
-Compute Node:
+Compute Service:
- Runs on each compute node, managing the
+ Runs on each compute host, managing the
hypervisor using the virt module.
"""
import base64
+import json
import logging
import os
import sys
from twisted.internet import defer
from twisted.internet import task
-from twisted.application import service
from nova import exception
from nova import flags
from nova import process
+from nova import service
from nova import utils
+from nova.compute import disk
from nova.compute import model
from nova.compute import network
from nova.compute import power_state
from nova.compute.instance_types import INSTANCE_TYPES
+from nova.objectstore import image # for image_path flag
from nova.virt import connection as virt_connection
-from nova.volume import storage
+from nova.volume import service as volume_service
FLAGS = flags.FLAGS
@@ -49,13 +52,13 @@ flags.DEFINE_string('instances_path', utils.abspath('../instances'),
'where instances are stored on disk')
-class Node(object, service.Service):
+class ComputeService(service.Service):
"""
Manages the running instances.
"""
def __init__(self):
""" load configuration options for this node and connect to the hypervisor"""
- super(Node, self).__init__()
+ super(ComputeService, self).__init__()
self._instances = {}
self._conn = virt_connection.get_connection()
self.instdir = model.InstanceDirectory()
@@ -174,29 +177,33 @@ class Node(object, service.Service):
@exception.wrap_exception
def attach_volume(self, instance_id = None,
volume_id = None, mountpoint = None):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
yield self._init_aoe()
- yield utils.runthis("Attached Volume: %s",
- "sudo virsh attach-disk %s /dev/etherd/%s %s"
- % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
+ yield process.simple_execute(
+ "sudo virsh attach-disk %s /dev/etherd/%s %s" %
+ (instance_id,
+ volume['aoe_device'],
+ mountpoint.rpartition('/dev/')[2]))
volume.finish_attach()
defer.returnValue(True)
+ @defer.inlineCallbacks
def _init_aoe(self):
- utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
- utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
+ yield process.simple_execute("sudo aoe-discover")
+ yield process.simple_execute("sudo aoe-stat")
+ @defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
- utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
- % (instance_id, target))
+ yield process.simple_execute(
+ "sudo virsh detach-disk %s %s " % (instance_id, target))
volume.finish_detach()
- return defer.succeed(True)
+ defer.returnValue(True)
class Group(object):
diff --git a/nova/datastore.py b/nova/datastore.py
index e57177e04..660ad9d90 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -184,21 +184,19 @@ class BasicModel(object):
@absorb_connection_error
def add_to_index(self):
+ """Each insance of Foo has its id tracked int the set named Foos"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().sadd(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
+ """Remove id of this instance from the set tracking ids of this type"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def associate_with(self, foreign_type, foreign_id):
+ """Add this class id into the set foreign_type:foreign_id:this_types"""
# note the extra 's' on the end is for plurality
# to match the old data without requiring a migration of any sort
self.add_associated_model_to_its_set(foreign_type, foreign_id)
@@ -208,21 +206,24 @@ class BasicModel(object):
@absorb_connection_error
def unassociate_with(self, foreign_type, foreign_id):
+ """Delete from foreign_type:foreign_id:this_types set"""
redis_set = self.__class__._redis_association_name(foreign_type,
foreign_id)
Redis.instance().srem(redis_set, self.identifier)
- def add_associated_model_to_its_set(self, my_type, my_id):
+ def add_associated_model_to_its_set(self, model_type, model_id):
+ """
+ When associating an X to a Y, save Y for newer timestamp, etc, and to
+ make sure to save it if Y is a new record.
+ If the model_type isn't found as a usable class, ignore it, this can
+ happen when associating to things stored in LDAP (user, project, ...).
+ """
table = globals()
- klsname = my_type.capitalize()
+ klsname = model_type.capitalize()
if table.has_key(klsname):
- my_class = table[klsname]
- my_inst = my_class(my_id)
- my_inst.save()
- else:
- logging.warning("no model class for %s when building"
- " association from %s",
- klsname, self)
+ model_class = table[klsname]
+ model_inst = model_class(model_id)
+ model_inst.save()
@absorb_connection_error
def save(self):
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index b97a6727f..55a8e4238 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -22,7 +22,7 @@ Admin API controller, exposed through http via the api worker.
import base64
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
def user_dict(user, base64_file=None):
@@ -69,18 +69,18 @@ class AdminController(object):
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
- return user_dict(users.UserManager.instance().get_user(name))
+ return user_dict(manager.AuthManager().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in users.UserManager.instance().get_users()] }
+ [user_dict(u) for u in manager.AuthManager().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
- return user_dict(users.UserManager.instance().create_user(name))
+ return user_dict(manager.AuthManager().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -88,7 +88,7 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
- users.UserManager.instance().delete_user(name)
+ manager.AuthManager().delete_user(name)
return True
@@ -100,8 +100,8 @@ class AdminController(object):
"""
if project is None:
project = name
- project = users.UserManager.instance().get_project(project)
- user = users.UserManager.instance().get_user(name)
+ project = manager.AuthManager().get_project(project)
+ user = manager.AuthManager().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index 79a2aaddb..8915e4742 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -35,7 +35,7 @@ from nova import crypto
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
import nova.cloudpipe.api
from nova.endpoint import cloud
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 48e47018a..21581ffd2 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -23,7 +23,6 @@ datastore.
"""
import base64
-import json
import logging
import os
import time
@@ -35,13 +34,13 @@ from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.compute import network
-from nova.compute import node
from nova.compute.instance_types import INSTANCE_TYPES
+from nova.compute import service as compute_service
from nova.endpoint import images
-from nova.volume import storage
+from nova.volume import service as volume_service
FLAGS = flags.FLAGS
@@ -49,10 +48,10 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
- """ Tuck this into UserManager """
+ """ Tuck this into AuthManager """
try:
- manager = users.UserManager.instance()
- private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
+ mgr = manager.AuthManager()
+ private_key, fingerprint = mgr.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint}
@@ -77,7 +76,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -297,8 +296,8 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
- # TODO(vish): refactor this to create the volume object here and tell storage to create it
- res = rpc.call(FLAGS.storage_topic, {"method": "create_volume",
+ # TODO(vish): refactor this to create the volume object here and tell service to create it
+ res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
@@ -332,7 +331,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -579,7 +578,7 @@ class CloudController(object):
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
- # TODO: Make the NetworkComputeNode figure out the network name from ip.
+ # TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))
@@ -629,8 +628,8 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
+ volume_node = volume['node_name']
+ rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py
index 9208ddab7..de05ba2da 100644
--- a/nova/endpoint/rackspace.py
+++ b/nova/endpoint/rackspace.py
@@ -34,12 +34,11 @@ from nova import exception
from nova import flags
from nova import rpc
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.endpoint import images
from nova.endpoint import wsgi
-from nova.volume import storage
FLAGS = flags.FLAGS
@@ -78,11 +77,11 @@ class Api(object):
def build_context(self, env):
rv = {}
if env.has_key("HTTP_X_AUTH_TOKEN"):
- rv['user'] = users.UserManager.instance().get_user_from_access_key(
+ rv['user'] = manager.AuthManager().get_user_from_access_key(
env['HTTP_X_AUTH_TOKEN']
)
if rv['user']:
- rv['project'] = users.UserManager.instance().get_project(
+ rv['project'] = manager.AuthManager().get_project(
rv['user'].name
)
return rv
diff --git a/nova/exception.py b/nova/exception.py
index bda002d1e..52497a19e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -44,6 +44,12 @@ class Duplicate(Error):
class NotAuthorized(Error):
pass
+class NotEmpty(Error):
+ pass
+
+class Invalid(Error):
+ pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
diff --git a/nova/flags.py b/nova/flags.py
index dc8fc9d10..f35f5fa10 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -41,11 +41,12 @@ DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
-DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on')
+DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
+DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
+
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
-DEFINE_bool('fake_users', False, 'use fake users')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
@@ -74,6 +75,8 @@ DEFINE_string('vpn_key_suffix',
'-key',
'Suffix to add to project name for vpn key')
+DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
+
# UNUSED
DEFINE_string('node_availability_zone',
'nova',
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
new file mode 100644
index 000000000..dcc54db09
--- /dev/null
+++ b/nova/network/__init__.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`nova.network` -- Network Nodes
+=====================================================
+
+.. automodule:: nova.network
+ :platform: Unix
+ :synopsis: Network is responsible for managing networking
+.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
+.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
+.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
+.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
+.. moduleauthor:: Manish Singh <yosh@gimp.org>
+.. moduleauthor:: Andy Smith <andy@anarkystic.com>
+"""
diff --git a/nova/network/service.py b/nova/network/service.py
new file mode 100644
index 000000000..9d87e05e6
--- /dev/null
+++ b/nova/network/service.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Network nodes are responsible for allocating IP addresses and setting up the network
+"""
+
+import logging
+
+from nova import flags
+from nova import service
+
+
+FLAGS = flags.FLAGS
+
+class NetworkService(service.Service):
+ """Allocates ips and sets up networks"""
+
+ def __init__(self):
+ logging.debug("Network node working")
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index 090ef4e61..b42a96233 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -107,7 +107,7 @@ class Bucket(object):
try:
return context.user.is_admin() or self.owner_id == context.project.id
except Exception, e:
- pass
+ return False
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
@@ -161,7 +161,7 @@ class Bucket(object):
def delete(self):
if len(os.listdir(self.path)) > 0:
- raise exception.NotAuthorized()
+ raise exception.NotEmpty()
os.rmdir(self.path)
os.remove(self.path+'.json')
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 098e7a167..344d75f6b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -47,12 +47,12 @@ import urllib
from twisted.application import internet, service
from twisted.web.resource import Resource
-from twisted.web import server, static
+from twisted.web import server, static, error
from nova import exception
from nova import flags
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
from nova.objectstore import bucket
from nova.objectstore import image
@@ -107,10 +107,14 @@ def get_context(request):
if not authorization_header:
raise exception.NotAuthorized
access, sep, secret = authorization_header.split(' ')[1].rpartition(':')
- um = users.UserManager.instance()
- print 'um %s' % um
- (user, project) = um.authenticate(access, secret, {}, request.method, request.host, request.uri, False)
- # FIXME: check signature here!
+ (user, project) = manager.AuthManager().authenticate(access,
+ secret,
+ {},
+ request.method,
+ request.getRequestHostname(),
+ request.uri,
+ headers=request.getAllHeaders(),
+ check_type='s3')
return api.APIRequestContext(None, user, project)
except exception.Error as ex:
logging.debug("Authentication Failure: %s" % ex)
@@ -134,15 +138,15 @@ class S3(ErrorHandlingResource):
"""Implementation of an S3-like storage server based on local files."""
def getChild(self, name, request):
request.context = get_context(request)
-
if name == '':
return self
elif name == '_images':
- return ImageResource()
+ return ImagesResource()
else:
return BucketResource(name)
def render_GET(self, request):
+ logging.debug('List of buckets requested')
buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@@ -164,7 +168,10 @@ class BucketResource(ErrorHandlingResource):
def render_GET(self, request):
logging.debug("List keys for bucket %s" % (self.name))
- bucket_object = bucket.Bucket(self.name)
+ try:
+ bucket_object = bucket.Bucket(self.name)
+ except exception.NotFound, e:
+ return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
raise exception.NotAuthorized
@@ -180,13 +187,10 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
logging.debug("Creating bucket %s" % (self.name))
- try:
- print 'user is %s' % request.context
- except Exception as e:
- logging.exception(e)
logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context))
bucket.Bucket.create(self.name, request.context)
- return ''
+ request.finish()
+ return server.NOT_DONE_YET
def render_DELETE(self, request):
logging.debug("Deleting bucket %s" % (self.name))
@@ -244,13 +248,19 @@ class ObjectResource(ErrorHandlingResource):
class ImageResource(ErrorHandlingResource):
isLeaf = True
+ def __init__(self, name):
+ Resource.__init__(self)
+ self.img = image.Image(name)
+
+ def render_GET(self, request):
+ return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request)
+
+class ImagesResource(Resource):
def getChild(self, name, request):
if name == '':
return self
else:
- request.setHeader("Content-Type", "application/octet-stream")
- img = image.Image(name)
- return static.File(img.image_path)
+ return ImageResource(name)
def render_GET(self, request):
""" returns a json listing of all images
@@ -312,9 +322,13 @@ class ImageResource(ErrorHandlingResource):
request.setResponseCode(204)
return ''
-def get_application():
+def get_site():
root = S3()
- factory = server.Site(root)
+ site = server.Site(root)
+ return site
+
+def get_application():
+ factory = get_site()
application = service.Application("objectstore")
objectStoreService = internet.TCPServer(FLAGS.s3_port, factory)
objectStoreService.setServiceParent(application)
diff --git a/nova/service.py b/nova/service.py
new file mode 100644
index 000000000..96281bc6b
--- /dev/null
+++ b/nova/service.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Generic node base class for all workers that run on hosts
+"""
+
+import inspect
+import logging
+import os
+
+from twisted.internet import defer
+from twisted.internet import task
+from twisted.application import service
+
+from nova import datastore
+from nova import flags
+from nova import rpc
+from nova.compute import model
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('report_interval', 10,
+ 'seconds between nodes reporting state to cloud',
+ lower_bound=1)
+
+class Service(object, service.Service):
+ """Base class for workers that run on hosts"""
+
+ @classmethod
+ def create(cls,
+ report_interval=None, # defaults to flag
+ bin_name=None, # defaults to basename of executable
+ topic=None): # defaults to basename - "nova-" part
+ """Instantiates class and passes back application object"""
+ if not report_interval:
+ # NOTE(vish): set here because if it is set to flag in the
+ # parameter list, it wrongly uses the default
+ report_interval = FLAGS.report_interval
+ # NOTE(vish): magic to automatically determine bin_name and topic
+ if not bin_name:
+ bin_name = os.path.basename(inspect.stack()[-1][1])
+ if not topic:
+ topic = bin_name.rpartition("nova-")[2]
+ logging.warn("Starting %s node" % topic)
+ node_instance = cls()
+
+ conn = rpc.Connection.instance()
+ consumer_all = rpc.AdapterConsumer(
+ connection=conn,
+ topic='%s' % topic,
+ proxy=node_instance)
+
+ consumer_node = rpc.AdapterConsumer(
+ connection=conn,
+ topic='%s.%s' % (topic, FLAGS.node_name),
+ proxy=node_instance)
+
+ pulse = task.LoopingCall(node_instance.report_state,
+ FLAGS.node_name,
+ bin_name)
+ pulse.start(interval=report_interval, now=False)
+
+ consumer_all.attach_to_twisted()
+ consumer_node.attach_to_twisted()
+
+ # This is the parent service that twistd will be looking for when it
+ # parses this file, return it so that we can get it into globals below
+ application = service.Application(bin_name)
+ node_instance.setServiceParent(application)
+ return application
+
+ @defer.inlineCallbacks
+ def report_state(self, nodename, daemon):
+        # TODO(termie): make this pattern more elegant. -todd
+ try:
+ record = model.Daemon(nodename, daemon)
+ record.heartbeat()
+ if getattr(self, "model_disconnected", False):
+ self.model_disconnected = False
+ logging.error("Recovered model server connection!")
+
+ except datastore.ConnectionError, ex:
+ if not getattr(self, "model_disconnected", False):
+ self.model_disconnected = True
+ logging.exception("model server went away")
+ yield
diff --git a/nova/test.py b/nova/test.py
index 5dcf0b9b0..6fbcab5e4 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -156,9 +156,9 @@ class BaseTestCase(TrialTestCase):
Example (callback chain, ugly):
- d = self.node.terminate_instance(instance_id) # a Deferred instance
+ d = self.compute.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
- d_desc = self.node.describe_instances() # another Deferred instance
+ d_desc = self.compute.describe_instances() # another Deferred instance
return d_desc
def _checkDescribe(rv):
self.assertEqual(rv, [])
@@ -169,8 +169,8 @@ class BaseTestCase(TrialTestCase):
Example (inline callbacks! yay!):
- yield self.node.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ yield self.compute.terminate_instance(instance_id)
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv, [])
If the test fits the Inline Callbacks pattern we will automatically
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
index 6cf7e893d..fa0a090a0 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/access_unittest.py
@@ -22,7 +22,7 @@ import logging
from nova import exception
from nova import flags
from nova import test
-from nova.auth.users import UserManager
+from nova.auth import manager
from nova.auth import rbac
@@ -35,7 +35,7 @@ class AccessTestCase(test.BaseTestCase):
super(AccessTestCase, self).setUp()
FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
- um = UserManager.instance()
+ um = manager.AuthManager()
# Make test users
try:
self.testadmin = um.create_user('testadmin')
@@ -79,7 +79,7 @@ class AccessTestCase(test.BaseTestCase):
#user is set in each test
def tearDown(self):
- um = UserManager.instance()
+ um = manager.AuthManager()
# Delete the test project
um.delete_project('testproj')
# Delete the test user
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index e5e2afe26..9d072866c 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -26,7 +26,7 @@ from twisted.internet import defer
from nova import flags
from nova import test
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
@@ -43,7 +43,11 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
connection should be a FakeTornadoHttpConnection instance
"""
- headers = httpserver.HTTPHeaders()
+ try:
+ headers = httpserver.HTTPHeaders()
+ except AttributeError:
+ from tornado import httputil
+ headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
@@ -150,7 +154,7 @@ class ApiEc2TestCase(test.BaseTestCase):
def setUp(self):
super(ApiEc2TestCase, self).setUp()
- self.users = users.UserManager.instance()
+ self.manager = manager.AuthManager()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
@@ -175,25 +179,22 @@ class ApiEc2TestCase(test.BaseTestCase):
def test_describe_instances(self):
self.expect_http()
self.mox.ReplayAll()
- try:
- self.users.create_user('fake', 'fake', 'fake')
- except Exception, _err:
- pass # User may already exist
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
self.assertEqual(self.ec2.get_all_instances(), [])
- self.users.delete_user('fake')
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
def test_get_all_key_pairs(self):
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
- try:
- self.users.create_user('fake', 'fake', 'fake')
- except Exception, _err:
- pass # User may already exist
- self.users.generate_key_pair('fake', keyname)
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+ self.manager.generate_key_pair(user.id, keyname)
rv = self.ec2.get_all_key_pairs()
self.assertTrue(filter(lambda k: k.name == keyname, rv))
- self.users.delete_user('fake')
-
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
diff --git a/nova/tests/users_unittest.py b/nova/tests/auth_unittest.py
index 824d5cff6..073ff71d2 100644
--- a/nova/tests/users_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -25,41 +25,41 @@ import unittest
from nova import crypto
from nova import flags
from nova import test
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import cloud
FLAGS = flags.FLAGS
-class UserTestCase(test.BaseTestCase):
+class AuthTestCase(test.BaseTestCase):
flush_db = False
def setUp(self):
- super(UserTestCase, self).setUp()
+ super(AuthTestCase, self).setUp()
self.flags(connection_type='fake',
fake_storage=True)
- self.users = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def test_001_can_create_users(self):
- self.users.create_user('test1', 'access', 'secret')
- self.users.create_user('test2')
+ self.manager.create_user('test1', 'access', 'secret')
+ self.manager.create_user('test2')
def test_002_can_get_user(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
def test_003_can_retreive_properties(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
self.assertEqual('test1', user.id)
self.assertEqual('access', user.access)
self.assertEqual('secret', user.secret)
def test_004_signature_is_valid(self):
- #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? ))
+ #self.assertTrue(self.manager.authenticate( **boto.generate_url ... ? ? ? ))
pass
#raise NotImplementedError
def test_005_can_get_credentials(self):
return
- credentials = self.users.get_user('test1').get_credentials()
+ credentials = self.manager.get_user('test1').get_credentials()
self.assertEqual(credentials,
'export EC2_ACCESS_KEY="access"\n' +
'export EC2_SECRET_KEY="secret"\n' +
@@ -68,14 +68,14 @@ class UserTestCase(test.BaseTestCase):
'export EC2_USER_ID="test1"\n')
def test_006_test_key_storage(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
user.create_key_pair('public', 'key', 'fingerprint')
key = user.get_key_pair('public')
self.assertEqual('key', key.public_key)
self.assertEqual('fingerprint', key.fingerprint)
def test_007_test_key_generation(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
private_key, fingerprint = user.generate_key_pair('public2')
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
@@ -87,71 +87,71 @@ class UserTestCase(test.BaseTestCase):
converted.split(" ")[1].strip())
def test_008_can_list_key_pairs(self):
- keys = self.users.get_user('test1').get_key_pairs()
+ keys = self.manager.get_user('test1').get_key_pairs()
self.assertTrue(filter(lambda k: k.name == 'public', keys))
self.assertTrue(filter(lambda k: k.name == 'public2', keys))
def test_009_can_delete_key_pair(self):
- self.users.get_user('test1').delete_key_pair('public')
- keys = self.users.get_user('test1').get_key_pairs()
+ self.manager.get_user('test1').delete_key_pair('public')
+ keys = self.manager.get_user('test1').get_key_pairs()
self.assertFalse(filter(lambda k: k.name == 'public', keys))
def test_010_can_list_users(self):
- users = self.users.get_users()
+ users = self.manager.get_users()
logging.warn(users)
self.assertTrue(filter(lambda u: u.id == 'test1', users))
def test_101_can_add_user_role(self):
- self.assertFalse(self.users.has_role('test1', 'itsec'))
- self.users.add_role('test1', 'itsec')
- self.assertTrue(self.users.has_role('test1', 'itsec'))
+ self.assertFalse(self.manager.has_role('test1', 'itsec'))
+ self.manager.add_role('test1', 'itsec')
+ self.assertTrue(self.manager.has_role('test1', 'itsec'))
def test_199_can_remove_user_role(self):
- self.assertTrue(self.users.has_role('test1', 'itsec'))
- self.users.remove_role('test1', 'itsec')
- self.assertFalse(self.users.has_role('test1', 'itsec'))
+ self.assertTrue(self.manager.has_role('test1', 'itsec'))
+ self.manager.remove_role('test1', 'itsec')
+ self.assertFalse(self.manager.has_role('test1', 'itsec'))
def test_201_can_create_project(self):
- project = self.users.create_project('testproj', 'test1', 'A test project', ['test1'])
- self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
+ project = self.manager.create_project('testproj', 'test1', 'A test project', ['test1'])
+ self.assertTrue(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
self.assertEqual(project.name, 'testproj')
self.assertEqual(project.description, 'A test project')
self.assertEqual(project.project_manager_id, 'test1')
self.assertTrue(project.has_member('test1'))
def test_202_user1_is_project_member(self):
- self.assertTrue(self.users.get_user('test1').is_project_member('testproj'))
+ self.assertTrue(self.manager.get_user('test1').is_project_member('testproj'))
def test_203_user2_is_not_project_member(self):
- self.assertFalse(self.users.get_user('test2').is_project_member('testproj'))
+ self.assertFalse(self.manager.get_user('test2').is_project_member('testproj'))
def test_204_user1_is_project_manager(self):
- self.assertTrue(self.users.get_user('test1').is_project_manager('testproj'))
+ self.assertTrue(self.manager.get_user('test1').is_project_manager('testproj'))
def test_205_user2_is_not_project_manager(self):
- self.assertFalse(self.users.get_user('test2').is_project_manager('testproj'))
+ self.assertFalse(self.manager.get_user('test2').is_project_manager('testproj'))
def test_206_can_add_user_to_project(self):
- self.users.add_to_project('test2', 'testproj')
- self.assertTrue(self.users.get_project('testproj').has_member('test2'))
+ self.manager.add_to_project('test2', 'testproj')
+ self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
def test_208_can_remove_user_from_project(self):
- self.users.remove_from_project('test2', 'testproj')
- self.assertFalse(self.users.get_project('testproj').has_member('test2'))
+ self.manager.remove_from_project('test2', 'testproj')
+ self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
self.cloud.setup()
- private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1')
- logging.debug(signed_cert_string)
+ _key, cert_str = self.manager._generate_x509_cert('test1', 'testproj')
+ logging.debug(cert_str)
# Need to verify that it's signed by the right intermediate CA
full_chain = crypto.fetch_ca(project_id='testproj', chain=True)
int_cert = crypto.fetch_ca(project_id='testproj', chain=False)
cloud_cert = crypto.fetch_ca()
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
- signed_cert = X509.load_cert_string(signed_cert_string)
+ signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
cloud_cert = X509.load_cert_string(cloud_cert)
@@ -164,42 +164,45 @@ class UserTestCase(test.BaseTestCase):
self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
def test_210_can_add_project_role(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- self.users.add_role('test1', 'sysadmin')
+ self.manager.add_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
def test_211_can_remove_project_role(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- self.users.remove_role('test1', 'sysadmin')
+ self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
def test_212_vpn_ip_and_port_looks_valid(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assert_(project.vpn_ip)
self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
def test_213_too_many_vpns(self):
- for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
- users.Vpn.create("vpnuser%s" % i)
- self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom")
+ vpns = []
+ for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
+ vpns.append(manager.Vpn.create("vpnuser%s" % i))
+ self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
+ for vpn in vpns:
+ vpn.destroy()
def test_299_can_delete_project(self):
- self.users.delete_project('testproj')
- self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
+ self.manager.delete_project('testproj')
+ self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
def test_999_can_delete_users(self):
- self.users.delete_user('test1')
- users = self.users.get_users()
+ self.manager.delete_user('test1')
+ users = self.manager.get_users()
self.assertFalse(filter(lambda u: u.id == 'test1', users))
- self.users.delete_user('test2')
- self.assertEqual(self.users.get_user('test2'), None)
+ self.manager.delete_user('test2')
+ self.assertEqual(self.manager.get_user('test2'), None)
if __name__ == "__main__":
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 8040f6331..40837405c 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -27,8 +27,8 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
-from nova.auth import users
-from nova.compute import node
+from nova.auth import manager
+from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud
@@ -40,8 +40,7 @@ class CloudTestCase(test.BaseTestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.flags(connection_type='fake',
- fake_storage=True,
- fake_users=True)
+ fake_storage=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
@@ -53,34 +52,34 @@ class CloudTestCase(test.BaseTestCase):
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
- # set up a node
- self.node = node.Node()
- self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
+ # set up a service
+ self.compute = service.ComputeService()
+ self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
- proxy=self.node)
- self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
+ proxy=self.compute)
+ self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
- users.UserManager.instance().create_user('admin', 'admin', 'admin')
+ manager.AuthManager().create_user('admin', 'admin', 'admin')
except: pass
- admin = users.UserManager.instance().get_user('admin')
- project = users.UserManager.instance().create_project('proj', 'admin', 'proj')
+ admin = manager.AuthManager().get_user('admin')
+ project = manager.AuthManager().create_project('proj', 'admin', 'proj')
self.context = api.APIRequestContext(handler=None,project=project,user=admin)
def tearDown(self):
- users.UserManager.instance().delete_project('proj')
- users.UserManager.instance().delete_user('admin')
+ manager.AuthManager().delete_project('proj')
+ manager.AuthManager().delete_user('admin')
def test_console_output(self):
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
- inst = yield self.node.run_instance(instance_id)
+ inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
@@ -112,7 +111,7 @@ class CloudTestCase(test.BaseTestCase):
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
- rv = yield self.node.terminate_instance(instance['instance_id'])
+ rv = yield self.compute.terminate_instance(instance['instance_id'])
def test_instance_update_state(self):
def instance(num):
diff --git a/nova/tests/node_unittest.py b/nova/tests/compute_unittest.py
index 86d9775fd..da0f82e3a 100644
--- a/nova/tests/node_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -26,7 +26,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
-from nova.compute import node
+from nova.compute import service
FLAGS = flags.FLAGS
@@ -53,14 +53,13 @@ class InstanceXmlTestCase(test.TrialTestCase):
# rv = yield first_node.terminate_instance(instance_id)
-class NodeConnectionTestCase(test.TrialTestCase):
+class ComputeConnectionTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
- super(NodeConnectionTestCase, self).setUp()
+ super(ComputeConnectionTestCase, self).setUp()
self.flags(connection_type='fake',
- fake_storage=True,
- fake_users=True)
- self.node = node.Node()
+ fake_storage=True)
+ self.compute = service.ComputeService()
def create_instance(self):
instdir = model.InstanceDirectory()
@@ -81,48 +80,48 @@ class NodeConnectionTestCase(test.TrialTestCase):
def test_run_describe_terminate(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- yield self.node.reboot_instance(instance_id)
+ yield self.compute.reboot_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- console = yield self.node.get_console_output(instance_id)
+ console = yield self.compute.get_console_output(instance_id)
self.assert_(console)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- self.assertRaises(exception.Error, self.node.run_instance, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 5fcd2bcac..a7310fb26 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -24,5 +24,5 @@ FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
-FLAGS.fake_users = True
+FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.verbose = True
diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py
deleted file mode 100644
index 31ec83065..000000000
--- a/nova/tests/future_unittest.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import mox
-import StringIO
-import time
-from tornado import ioloop
-from twisted.internet import defer
-import unittest
-from xml.etree import ElementTree
-
-from nova import cloud
-from nova import exception
-from nova import flags
-from nova import node
-from nova import rpc
-from nova import test
-
-
-FLAGS = flags.FLAGS
-
-
-class AdminTestCase(test.BaseTestCase):
- def setUp(self):
- super(AdminTestCase, self).setUp()
- self.flags(connection_type='fake',
- fake_rabbit=True)
-
- self.conn = rpc.Connection.instance()
-
- logging.getLogger().setLevel(logging.INFO)
-
- # set up our cloud
- self.cloud = cloud.CloudController()
- self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.cloud_topic,
- proxy=self.cloud)
- self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
-
- # set up a node
- self.node = node.Node()
- self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.compute_topic,
- proxy=self.node)
- self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
-
- def test_flush_terminated(self):
- # Launch an instance
-
- # Wait until it's running
-
- # Terminate it
-
- # Wait until it's terminated
-
- # Flush terminated nodes
-
- # ASSERT that it's gone
- pass
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index b9eb2ac96..6825cfe2a 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from datetime import datetime, timedelta
import logging
import time
from twisted.internet import defer
@@ -25,7 +26,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
-from nova.compute import node
FLAGS = flags.FLAGS
@@ -35,8 +35,7 @@ class ModelTestCase(test.TrialTestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.flags(connection_type='fake',
- fake_storage=True,
- fake_users=True)
+ fake_storage=True)
def tearDown(self):
model.Instance('i-test').destroy()
@@ -66,6 +65,12 @@ class ModelTestCase(test.TrialTestCase):
daemon.save()
return daemon
+    def create_session_token(self):
+        """Create and persist a fixed-id SessionToken for use by the tests.
+
+        Uses the hard-coded token id 'tk12341234' and owner 'testuser' so
+        individual tests can look the record up again by those values.
+        """
+        session_token = model.SessionToken('tk12341234')
+        session_token['user'] = 'testuser'
+        session_token.save()
+        return session_token
+
@defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
@@ -204,3 +209,91 @@ class ModelTestCase(test.TrialTestCase):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
+
+    @defer.inlineCallbacks
+    def test_create_session_token(self):
+        """create, then verify reloading by token finds the saved record"""
+        d = yield self.create_session_token()
+        d = model.SessionToken(d.token)
+        self.assertFalse(d.is_new_record())
+
+    @defer.inlineCallbacks
+    def test_delete_session_token(self):
+        """create, then destroy, then make sure loads a new record"""
+        instance = yield self.create_session_token()
+        yield instance.destroy()
+        newinst = yield model.SessionToken(instance.token)
+        self.assertTrue(newinst.is_new_record())
+
+    @defer.inlineCallbacks
+    def test_session_token_added_to_set(self):
+        """create, then check that it is included in list"""
+        instance = yield self.create_session_token()
+        found = False
+        for x in model.SessionToken.all():
+            if x.identifier == instance.token:
+                found = True
+        self.assert_(found)
+
+    @defer.inlineCallbacks
+    def test_session_token_associates_user(self):
+        """create, then check that it is listed for the user"""
+        instance = yield self.create_session_token()
+        found = False
+        for x in model.SessionToken.associated_to('user', 'testuser'):
+            if x.identifier == instance.identifier:
+                found = True
+        self.assertTrue(found)
+
+    @defer.inlineCallbacks
+    def test_session_token_generation(self):
+        """generate() produces an already-persisted token"""
+        instance = yield model.SessionToken.generate('username', 'TokenType')
+        self.assertFalse(instance.is_new_record())
+
+    @defer.inlineCallbacks
+    def test_find_generated_session_token(self):
+        """a generated token can be found again via lookup()"""
+        instance = yield model.SessionToken.generate('username', 'TokenType')
+        found = yield model.SessionToken.lookup(instance.identifier)
+        self.assert_(found)
+
+    def test_update_session_token_expiry(self):
+        """update_expiry() pushes the expiry timestamp into the future"""
+        instance = model.SessionToken('tk12341234')
+        oldtime = datetime.utcnow()
+        instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
+        instance.update_expiry()
+        expiry = utils.parse_isotime(instance['expiry'])
+        self.assert_(expiry > datetime.utcnow())
+
+    @defer.inlineCallbacks
+    def test_session_token_lookup_when_expired(self):
+        """lookup() of an already-expired token returns nothing"""
+        # NOTE(review): generate() is called without a token type here,
+        # unlike the two-argument calls above -- presumably the second
+        # parameter has a default; confirm against the model.
+        instance = yield model.SessionToken.generate("testuser")
+        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
+        instance.save()
+        inst = model.SessionToken.lookup(instance.identifier)
+        self.assertFalse(inst)
+
+    @defer.inlineCallbacks
+    def test_session_token_lookup_when_not_expired(self):
+        """lookup() of a fresh token succeeds"""
+        instance = yield model.SessionToken.generate("testuser")
+        inst = model.SessionToken.lookup(instance.identifier)
+        self.assert_(inst)
+
+    @defer.inlineCallbacks
+    def test_session_token_is_expired_when_expired(self):
+        """a token whose expiry is now reports itself expired"""
+        instance = yield model.SessionToken.generate("testuser")
+        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
+        self.assert_(instance.is_expired())
+
+    @defer.inlineCallbacks
+    def test_session_token_is_expired_when_not_expired(self):
+        """a fresh token does not report itself expired"""
+        instance = yield model.SessionToken.generate("testuser")
+        self.assertFalse(instance.is_expired())
+
+    @defer.inlineCallbacks
+    def test_session_token_ttl(self):
+        """ttl() of a token expiring in one hour matches the flag"""
+        # NOTE(review): this assumes FLAGS.auth_token_ttl is one hour
+        # (3600 seconds) -- confirm against the flag's default.
+        instance = yield model.SessionToken.generate("testuser")
+        now = datetime.utcnow()
+        delta = timedelta(hours=1)
+        instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
+        # give 5 seconds of fuzziness
+        self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 69278e896..f24eefb0d 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -23,7 +23,7 @@ import logging
from nova import flags
from nova import test
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import network
from nova.compute.exception import NoMoreAddresses
@@ -32,66 +32,71 @@ FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
+ # NOTE(vish): if you change these flags, make sure to change the
+ # flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_storage=True,
fake_network=True,
+ auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
self.dnsmasq = FakeDNSMasq()
- try:
- self.manager.create_user('netuser', 'netuser', 'netuser')
- except: pass
+ self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
+ self.projects = []
+ self.projects.append(self.manager.create_project('netuser',
+ 'netuser',
+ 'netuser'))
for i in range(0, 6):
name = 'project%s' % i
- if not self.manager.get_project(name):
- self.manager.create_project(name, 'netuser', name)
+ self.projects.append(self.manager.create_project(name,
+ 'netuser',
+ name))
self.network = network.PublicNetworkController()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
- for i in range(0, 6):
- name = 'project%s' % i
- self.manager.delete_project(name)
- self.manager.delete_user('netuser')
+ for project in self.projects:
+ self.manager.delete_project(project)
+ self.manager.delete_user(self.user)
def test_public_network_allocation(self):
pubnet = IPy.IP(flags.FLAGS.public_range)
- address = self.network.allocate_ip("netuser", "project0", "public")
+ address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public")
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
- "netuser", "project0", utils.generate_mac())
+ self.user.id, self.projects[0].id, utils.generate_mac())
logging.debug("Was allocated %s" % (address))
- net = network.get_project_network("project0", "default")
- self.assertEqual(True, is_in_project(address, "project0"))
+ net = network.get_project_network(self.projects[0].id, "default")
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
# Doesn't go away until it's dhcp released
- self.assertEqual(True, is_in_project(address, "project0"))
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
mac = utils.generate_mac()
secondmac = utils.generate_mac()
hostname = "test-host"
address = network.allocate_ip(
- "netuser", "project0", mac)
+ self.user.id, self.projects[0].id, mac)
secondaddress = network.allocate_ip(
- "netuser", "project1", secondmac)
- net = network.get_project_network("project0", "default")
- secondnet = network.get_project_network("project1", "default")
+ self.user, self.projects[1].id, secondmac)
+ net = network.get_project_network(self.projects[0].id, "default")
+ secondnet = network.get_project_network(self.projects[1].id, "default")
- self.assertEqual(True, is_in_project(address, "project0"))
- self.assertEqual(True, is_in_project(secondaddress, "project1"))
- self.assertEqual(False, is_in_project(address, "project1"))
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
+ self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
+ self.assertEqual(False, is_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
@@ -100,34 +105,34 @@ class NetworkTestCase(test.TrialTestCase):
rv = network.deallocate_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
- self.assertEqual(True, is_in_project(secondaddress, "project1"))
+ self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
- self.assertEqual(False, is_in_project(secondaddress, "project1"))
+ self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
- secondaddress = network.allocate_ip("netuser", "project0",
+ secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
utils.generate_mac())
hostname = "toomany-hosts"
- for project in range(1,5):
- project_id = "project%s" % (project)
+ for i in range(1,5):
+ project_id = self.projects[i].id
mac = utils.generate_mac()
mac2 = utils.generate_mac()
mac3 = utils.generate_mac()
address = network.allocate_ip(
- "netuser", project_id, mac)
+ self.user, project_id, mac)
address2 = network.allocate_ip(
- "netuser", project_id, mac2)
+ self.user, project_id, mac2)
address3 = network.allocate_ip(
- "netuser", project_id, mac3)
- self.assertEqual(False, is_in_project(address, "project0"))
- self.assertEqual(False, is_in_project(address2, "project0"))
- self.assertEqual(False, is_in_project(address3, "project0"))
+ self.user, project_id, mac3)
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
+ self.assertEqual(False, is_in_project(address2, self.projects[0].id))
+ self.assertEqual(False, is_in_project(address3, self.projects[0].id))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
@@ -135,7 +140,7 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
- net = network.get_project_network("project0", "default")
+ net = network.get_project_network(self.projects[0].id, "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
@@ -153,34 +158,36 @@ class NetworkTestCase(test.TrialTestCase):
environment's setup.
Network size is set in test fixture's setUp method.
-
+
There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS)
And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary
services (gateway, CloudPipe, etc)
- So we should get flags.network_size - (NUM_STATIC_IPS +
- NUM_PREALLOCATED_IPS +
+ So we should get flags.network_size - (NUM_STATIC_IPS +
+ NUM_PREALLOCATED_IPS +
NUM_RESERVED_VPN_IPS)
usable addresses
"""
- net = network.get_project_network("project0", "default")
+ net = network.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
num_preallocated_ips = len(net.hosts.keys())
num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients
- num_available_ips = flags.FLAGS.network_size - (num_static_ips + num_preallocated_ips + num_reserved_vpn_ips)
+ num_available_ips = flags.FLAGS.network_size - (num_static_ips +
+ num_preallocated_ips +
+ num_reserved_vpn_ips)
hostname = "toomany-hosts"
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
macs[i] = utils.generate_mac()
- addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
+ addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
- self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
+ self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
for i in range(0, (num_available_ips - 1)):
rv = network.deallocate_ip(addresses[i])
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index f22256aaf..dd00377e7 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import boto
import glob
import hashlib
import logging
@@ -28,8 +29,13 @@ from nova import objectstore
from nova.objectstore import bucket # for buckets_path flag
from nova.objectstore import image # for images_path flag
from nova import test
-from nova.auth import users
+from nova.auth import manager
+from nova.objectstore.handler import S3
+from nova.exception import NotEmpty, NotFound, NotAuthorized
+from boto.s3.connection import S3Connection, OrdinaryCallingFormat
+from twisted.internet import reactor, threads, defer
+from twisted.web import http, server
FLAGS = flags.FLAGS
@@ -53,13 +59,12 @@ os.makedirs(os.path.join(oss_tempdir, 'buckets'))
class ObjectStoreTestCase(test.BaseTestCase):
def setUp(self):
super(ObjectStoreTestCase, self).setUp()
- self.flags(fake_users=True,
- buckets_path=os.path.join(oss_tempdir, 'buckets'),
+ self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)
- self.um = users.UserManager.instance()
+ self.um = manager.AuthManager()
try:
self.um.create_user('user1')
except: pass
@@ -98,49 +103,37 @@ class ObjectStoreTestCase(test.BaseTestCase):
# another user is not authorized
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
- self.assert_(bucket.is_authorized(self.context) == False)
+ self.assertFalse(bucket.is_authorized(self.context))
# admin is authorized to use bucket
self.context.user = self.um.get_user('admin_user')
self.context.project = None
- self.assert_(bucket.is_authorized(self.context))
+ self.assertTrue(bucket.is_authorized(self.context))
# new buckets are empty
- self.assert_(bucket.list_keys()['Contents'] == [])
+ self.assertTrue(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
- self.assert_(len(bucket.list_keys()['Contents']) == 1)
+ self.assertEquals(len(bucket.list_keys()['Contents']), 1)
- self.assert_(bucket['foo'].read() == 'bar')
+ self.assertEquals(bucket['foo'].read(), 'bar')
# md5 of key works
- self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest())
-
- # deleting non-empty bucket throws exception
- exception = False
- try:
- bucket.delete()
- except:
- exception = True
+ self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
- self.assert_(exception)
+ # deleting non-empty bucket should throw a NotEmpty exception
+ self.assertRaises(NotEmpty, bucket.delete)
# deleting key
del bucket['foo']
- # deleting empty button
+ # deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
- exception = False
- try:
- objectstore.bucket.Bucket('new_bucket')
- except:
- exception = True
-
- self.assert_(exception)
+ self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
self.context.user = self.um.get_user('user1')
@@ -169,37 +162,108 @@ class ObjectStoreTestCase(test.BaseTestCase):
# verify image permissions
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
- self.assert_(my_img.is_authorized(self.context) == False)
-
-# class ApiObjectStoreTestCase(test.BaseTestCase):
-# def setUp(self):
-# super(ApiObjectStoreTestCase, self).setUp()
-# FLAGS.fake_users = True
-# FLAGS.buckets_path = os.path.join(tempdir, 'buckets')
-# FLAGS.images_path = os.path.join(tempdir, 'images')
-# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA')
-#
-# self.users = users.UserManager.instance()
-# self.app = handler.Application(self.users)
-#
-# self.host = '127.0.0.1'
-#
-# self.conn = boto.s3.connection.S3Connection(
-# aws_access_key_id=user.access,
-# aws_secret_access_key=user.secret,
-# is_secure=False,
-# calling_format=boto.s3.connection.OrdinaryCallingFormat(),
-# port=FLAGS.s3_port,
-# host=FLAGS.s3_host)
-#
-# self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
-#
-# def tearDown(self):
-# FLAGS.Reset()
-# super(ApiObjectStoreTestCase, self).tearDown()
-#
-# def test_describe_instances(self):
-# self.expect_http()
-# self.mox.ReplayAll()
-#
-# self.assertEqual(self.ec2.get_all_instances(), [])
+ self.assertFalse(my_img.is_authorized(self.context))
+
+
+class TestHTTPChannel(http.HTTPChannel):
+    """HTTP channel that refuses persistent (keep-alive) connections.
+
+    A connection kept open past the end of a test would leave the
+    twisted reactor unclean, so every request closes its connection.
+    """
+
+    # Otherwise we end up with an unclean reactor
+    def checkPersistence(self, _, __):
+        return False
+
+
+class TestSite(server.Site):
+    """Site whose connections never persist, keeping the reactor clean."""
+    protocol = TestHTTPChannel
+
+
+class S3APITestCase(test.TrialTestCase):
+    """Exercises the S3 API via a real boto client against an
+    in-process twisted web server running the S3 handler."""
+
+    def setUp(self):
+        super(S3APITestCase, self).setUp()
+
+        # NOTE(review): the trailing comma in the original assignment
+        # made this flag a one-element tuple instead of the driver
+        # string; drop it.
+        FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+        FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
+
+        self.um = manager.AuthManager()
+        self.admin_user = self.um.create_user('admin', admin=True)
+        self.admin_project = self.um.create_project('admin', self.admin_user)
+
+        # Start from an empty buckets directory.
+        shutil.rmtree(FLAGS.buckets_path)
+        os.mkdir(FLAGS.buckets_path)
+
+        root = S3()
+        self.site = TestSite(root)
+        # Port 0 lets the OS pick a free port; remember it for boto.
+        self.listening_port = reactor.listenTCP(0, self.site,
+                                                interface='127.0.0.1')
+        self.tcp_port = self.listening_port.getHost().port
+
+        if not boto.config.has_section('Boto'):
+            boto.config.add_section('Boto')
+        boto.config.set('Boto', 'num_retries', '0')
+        self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
+                                 aws_secret_access_key=self.admin_user.secret,
+                                 host='127.0.0.1',
+                                 port=self.tcp_port,
+                                 is_secure=False,
+                                 calling_format=OrdinaryCallingFormat())
+
+        # Don't attempt to reuse connections
+        def get_http_connection(host, is_secure):
+            return self.conn.new_http_connection(host, is_secure)
+        self.conn.get_http_connection = get_http_connection
+
+    def _ensure_empty_list(self, l):
+        """Assert l is empty; returns True so callback chains continue."""
+        self.assertEquals(len(l), 0, "List was not empty")
+        return True
+
+    def _ensure_only_bucket(self, l, name):
+        """Assert l contains exactly one bucket, named name."""
+        self.assertEquals(len(l), 1,
+                          "List didn't have exactly one element in it")
+        self.assertEquals(l[0].name, name, "Wrong name")
+
+    def test_000_list_buckets(self):
+        d = threads.deferToThread(self.conn.get_all_buckets)
+        d.addCallback(self._ensure_empty_list)
+        return d
+
+    def test_001_create_and_delete_bucket(self):
+        bucket_name = 'testbucket'
+
+        d = threads.deferToThread(self.conn.create_bucket, bucket_name)
+        d.addCallback(lambda _:
+                      threads.deferToThread(self.conn.get_all_buckets))
+        # Reuse the shared helper rather than redefining it locally.
+        d.addCallback(self._ensure_only_bucket, bucket_name)
+        d.addCallback(lambda _:
+                      threads.deferToThread(self.conn.delete_bucket,
+                                            bucket_name))
+        d.addCallback(lambda _:
+                      threads.deferToThread(self.conn.get_all_buckets))
+        d.addCallback(self._ensure_empty_list)
+        return d
+
+    def test_002_create_bucket_and_key_and_delete_key_again(self):
+        bucket_name = 'testbucket'
+        key_name = 'somekey'
+        key_contents = 'somekey'
+
+        d = threads.deferToThread(self.conn.create_bucket, bucket_name)
+        d.addCallback(lambda b: threads.deferToThread(b.new_key, key_name))
+        d.addCallback(lambda k: threads.deferToThread(
+            k.set_contents_from_string, key_contents))
+
+        def ensure_key_contents(bucket_name, key_name, contents):
+            bucket = self.conn.get_bucket(bucket_name)
+            key = bucket.get_key(key_name)
+            self.assertEquals(key.get_contents_as_string(), contents,
+                              "Bad contents")
+        d.addCallback(lambda _: threads.deferToThread(
+            ensure_key_contents, bucket_name, key_name, key_contents))
+
+        def delete_key(bucket_name, key_name):
+            bucket = self.conn.get_bucket(bucket_name)
+            key = bucket.get_key(key_name)
+            key.delete()
+        d.addCallback(lambda _: threads.deferToThread(
+            delete_key, bucket_name, key_name))
+        d.addCallback(lambda _: threads.deferToThread(self.conn.get_bucket,
+                                                      bucket_name))
+        d.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
+        d.addCallback(self._ensure_empty_list)
+        return d
+
+    def tearDown(self):
+        self.um.delete_user('admin')
+        self.um.delete_project('admin')
+        # NOTE(review): the original returned before calling
+        # super().tearDown(), leaving that call unreachable.  Build the
+        # deferred, run the parent teardown, then hand the deferred
+        # back to trial so it waits for the port to stop listening.
+        stop = defer.DeferredList(
+            [defer.maybeDeferred(self.listening_port.stopListening)])
+        super(S3APITestCase, self).tearDown()
+        return stop
diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py
index 690fb640a..121f4eb41 100644
--- a/nova/tests/real_flags.py
+++ b/nova/tests/real_flags.py
@@ -24,5 +24,4 @@ FLAGS.connection_type = 'libvirt'
FLAGS.fake_storage = False
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
-FLAGS.fake_users = False
FLAGS.verbose = False
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
new file mode 100644
index 000000000..b536ac383
--- /dev/null
+++ b/nova/tests/volume_unittest.py
@@ -0,0 +1,115 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import test
+from nova.volume import service as volume_service
+
+
+FLAGS = flags.FLAGS
+
+
+class VolumeTestCase(test.TrialTestCase):
+    """Tests for the volume service: create, delete, attach, detach."""
+
+    def setUp(self):
+        logging.getLogger().setLevel(logging.DEBUG)
+        super(VolumeTestCase, self).setUp()
+        # Set the fake flags *before* constructing the services so both
+        # pick up the fake connection/storage implementations.
+        self.flags(connection_type='fake',
+                   fake_storage=True)
+        self.compute = compute.service.ComputeService()
+        self.volume = volume_service.VolumeService()
+
+    def test_run_create_volume(self):
+        """A created volume can be found; a deleted one cannot."""
+        vol_size = '0'
+        user_id = 'fake'
+        project_id = 'fake'
+        volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+        # TODO(termie): get_volume returns differently than create_volume
+        self.assertEqual(volume_id,
+                         volume_service.get_volume(volume_id)['volume_id'])
+
+        rv = self.volume.delete_volume(volume_id)
+        self.assertRaises(exception.Error,
+                          volume_service.get_volume,
+                          volume_id)
+
+    def test_too_big_volume(self):
+        """Creating an oversized volume is rejected."""
+        vol_size = '1001'
+        user_id = 'fake'
+        project_id = 'fake'
+        self.assertRaises(TypeError,
+                          self.volume.create_volume,
+                          vol_size, user_id, project_id)
+
+    def test_too_many_volumes(self):
+        """Filling every AoE shelf slot makes the next create fail."""
+        vol_size = '1'
+        user_id = 'fake'
+        project_id = 'fake'
+        num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+        total_slots = FLAGS.slots_per_shelf * num_shelves
+        vols = []
+        for i in xrange(total_slots):
+            vols.append(self.volume.create_volume(vol_size, user_id,
+                                                  project_id))
+        self.assertRaises(volume_service.NoMoreVolumes,
+                          self.volume.create_volume,
+                          vol_size, user_id, project_id)
+        for volume_id in vols:
+            self.volume.delete_volume(volume_id)
+
+    @defer.inlineCallbacks
+    def test_run_attach_detach_volume(self):
+        """Attaching/detaching updates the volume's status and metadata.
+
+        NOTE(review): the original used yield without the
+        inlineCallbacks decorator, so trial never executed the
+        generator body and the test silently passed.
+        """
+        # Create one volume and one compute to test with
+        instance_id = "storage-test"
+        vol_size = "5"
+        user_id = "fake"
+        project_id = 'fake'
+        mountpoint = "/dev/sdf"
+        volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+
+        volume_obj = volume_service.get_volume(volume_id)
+        volume_obj.start_attach(instance_id, mountpoint)
+        rv = yield self.compute.attach_volume(volume_id,
+                                              instance_id,
+                                              mountpoint)
+        self.assertEqual(volume_obj['status'], "in-use")
+        self.assertEqual(volume_obj['attachStatus'], "attached")
+        self.assertEqual(volume_obj['instance_id'], instance_id)
+        self.assertEqual(volume_obj['mountpoint'], mountpoint)
+
+        # An attached volume must not be deletable.
+        self.assertRaises(exception.Error,
+                          self.volume.delete_volume,
+                          volume_id)
+
+        rv = yield self.volume.detach_volume(volume_id)
+        volume_obj = volume_service.get_volume(volume_id)
+        self.assertEqual(volume_obj['status'], "available")
+
+        rv = self.volume.delete_volume(volume_id)
+        self.assertRaises(exception.Error,
+                          volume_service.get_volume,
+                          volume_id)
+
+    def test_multi_node(self):
+        # TODO(termie): Figure out how to test with two nodes,
+        # each of them having a different FLAG for storage_node
+        # This will allow us to test cross-node interactions
+        pass
diff --git a/nova/twistd.py b/nova/twistd.py
index 32a46ce03..ecb6e2892 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import logging
-import logging.handlers
import os
import signal
import sys
@@ -32,7 +31,6 @@ from twisted.python import log
from twisted.python import reflect
from twisted.python import runtime
from twisted.python import usage
-import UserDict
from nova import flags
@@ -161,6 +159,13 @@ def WrapTwistedOptions(wrapped):
except (AttributeError, KeyError):
self._data[key] = value
+        def get(self, key, default):
+            """Look an option up, preferring the FLAGS value.
+
+            Falls back to the locally stored option data when no flag
+            of that name exists; returns default when the key is in
+            neither.
+            """
+            key = key.replace('-', '_')
+            try:
+                return getattr(FLAGS, key)
+            except (AttributeError, KeyError):
+                # NOTE(review): the original dropped this result
+                # (missing return), so every non-flag key came back
+                # as None regardless of default.
+                return self._data.get(key, default)
+
return TwistedOptionsToFlags
@@ -209,9 +214,14 @@ def serve(filename):
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
-
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
+ elif FLAGS.logfile.endswith('twistd.log'):
+ FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
+ if not FLAGS.prefix:
+ FLAGS.prefix = name
+ elif FLAGS.prefix.endswith('twisted'):
+ FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
action = 'start'
if len(argv) > 1:
@@ -228,8 +238,16 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- formatter = logging.Formatter(
- name + '(%(name)s): %(levelname)s %(message)s')
+ class NoNewlineFormatter(logging.Formatter):
+ """Strips newlines from default formatter"""
+ def format(self, record):
+ """Grabs default formatter's output and strips newlines"""
+ data = logging.Formatter.format(self, record)
+ return data.replace("\n", "--")
+
+ # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well
+ formatter = NoNewlineFormatter(
+ '(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
@@ -239,11 +257,6 @@ def serve(filename):
else:
logging.getLogger().setLevel(logging.WARNING)
- if FLAGS.syslog:
- syslog = logging.handlers.SysLogHandler(address='/dev/log')
- syslog.setFormatter(formatter)
- logging.getLogger().addHandler(syslog)
-
logging.debug("Full set of FLAGS:")
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
diff --git a/nova/utils.py b/nova/utils.py
index 9ecceafe0..a1eb0a092 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
-from datetime import datetime
+from datetime import datetime, timedelta
import inspect
import logging
import os
@@ -32,7 +32,7 @@ import sys
from nova import flags
FLAGS = flags.FLAGS
-
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
@@ -118,4 +118,7 @@ def get_my_ip():
def isotime(at=None):
if not at:
at = datetime.utcnow()
- return at.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return at.strftime(TIME_FORMAT)
+
+def parse_isotime(timestr):
+    """Parse a timestamp produced by isotime() back into a datetime."""
+    return datetime.strptime(timestr, TIME_FORMAT)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 12338fd80..92210e242 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -22,9 +22,11 @@ Handling of VM disk images.
"""
import os.path
+import time
from nova import flags
from nova import process
+from nova.auth import signer
FLAGS = flags.FLAGS
@@ -32,18 +34,34 @@ flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
-def fetch(image, path):
+def fetch(image, path, user):
if FLAGS.use_s3:
f = _fetch_s3_image
else:
f = _fetch_local_image
- return f(image, path)
+ return f(image, path, user)
-def _fetch_s3_image(image, path):
+def _fetch_s3_image(image, path, user):
url = _image_url('%s/image' % image)
- return process.simple_execute('curl --silent %s -o %s' % (url, path))
-def _fetch_local_image(image, path):
+ # This should probably move somewhere else, like e.g. a download_as
+ # method on User objects and at the same time get rewritten to use
+ # twisted web client.
+ headers = {}
+ headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+
+ uri = '/' + url.partition('/')[2]
+ auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri)
+ headers['Authorization'] = 'AWS %s:%s' % (user.access, auth)
+
+ cmd = ['/usr/bin/curl', '--silent', url]
+ for (k,v) in headers.iteritems():
+ cmd += ['-H', '%s: %s' % (k,v)]
+
+ cmd += ['-o', path]
+ return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+
+def _fetch_local_image(image, path, _):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 2c34711bc..c545e4190 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -34,6 +34,7 @@ from nova import exception
from nova import flags
from nova import process
from nova import utils
+from nova.auth import manager
from nova.compute import disk
from nova.compute import instance_types
from nova.compute import power_state
@@ -185,12 +186,13 @@ class LibvirtConnection(object):
f.write(libvirt_xml)
f.close()
+ user = manager.AuthManager().get_user(data['user_id'])
if not os.path.exists(basepath('disk')):
- yield images.fetch(data['image_id'], basepath('disk-raw'))
+ yield images.fetch(data['image_id'], basepath('disk-raw'), user)
if not os.path.exists(basepath('kernel')):
- yield images.fetch(data['kernel_id'], basepath('kernel'))
+ yield images.fetch(data['kernel_id'], basepath('kernel'), user)
if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(data['ramdisk_id'], basepath('ramdisk'))
+ yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user)
execute = lambda cmd, input=None: \
process.simple_execute(cmd=cmd,
diff --git a/nova/volume/storage.py b/nova/volume/service.py
index de20f30b5..87a47f40a 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/service.py
@@ -28,13 +28,14 @@ import os
import shutil
import socket
import tempfile
-import time
-from tornado import ioloop
+
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
+from nova import process
+from nova import service
from nova import utils
from nova import validate
@@ -48,13 +49,13 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('storage_name',
socket.gethostname(),
- 'name of this node')
+ 'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
- 'AoE starting shelf_id for this node')
+ 'AoE starting shelf_id for this service')
flags.DEFINE_integer('last_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10 + 9,
- 'AoE starting shelf_id for this node')
+ 'AoE ending shelf_id for this service')
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
@@ -63,7 +64,7 @@ flags.DEFINE_integer('slots_per_shelf',
'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
- 'availability zone of this node')
+ 'availability zone of this service')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
@@ -80,14 +81,14 @@ def get_volume(volume_id):
return volume_class(volume_id=volume_id)
raise exception.Error("Volume does not exist")
-class BlockStore(object):
+class VolumeService(service.Service):
"""
- There is one BlockStore running on each volume node.
- However, each BlockStore can report on the state of
+ There is one VolumeService running on each host.
+ However, each VolumeService can report on the state of
*all* volumes in the cluster.
"""
def __init__(self):
- super(BlockStore, self).__init__()
+ super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
FLAGS.aoe_export_dir = tempfile.mkdtemp()
@@ -102,10 +103,6 @@ class BlockStore(object):
except Exception, err:
pass
- def report_state(self):
- #TODO: aggregate the state of the system
- pass
-
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
"""
@@ -143,17 +140,24 @@ class BlockStore(object):
datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
+ @defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
- utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all")
- utils.runthis("Starting all exports: %s", "sudo vblade-persist start all")
+ yield process.simple_execute(
+ "sudo vblade-persist auto all")
+ yield process.simple_execute(
+ "sudo vblade-persist start all")
+ @defer.inlineCallbacks
def _init_volume_group(self):
if FLAGS.fake_storage:
return
- utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
- utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
+ yield process.simple_execute(
+ "sudo pvcreate %s" % (FLAGS.storage_dev))
+ yield process.simple_execute(
+ "sudo vgcreate %s %s" % (FLAGS.volume_group,
+ FLAGS.storage_dev))
class Volume(datastore.BasicModel):
@@ -227,15 +231,22 @@ class Volume(datastore.BasicModel):
self._delete_lv()
super(Volume, self).destroy()
+ @defer.inlineCallbacks
def create_lv(self):
if str(self['size']) == '0':
sizestr = '100M'
else:
sizestr = '%sG' % self['size']
- utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group))
+ yield process.simple_execute(
+ "sudo lvcreate -L %s -n %s %s" % (sizestr,
+ self['volume_id'],
+ FLAGS.volume_group))
+ @defer.inlineCallbacks
def _delete_lv(self):
- utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
+ yield process.simple_execute(
+ "sudo lvremove -f %s/%s" % (FLAGS.volume_group,
+ self['volume_id']))
def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
@@ -245,8 +256,9 @@ class Volume(datastore.BasicModel):
self.save()
self._exec_export()
+ @defer.inlineCallbacks
def _exec_export(self):
- utils.runthis("Creating AOE export: %s",
+ yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
@@ -254,9 +266,14 @@ class Volume(datastore.BasicModel):
FLAGS.volume_group,
self['volume_id']))
+ @defer.inlineCallbacks
def _remove_export(self):
- utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
- utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']))
+ yield process.simple_execute(
+ "sudo vblade-persist stop %s %s" % (self['shelf_id'],
+ self['blade_id']))
+ yield process.simple_execute(
+ "sudo vblade-persist destroy %s %s" % (self['shelf_id'],
+ self['blade_id']))
class FakeVolume(Volume):