summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorTodd Willey <todd@rubidine.com>2010-07-14 19:32:07 -0400
committerTodd Willey <todd@rubidine.com>2010-07-14 19:32:07 -0400
commitb01f71ae7ba387a5e7ff2c6ad4a4775f9ab6e22b (patch)
treeb44acd06b38171b1492fe44a536c2b067b163b79 /nova
parent6bba6d3b7c63054acf7828cba7eb87b0f1d84d1c (diff)
parent5e8337aec03f5a697c90779eb66a457aae4e7ae0 (diff)
Merge branch 'master' into apply_api
Conflicts: nova/compute/node.py nova/volume/storage.py
Diffstat (limited to 'nova')
-rw-r--r--nova/auth/fakeldap.py267
-rwxr-xr-xnova/auth/slap.sh6
-rw-r--r--nova/auth/users.py232
-rw-r--r--nova/cloudpipe/pipelib.py5
-rw-r--r--nova/compute/disk.py4
-rw-r--r--nova/compute/node.py14
-rw-r--r--nova/crypto.py2
-rw-r--r--nova/endpoint/cloud.py28
-rw-r--r--nova/flags.py3
-rw-r--r--nova/rpc.py28
-rw-r--r--nova/tests/objectstore_unittest.py2
-rw-r--r--nova/tests/storage_unittest.py17
-rw-r--r--nova/volume/storage.py84
13 files changed, 454 insertions, 238 deletions
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 27dde314d..116fcbb78 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -18,128 +18,207 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
- Fake LDAP server for test harnesses.
+Fake LDAP server for test harnesses.
+
+This class does very little error checking, and knows nothing about ldap
+class definitions. It implements the minimum emulation of the python ldap
+library to work with nova.
"""
-import logging
+import json
from nova import datastore
-SCOPE_SUBTREE = 1
+
+SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
-SUBS = {
- 'groupOfNames': ['novaProject']
-}
-
class NO_SUCH_OBJECT(Exception):
pass
+class OBJECT_CLASS_VIOLATION(Exception):
+ pass
+
+
def initialize(uri):
- return FakeLDAP(uri)
+ return FakeLDAP()
+
+
+def _match_query(query, attrs):
+ """Match an ldap query to an attribute dictionary.
+
+ &, |, and ! are supported in the query. No syntax checking is performed,
+ so malformed queries will not work correctly.
+
+ """
+ # cut off the parentheses
+ inner = query[1:-1]
+ if inner.startswith('&'):
+ # cut off the &
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) and _match_query(r, attrs)
+ if inner.startswith('|'):
+ # cut off the |
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) or _match_query(r, attrs)
+ if inner.startswith('!'):
+ # cut off the ! and the nested parentheses
+ return not _match_query(query[2:-1], attrs)
+
+ (k, sep, v) = inner.partition('=')
+ return _match(k, v, attrs)
+
+
+def _paren_groups(source):
+ """Split a string into parenthesized groups."""
+ count = 0
+ start = 0
+ result = []
+ for pos in xrange(len(source)):
+ if source[pos] == '(':
+ if count == 0:
+ start = pos
+ count += 1
+ if source[pos] == ')':
+ count -= 1
+ if count == 0:
+ result.append(source[start:pos+1])
+ return result
+
+
+def _match(k, v, attrs):
+ """Match a given key and value against an attribute list."""
+ if k not in attrs:
+ return False
+ if k != "objectclass":
+ return v in attrs[k]
+ # it is an objectclass check, so check subclasses
+ values = _subs(v)
+ for value in values:
+ if value in attrs[k]:
+ return True
+ return False
+
+
+def _subs(value):
+ """Returns a list of subclass strings.
+
+ The strings represent the ldap objectclass plus any subclasses that
+ inherit from it. Fakeldap doesn't know about the ldap object structure,
+ so subclasses need to be defined manually in the dictionary below.
+
+ """
+ subs = {'groupOfNames': ['novaProject']}
+ if value in subs:
+ return [value] + subs[value]
+ return [value]
+
+
+def _from_json(encoded):
+ """Convert attribute values from json representation.
+
+ Args:
+ encoded -- a json encoded string
+
+ Returns a list of strings
+
+ """
+ return [str(x) for x in json.loads(encoded)]
+
+
+def _to_json(unencoded):
+ """Convert attribute values into json representation.
+
+ Args:
+ unencoded -- an unencoded string or list of strings. If it
+ is a single string, it will be converted into a list.
+
+ Returns a json string
+
+ """
+ return json.dumps(list(unencoded))
class FakeLDAP(object):
- def __init__(self, _uri):
- self.keeper = datastore.Keeper('fakeldap')
- if self.keeper['objects'] is None:
- self.keeper['objects'] = {}
+ #TODO(vish): refactor this class to use a wrapper instead of accessing
+ # redis directly
def simple_bind_s(self, dn, password):
+ """This method is ignored, but provided for compatibility."""
pass
def unbind_s(self):
+ """This method is ignored, but provided for compatibility."""
pass
- def _paren_groups(self, source):
- count = 0
- start = 0
- result = []
- for pos in xrange(len(source)):
- if source[pos] == '(':
- if count == 0:
- start = pos
- count += 1
- if source[pos] == ')':
- count -= 1
- if count == 0:
- result.append(source[start:pos+1])
-
- def _match_query(self, query, attrs):
- inner = query[1:-1]
- if inner.startswith('&'):
- l, r = self._paren_groups(inner[1:])
- return self._match_query(l, attrs) and self._match_query(r, attrs)
- if inner.startswith('|'):
- l, r = self._paren_groups(inner[1:])
- return self._match_query(l, attrs) or self._match_query(r, attrs)
- if inner.startswith('!'):
- return not self._match_query(query[2:-1], attrs)
-
- (k, sep, v) = inner.partition('=')
- return self._match(k, v, attrs)
-
- def _subs(self, v):
- if v in SUBS:
- return [v] + SUBS[v]
- return [v]
-
- def _match(self, k, v, attrs):
- if attrs.has_key(k):
- for v in self._subs(v):
- if (v in attrs[k]):
- return True
- return False
+ def add_s(self, dn, attr):
+ """Add an object with the specified attributes at dn."""
+ key = "%s%s" % (self.__redis_prefix, dn)
- def search_s(self, dn, scope, query=None, fields=None):
- #logging.debug("searching for %s" % dn)
- filtered = {}
- d = self.keeper['objects'] or {}
- for cn, attrs in d.iteritems():
- if cn[-len(dn):] == dn:
- filtered[cn] = attrs
- objects = filtered
- if query:
- objects = {}
- for cn, attrs in filtered.iteritems():
- if self._match_query(query, attrs):
- objects[cn] = attrs
- if objects == {}:
- raise NO_SUCH_OBJECT()
- return objects.items()
-
- def add_s(self, cn, attr):
- #logging.debug("adding %s" % cn)
- stored = {}
- for k, v in attr:
- if type(v) is list:
- stored[k] = v
- else:
- stored[k] = [v]
- d = self.keeper['objects']
- d[cn] = stored
- self.keeper['objects'] = d
-
- def delete_s(self, cn):
- logging.debug("deleting %s" % cn)
- d = self.keeper['objects']
- del d[cn]
- self.keeper['objects'] = d
-
- def modify_s(self, cn, attr):
- logging.debug("modifying %s" % cn)
- d = self.keeper['objects']
- for cmd, k, v in attr:
- logging.debug("command %s" % cmd)
+ value_dict = dict([(k, _to_json(v)) for k, v in attr])
+ datastore.Redis.instance().hmset(key, value_dict)
+
+ def delete_s(self, dn):
+ """Remove the ldap object at specified dn."""
+ datastore.Redis.instance().delete("%s%s" % (self.__redis_prefix, dn))
+
+ def modify_s(self, dn, attrs):
+ """Modify the object at dn using the attribute list.
+
+ Args:
+ dn -- a dn
+ attrs -- a list of tuples in the following form:
+ ([MOD_ADD | MOD_DELETE], attribute, value)
+
+ """
+ redis = datastore.Redis.instance()
+ key = "%s%s" % (self.__redis_prefix, dn)
+
+ for cmd, k, v in attrs:
+ values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
- d[cn][k].append(v)
+ values.append(v)
else:
- d[cn][k].remove(v)
- self.keeper['objects'] = d
+ values.remove(v)
+ values = redis.hset(key, k, _to_json(values))
+
+ def search_s(self, dn, scope, query=None, fields=None):
+ """Search for all matching objects under dn using the query.
+
+ Args:
+ dn -- dn to search under
+ scope -- only SCOPE_SUBTREE is supported
+ query -- query to filter objects by
+ fields -- fields to return. Returns all fields if not specified
+
+ """
+ if scope != SCOPE_SUBTREE:
+ raise NotImplementedError(str(scope))
+ redis = datastore.Redis.instance()
+ keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+ objects = []
+ for key in keys:
+ # get the attributes from redis
+ attrs = redis.hgetall(key)
+ # turn the values from redis into lists
+ attrs = dict([(k, _from_json(v))
+ for k, v in attrs.iteritems()])
+ # filter the objects by query
+ if not query or _match_query(query, attrs):
+ # filter the attributes by fields
+ attrs = dict([(k, v) for k, v in attrs.iteritems()
+ if not fields or k in fields])
+ objects.append((key[len(self.__redis_prefix):], attrs))
+ if objects == []:
+ raise NO_SUCH_OBJECT()
+ return objects
+ @property
+ def __redis_prefix(self):
+ return 'ldap:'
diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh
index 277ae2bcd..90dc7a9d6 100755
--- a/nova/auth/slap.sh
+++ b/nova/auth/slap.sh
@@ -221,6 +221,12 @@ objectClass: simpleSecurityObject
# create the sysadmin entry
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: sysadmins
diff --git a/nova/auth/users.py b/nova/auth/users.py
index b9d77f86f..7b703aa82 100644
--- a/nova/auth/users.py
+++ b/nova/auth/users.py
@@ -52,15 +52,21 @@ from nova import objectstore # for flags
FLAGS = flags.FLAGS
-flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server')
+flags.DEFINE_string('ldap_url', 'ldap://localhost',
+ 'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
-flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user')
+flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com',
+ 'DN of admin user')
flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
-flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users')
-flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects')
-flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Roles')
-
-# mapping with these flags is necessary because we're going to tie in to an existing ldap schema
+flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com',
+ 'OU for Users')
+flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Projects')
+flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Roles')
+
+# NOTE(vish): mapping with these flags is necessary because we're going
+# to tie in to an existing ldap schema
flags.DEFINE_string('ldap_cloudadmin',
'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
@@ -72,11 +78,15 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
-# a user with one of these roles will be a superuser and have access to all api commands
-flags.DEFINE_list('superuser_roles', ['cloudadmin'], 'roles that ignore rbac checking completely')
+# NOTE(vish): a user with one of these roles will be a superuser and
+# have access to all api commands
+flags.DEFINE_list('superuser_roles', ['cloudadmin'],
+ 'roles that ignore rbac checking completely')
-# a user with one of these roles will have it for every project, even if he or she is not a member of the project
-flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], 'roles that apply to all projects')
+# NOTE(vish): a user with one of these roles will have it for every
+# project, even if he or she is not a member of the project
+flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
+ 'roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
@@ -90,15 +100,20 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-flags.DEFINE_string('vpn_ip', '127.0.0.1', 'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_string('vpn_ip', '127.0.0.1',
+ 'Public IP for the cloudpipe VPN servers')
class AuthBase(object):
@classmethod
def safe_id(cls, obj):
- """this method will return the id of the object if the object is of this class, otherwise
- it will return the original object. This allows methods to accept objects or
- ids as paramaters"""
+ """Safe get object id.
+
+ This method will return the id of the object if the object
+ is of this class, otherwise it will return the original object.
+ This allows methods to accept objects or ids as parameters.
+
+ """
if isinstance(obj, cls):
return obj.id
else:
@@ -195,7 +210,8 @@ class User(AuthBase):
return UserManager.instance().get_key_pairs(self.id)
def __repr__(self):
- return "User('%s', '%s', '%s', '%s', %s)" % (self.id, self.name, self.access, self.secret, self.admin)
+ return "User('%s', '%s', '%s', '%s', %s)" % (
+ self.id, self.name, self.access, self.secret, self.admin)
class KeyPair(AuthBase):
def __init__(self, id, owner_id, public_key, fingerprint):
@@ -209,7 +225,8 @@ class KeyPair(AuthBase):
return UserManager.instance().delete_key_pair(self.owner, self.name)
def __repr__(self):
- return "KeyPair('%s', '%s', '%s', '%s')" % (self.id, self.owner_id, self.public_key, self.fingerprint)
+ return "KeyPair('%s', '%s', '%s', '%s')" % (
+ self.id, self.owner_id, self.public_key, self.fingerprint)
class Group(AuthBase):
"""id and name are currently the same"""
@@ -223,7 +240,8 @@ class Group(AuthBase):
return User.safe_id(user) in self.member_ids
def __repr__(self):
- return "Group('%s', '%s', %s)" % (self.id, self.description, self.member_ids)
+ return "Group('%s', '%s', %s)" % (
+ self.id, self.description, self.member_ids)
class Project(Group):
def __init__(self, id, project_manager_id, description, member_ids):
@@ -298,7 +316,9 @@ class Project(Group):
return UserManager.instance().generate_x509_cert(user, self)
def __repr__(self):
- return "Project('%s', '%s', '%s', %s)" % (self.id, self.project_manager_id, self.description, self.member_ids)
+ return "Project('%s', '%s', '%s', %s)" % (
+ self.id, self.project_manager_id,
+ self.description, self.member_ids)
class UserManager(object):
def __init__(self):
@@ -322,7 +342,9 @@ class UserManager(object):
except: pass
return cls._instance
- def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', verify_signature=True):
+ def authenticate(self, access, signature, params, verb='GET',
+ server_string='127.0.0.1:8773', path='/',
+ verify_signature=True):
# TODO: Check for valid timestamp
(access_key, sep, project_name) = access.partition(':')
@@ -335,12 +357,16 @@ class UserManager(object):
project = self.get_project(project_name)
if project == None:
- raise exception.NotFound('No project called %s could be found' % project_name)
+ raise exception.NotFound('No project called %s could be found' %
+ project_name)
if not user.is_admin() and not project.has_member(user):
- raise exception.NotFound('User %s is not a member of project %s' % (user.id, project.id))
+ raise exception.NotFound('User %s is not a member of project %s' %
+ (user.id, project.id))
if verify_signature:
- # hmac can't handle unicode, so encode ensures that secret isn't unicode
- expected_signature = signer.Signer(user.secret.encode()).generate(params, verb, server_string, path)
+ # NOTE(vish): hmac can't handle unicode, so encode ensures that
+ # secret isn't unicode
+ expected_signature = signer.Signer(user.secret.encode()).generate(
+ params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
@@ -350,7 +376,9 @@ class UserManager(object):
def has_role(self, user, role, project=None):
with LDAPWrapper() as conn:
- if project and role == 'projectmanager':
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error("Must specify project")
return self.is_project_manager(user, project)
global_role = conn.has_role(User.safe_id(user),
@@ -368,17 +396,21 @@ class UserManager(object):
def add_role(self, user, role, project=None):
with LDAPWrapper() as conn:
- return conn.add_role(User.safe_id(user), role, Project.safe_id(project))
+ return conn.add_role(User.safe_id(user), role,
+ Project.safe_id(project))
def remove_role(self, user, role, project=None):
with LDAPWrapper() as conn:
- return conn.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ return conn.remove_role(User.safe_id(user), role,
+ Project.safe_id(project))
- def create_project(self, name, manager_user, description=None, member_users=None):
+ def create_project(self, name, manager_user,
+ description=None, member_users=None):
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with LDAPWrapper() as conn:
- return conn.create_project(name, User.safe_id(manager_user), description, member_users)
+ return conn.create_project(name, User.safe_id(manager_user),
+ description, member_users)
def get_projects(self):
with LDAPWrapper() as conn:
@@ -391,7 +423,8 @@ class UserManager(object):
def add_to_project(self, user, project):
with LDAPWrapper() as conn:
- return conn.add_to_project(User.safe_id(user), Project.safe_id(project))
+ return conn.add_to_project(User.safe_id(user),
+ Project.safe_id(project))
def is_project_manager(self, user, project):
if not isinstance(project, Project):
@@ -407,7 +440,8 @@ class UserManager(object):
def remove_from_project(self, user, project):
with LDAPWrapper() as conn:
- return conn.remove_from_project(User.safe_id(user), Project.safe_id(project))
+ return conn.remove_from_project(User.safe_id(user),
+ Project.safe_id(project))
def delete_project(self, project):
with LDAPWrapper() as conn:
@@ -425,7 +459,8 @@ class UserManager(object):
with LDAPWrapper() as conn:
return conn.find_users()
- def create_user(self, user, access=None, secret=None, admin=False, create_project=True):
+ def create_user(self, user, access=None, secret=None,
+ admin=False, create_project=True):
if access == None: access = str(uuid.uuid4())
if secret == None: secret = str(uuid.uuid4())
with LDAPWrapper() as conn:
@@ -482,7 +517,7 @@ class UserManager(object):
def __cert_subject(self, uid):
# FIXME(ja) - this should be pulled from a global configuration
- return "/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
+ return "/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
class LDAPWrapper(object):
@@ -502,9 +537,12 @@ class LDAPWrapper(object):
def connect(self):
""" connect to ldap as admin user """
if FLAGS.fake_users:
+ self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT
+ self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION
self.conn = fakeldap.initialize(FLAGS.ldap_url)
else:
- assert(ldap.__name__ != 'fakeldap')
+ self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT
+ self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION
self.conn = ldap.initialize(FLAGS.ldap_url)
self.conn.simple_bind_s(self.user, self.passwd)
@@ -514,36 +552,51 @@ class LDAPWrapper(object):
return None
return objects[0]
+ def find_dns(self, dn, query=None):
+ try:
+ res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
+ except self.NO_SUCH_OBJECT:
+ return []
+ # just return the DNs
+ return [dn for dn, attributes in res]
+
def find_objects(self, dn, query = None):
try:
res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except Exception:
+ except self.NO_SUCH_OBJECT:
return []
# just return the attributes
- return [x[1] for x in res]
+ return [attributes for dn, attributes in res]
def find_users(self):
- attrs = self.find_objects(FLAGS.user_ldap_subtree, '(objectclass=novaUser)')
+ attrs = self.find_objects(FLAGS.user_ldap_subtree,
+ '(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def find_key_pairs(self, uid):
- attrs = self.find_objects(self.__uid_to_dn(uid), '(objectclass=novaKeyPair)')
+ attrs = self.find_objects(self.__uid_to_dn(uid),
+ '(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def find_projects(self):
- attrs = self.find_objects(FLAGS.project_ldap_subtree, '(objectclass=novaProject)')
+ attrs = self.find_objects(FLAGS.project_ldap_subtree,
+ '(objectclass=novaProject)')
return [self.__to_project(attr) for attr in attrs]
def find_roles(self, tree):
- attrs = self.find_objects(tree, '(&(objectclass=groupOfNames)(!(objectclass=NovaProject)))')
+ attrs = self.find_objects(tree,
+ '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
return [self.__to_group(attr) for attr in attrs]
- def find_groups_with_member(self, tree, dn):
- attrs = self.find_objects(tree, '(&(objectclass=groupOfNames)(member=%s))' % dn )
- return [self.__to_group(attr) for attr in attrs]
+ def find_group_dns_with_member(self, tree, uid):
+ dns = self.find_dns(tree,
+ '(&(objectclass=groupOfNames)(member=%s))' %
+ self.__uid_to_dn(uid))
+ return dns
def find_user(self, uid):
- attr = self.find_object(self.__uid_to_dn(uid), '(objectclass=novaUser)')
+ attr = self.find_object(self.__uid_to_dn(uid),
+ '(objectclass=novaUser)')
return self.__to_user(attr)
def find_key_pair(self, uid, key_name):
@@ -600,11 +653,14 @@ class LDAPWrapper(object):
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
- def create_project(self, name, manager_uid, description=None, member_uids=None):
+ def create_project(self, name, manager_uid,
+ description=None, member_uids=None):
if self.project_exists(name):
- raise exception.Duplicate("Project can't be created because project %s already exists" % name)
+ raise exception.Duplicate("Project can't be created because "
+ "project %s already exists" % name)
if not self.user_exists(manager_uid):
- raise exception.NotFound("Project can't be created because manager %s doesn't exist" % manager_uid)
+ raise exception.NotFound("Project can't be created because "
+ "manager %s doesn't exist" % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@@ -613,7 +669,8 @@ class LDAPWrapper(object):
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
- raise exception.NotFound("Project can't be created because user %s doesn't exist" % member_uid)
+ raise exception.NotFound("Project can't be created "
+ "because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -644,16 +701,21 @@ class LDAPWrapper(object):
if project_id == None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
- return 'cn=%s,cn=%s,%s' % (role, project_id, FLAGS.project_ldap_subtree)
+ return 'cn=%s,cn=%s,%s' % (role,
+ project_id,
+ FLAGS.project_ldap_subtree)
- def __create_group(self, group_dn, name, uid, description, member_uids = None):
+ def __create_group(self, group_dn, name, uid,
+ description, member_uids = None):
if self.group_exists(name):
- raise exception.Duplicate("Group can't be created because group %s already exists" % name)
+ raise exception.Duplicate("Group can't be created because "
+ "group %s already exists" % name)
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
- raise exception.NotFound("Group can't be created because user %s doesn't exist" % member_uid)
+ raise exception.NotFound("Group can't be created "
+ "because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -682,15 +744,12 @@ class LDAPWrapper(object):
def remove_role(self, uid, role, project_id=None):
role_dn = self.__role_to_dn(role, project_id)
- try:
- return self.remove_from_group(uid, role_dn)
- except Exception, ex:
- print type(ex), ex
-
+ return self.remove_from_group(uid, role_dn)
def is_in_group(self, uid, group_dn):
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be searched in group "
+ "becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
return False
res = self.find_object(group_dn,
@@ -699,11 +758,14 @@ class LDAPWrapper(object):
def add_to_group(self, uid, group_dn):
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be added to the group "
+ "becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" % (group_dn,))
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
if self.is_in_group(uid, group_dn):
- raise exception.Duplicate("User %s is already a member of the group %s" % (uid, group_dn))
+ raise exception.Duplicate("User %s is already a member of "
+ "the group %s" % (uid, group_dn))
attr = [
(ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
]
@@ -711,34 +773,39 @@ class LDAPWrapper(object):
def remove_from_group(self, uid, group_dn):
if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" % (group_dn,))
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from the group because the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be removed from the "
+ "group because the user doesn't exist" % (uid,))
if not self.is_in_group(uid, group_dn):
- raise exception.NotFound("User %s is not a member of the group" % (uid,))
- attr = [
- (ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))
- ]
+ raise exception.NotFound("User %s is not a member of the group" %
+ (uid,))
+ self._safe_remove_from_group(group_dn, uid)
+
+ def _safe_remove_from_group(self, group_dn, uid):
+ # FIXME(vish): what if deleted user is a project manager?
+ attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
try:
self.conn.modify_s(group_dn, attr)
- except ldap.OBJECT_CLASS_VIOLATION:
- logging.debug("Attempted to remove the last member of a group. Deleting the group instead.")
+ except self.OBJECT_CLASS_VIOLATION:
+ logging.debug("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead." % group_dn )
self.delete_group(group_dn)
def remove_from_all(self, uid):
- # FIXME(vish): what if deleted user is a project manager?
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all because the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be removed from all "
+ "because the user doesn't exist" % (uid,))
dn = self.__uid_to_dn(uid)
- attr = [
- (ldap.MOD_DELETE, 'member', dn)
- ]
- roles = self.find_groups_with_member(FLAGS.role_ldap_subtree, dn)
- for role in roles:
- self.conn.modify_s('cn=%s,%s' % (role.id, FLAGS.role_ldap_subtree), attr)
- projects = self.find_groups_with_member(FLAGS.project_ldap_subtree, dn)
- for project in projects:
- self.conn.modify_s('cn=%s,%s' % (project.id, FLAGS.project_ldap_subtree), attr)
+ role_dns = self.find_group_dns_with_member(
+ FLAGS.role_ldap_subtree, uid)
+ for role_dn in role_dns:
+ self._safe_remove_from_group(role_dn, uid)
+ project_dns = self.find_group_dns_with_member(
+ FLAGS.project_ldap_subtree, uid)
+ for project_dn in project_dns:
+ self._safe_remove_from_group(project_dn, uid)
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""create's a public key in the directory underneath the user"""
@@ -782,9 +849,8 @@ class LDAPWrapper(object):
def delete_roles(self, project_dn):
roles = self.find_roles(project_dn)
- if roles != None:
- for role in roles:
- self.delete_group('cn=%s,%s' % (role.id, project_dn))
+ for role in roles:
+ self.delete_group('cn=%s,%s' % (role.id, project_dn))
def delete_project(self, name):
project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree)
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 09da71c64..5f6ccf82e 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -76,12 +76,13 @@ class CloudPipe(object):
zippy.close()
def setup_keypair(self, user_id, project_id):
- key_name = '%s-key' % project_id
+ key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
try:
private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
- os.makedirs(key_dir)
+ if not os.path.exists(key_dir):
+ os.makedirs(key_dir)
with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
f.write(private_key)
except:
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e7090dad3..bd6a010ee 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -36,7 +36,7 @@ from nova import exception
def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
"""Takes a single partition represented by infile and writes a bootable
drive image into outfile.
-
+
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
@@ -142,5 +142,5 @@ def _inject_into_fs(key, fs, execute=None):
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- yield execute('sudo bash -c "cat >> %s"' % keyfile, '\n' + key + '\n')
+ yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
diff --git a/nova/compute/node.py b/nova/compute/node.py
index 5b664f82d..8df620a6b 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -30,7 +30,6 @@ import base64
import json
import logging
import os
-import random
import shutil
import sys
@@ -469,7 +468,7 @@ class Instance(object):
# ensure directories exist and are writable
yield self._pool.simpleExecute('mkdir -p %s' % basepath())
yield self._pool.simpleExecute('chmod 0777 %s' % basepath())
-
+
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
@@ -477,11 +476,11 @@ class Instance(object):
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
-
+
if FLAGS.fake_libvirt:
logging.info('fake_libvirt, nothing to do for create_image')
raise defer.returnValue(None);
-
+
if FLAGS.use_s3:
_fetch_file = self._fetch_s3_image
else:
@@ -508,7 +507,7 @@ class Instance(object):
* 1024 * 1024 * 1024)
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
-
+
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self):
@@ -519,7 +518,7 @@ class Instance(object):
self.set_state(Instance.NOSTATE, 'launching')
logging.info('self %s', self)
try:
- yield self._create_image(xml)
+ yield self._create_image(xml)
self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
# a callback to check for successful boot
@@ -542,8 +541,11 @@ class Instance(object):
timer.f = _wait_for_boot
timer.start(interval=0.5, now=True)
except Exception, ex:
+<<<<<<< HEAD
# FIXME(todd): this is just for debugging during testing
print "FUUUUUUUUUUUUUUUUUUUUUU: %s" % ex
+=======
+>>>>>>> master
logging.debug(ex)
self.set_state(Instance.SHUTDOWN)
diff --git a/nova/crypto.py b/nova/crypto.py
index 80b4ef9de..413796ccc 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -96,7 +96,7 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
return '%s %s@%s\n' %(out.strip(), name, suffix)
-def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
+def generate_x509_cert(subject, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index d6c164163..931c6c6e1 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -170,6 +170,28 @@ class CloudController(object):
'zoneState': 'available'}]}
@rbac.allow('all')
+ def describe_regions(self, context, region_name=None, **kwargs):
+ # TODO(vish): region_name is an array. Support filtering
+ return {'regionInfo': [{'regionName': 'nova',
+ 'regionUrl': FLAGS.ec2_url}]}
+
+ @rbac.allow('all')
+ def describe_snapshots(self,
+ context,
+ snapshot_id=None,
+ owner=None,
+ restorable_by=None,
+ **kwargs):
+ return {'snapshotSet': [{'snapshotId': 'fixme',
+ 'volumeId': 'fixme',
+ 'status': 'fixme',
+ 'startTime': 'fixme',
+ 'progress': 'fixme',
+ 'ownerId': 'fixme',
+ 'volumeSize': 0,
+ 'description': 'fixme'}]}
+
+ @rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = context.user.get_key_pairs()
if not key_name is None:
@@ -178,7 +200,8 @@ class CloudController(object):
result = []
for key_pair in key_pairs:
# filter out the vpn keys
- if context.user.is_admin() or not key_pair.name.endswith('-key'):
+ suffix = FLAGS.vpn_key_suffix
+ if context.user.is_admin() or not key_pair.name.endswith(suffix):
result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
@@ -609,9 +632,8 @@ class CloudController(object):
result = { 'image_id': image_id, 'launchPermission': [] }
if image['isPublic']:
result['launchPermission'].append({ 'group': 'all' })
-
return defer.succeed(result)
-
+
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
diff --git a/nova/flags.py b/nova/flags.py
index bf7b6e3a3..985f9ba04 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -74,6 +74,9 @@ DEFINE_string('default_instance_type',
'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('vpn_key_suffix',
+ '-key',
+ 'Suffix to add to project name for vpn key')
# UNUSED
DEFINE_string('node_availability_zone',
diff --git a/nova/rpc.py b/nova/rpc.py
index 54843973a..b0f6ef7f3 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -63,6 +63,10 @@ class Connection(connection.BrokerConnection):
cls._instance = cls(**params)
return cls._instance
+ @classmethod
+ def recreate(cls):
+ del cls._instance
+ return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
@@ -79,9 +83,22 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
- @exception.wrap_exception
def fetch(self, *args, **kwargs):
- super(Consumer, self).fetch(*args, **kwargs)
+ # TODO(vish): the logic for failed connections and logging should be
+ # refactored into some sort of connection manager object
+ try:
+ if getattr(self, 'failed_connection', False):
+ # attempt to reconnect
+ self.conn = Connection.recreate()
+ self.backend = self.conn.create_backend()
+ super(Consumer, self).fetch(*args, **kwargs)
+ if getattr(self, 'failed_connection', False):
+ logging.error("Reconnected to queue")
+ self.failed_connection = False
+ except Exception, ex:
+ if not getattr(self, 'failed_connection', False):
+ logging.exception("Failed to fetch message from queue")
+ self.failed_connection = True
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
@@ -115,9 +132,10 @@ class AdapterConsumer(TopicConsumer):
args = message_data.get('args', {})
message.ack()
if not method:
- # vish: we may not want to ack here, but that means that bad messages
- # stay in the queue indefinitely, so for now we just log the
- # message and send an error string back to the caller
+ # NOTE(vish): we may not want to ack here, but that means that bad
+ # messages stay in the queue indefinitely, so for now
+ # we just log the message and send an error string
+ # back to the caller
_log.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index 89c1d59c5..cee567c8b 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -28,7 +28,6 @@ import tempfile
from nova import vendor
from nova import flags
-from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
@@ -57,7 +56,6 @@ class ObjectStoreTestCase(test.BaseTestCase):
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 73215c5ca..36fcc6f19 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -38,10 +38,7 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage = None
self.flags(fake_libvirt=True,
fake_storage=True)
- if FLAGS.fake_storage:
- self.mystorage = storage.FakeBlockStore()
- else:
- self.mystorage = storage.BlockStore()
+ self.mystorage = storage.BlockStore()
def test_run_create_volume(self):
vol_size = '0'
@@ -65,6 +62,18 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage.create_volume,
vol_size, user_id, project_id)
+ def test_too_many_volumes(self):
+ vol_size = '1'
+ user_id = 'fake'
+ project_id = 'fake'
+ num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+ total_slots = FLAGS.slots_per_shelf * num_shelves
+ for i in xrange(total_slots):
+ self.mystorage.create_volume(vol_size, user_id, project_id)
+ self.assertRaises(storage.NoMoreVolumes,
+ self.mystorage.create_volume,
+ vol_size, user_id, project_id)
+
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index 273a6afd1..82d7a4c22 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -26,9 +26,10 @@ Currently uses Ata-over-Ethernet.
import glob
import logging
-import random
+import os
import socket
-import subprocess
+import shutil
+import tempfile
import time
from nova import vendor
@@ -38,7 +39,6 @@ from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
-from nova import rpc
from nova import utils
from nova import validate
from nova.compute import model
@@ -54,16 +54,27 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this node')
-flags.DEFINE_integer('shelf_id',
- utils.last_octet(utils.get_my_ip()),
- 'AoE shelf_id for this node')
+flags.DEFINE_integer('first_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10,
+ 'AoE starting shelf_id for this node')
+flags.DEFINE_integer('last_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10 + 9,
+ 'AoE ending shelf_id for this node')
+flags.DEFINE_string('aoe_export_dir',
+ '/var/lib/vblade-persist/vblades',
+ 'AoE directory where exports are created')
+flags.DEFINE_integer('slots_per_shelf',
+ 16,
+ 'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-# TODO(joshua) Index of volumes by project
+
+class NoMoreVolumes(exception.Error):
+ pass
def get_volume(volume_id):
""" Returns a redis-backed volume object """
@@ -84,9 +95,14 @@ class BlockStore(object):
super(BlockStore, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
+ FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
+ def __del__(self):
+ if FLAGS.fake_storage:
+ shutil.rmtree(FLAGS.aoe_export_dir)
+
def report_state(self):
#TODO: aggregate the state of the system
pass
@@ -140,18 +156,6 @@ class BlockStore(object):
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
-
-class FakeBlockStore(BlockStore):
- def __init__(self):
- super(FakeBlockStore, self).__init__()
-
- def _init_volume_group(self):
- pass
-
- def _restart_exports(self):
- pass
-
-
class Volume(model.BasicModel):
def __init__(self, volume_id=None):
@@ -182,7 +186,7 @@ class Volume(model.BasicModel):
vol['delete_on_termination'] = 'False'
vol.save()
vol.create_lv()
- vol.setup_export()
+ vol._setup_export()
# TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
# TODO(joshua
vol['status'] = "available"
@@ -234,15 +238,22 @@ class Volume(model.BasicModel):
def _delete_lv(self):
utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
- def setup_export(self):
+ def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
+ self._exec_export()
+
+ def _exec_export(self):
utils.runthis("Creating AOE export: %s",
- "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']))
+ "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
+ (self['shelf_id'],
+ self['blade_id'],
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ self['volume_id']))
def _remove_export(self):
utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
@@ -253,13 +264,10 @@ class FakeVolume(Volume):
def create_lv(self):
pass
- def setup_export(self):
- # TODO(???): This may not be good enough?
- blade_id = ''.join([random.choice('0123456789') for x in xrange(3)])
- self['shelf_id'] = FLAGS.shelf_id
- self['blade_id'] = blade_id
- self['aoe_device'] = "e%s.%s" % (FLAGS.shelf_id, blade_id)
- self.save()
+ def _exec_export(self):
+ fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
+ f = file(fname, "w")
+ f.close()
def _remove_export(self):
pass
@@ -268,9 +276,13 @@ class FakeVolume(Volume):
pass
def get_next_aoe_numbers():
- aoes = glob.glob("/var/lib/vblade-persist/vblades/e*")
- aoes.extend(['e0.0'])
- blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1
- logging.debug("Next blade_id is %s" % (blade_id))
- shelf_id = FLAGS.shelf_id
- return (shelf_id, blade_id)
+ for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
+ aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
+ if not aoes:
+ blade_id = 0
+ else:
+ blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
+ if blade_id < FLAGS.slots_per_shelf:
+ logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
+ return (shelf_id, blade_id)
+ raise NoMoreVolumes()