author     Kevin L. Mitchell <kevin.mitchell@rackspace.com>    2012-10-09 13:23:41 +0100
committer  Mark McLoughlin <markmc@redhat.com>                 2012-10-09 21:16:19 +0100
commit     fa7dc58b7f0a5de137b30299bfc4d3f3aaa8d0cf (patch)
tree       be29e4fd34bd679c6f7b3e3c2fc58fc10ff6b473 /openstack
parent     8c6e7a7cb1610956b0fcc9f6e8ebd967baafc6d6 (diff)
Add a new policy language
Implements blueprint fine-grained-policy

Add a new policy language with "and" and "or" operators to replace the
old list-of-lists syntax. New "@" and "!" operators are also added.
This new language will enable us to add more advanced features than
the old syntax would have allowed. Backwards-compatible support for
the old list-of-lists syntax is retained.

Change-Id: I872cb6abf6f8051c3ff502a0fc7590cff4f63a25
Diffstat (limited to 'openstack')
-rw-r--r--  openstack/common/policy.py | 274
1 file changed, 270 insertions(+), 4 deletions(-)
diff --git a/openstack/common/policy.py b/openstack/common/policy.py
index 8b5f463..f3efe55 100644
--- a/openstack/common/policy.py
+++ b/openstack/common/policy.py
@@ -18,19 +18,42 @@
"""
Common Policy Engine Implementation
-Policies are be expressed as a list-of-lists where each check inside the
-innermost list is combined as with an "and" conjunction--for that check to
-pass, all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction.
+Policies can be expressed in one of two forms: a list of lists, or a
+string written in the new policy language.
+
+In the list-of-lists representation, the checks inside each innermost
+list are combined with an "and" conjunction--for that list to pass,
+all of its checks must pass. These innermost lists are then combined
+with an "or" conjunction. This is the original way of expressing
+policies, but a new way is now available: the policy language.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct code to perform that check. However, conjunction
+operators are available, allowing for more expressiveness in crafting
+policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
+
+In the policy language, this becomes::
+
+ role:admin or (project_id:%(project_id)s and role:projectadmin)
+
+Finally, two special policy checks should be mentioned: the policy
+check "@" will always accept an access, and the policy check "!" will
+always reject an access. (Note that a rule that is either the empty
+list ("[]") or the empty string is equivalent to the "@" policy
+check.) Of these, the "!" policy check is probably the most useful,
+as it allows particular rules to be explicitly disabled.
"""
import abc
import logging
+import re
import urllib
import urllib2
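For a concrete sense of the two representations side by side, here is a
hedged sketch of rule definitions as they might appear in a service's
policy file; the rule names are illustrative only and not part of this
patch:

    rules = {
        # Old list-of-lists form: inner lists are AND-ed together,
        # and the outer list OR-s those results.
        "volume:create": [["role:admin"],
                          ["project_id:%(project_id)s",
                           "role:projectadmin"]],

        # The same policy written in the new policy language.
        "volume:create_alt": "role:admin or "
                             "(project_id:%(project_id)s and "
                             "role:projectadmin)",

        # Special checks: "@" always accepts, "!" always rejects.
        "volume:always_allowed": "@",
        "volume:disabled": "!",
    }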
@@ -324,6 +347,13 @@ def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
+
+ # Handle the special checks
+ if rule == '!':
+ return FalseCheck()
+ elif rule == '@':
+ return TrueCheck()
+
try:
kind, match = rule.split(':', 1)
except Exception:
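A quick, hedged illustration of the special-check dispatch above,
assuming the TrueCheck and FalseCheck classes defined elsewhere in this
module:

    # "!" always rejects; "@" always accepts.
    assert isinstance(_parse_check('!'), FalseCheck)
    assert isinstance(_parse_check('@'), TrueCheck)

    # Anything else is split on the first ':' into (kind, match)
    # and dispatched to the matching Check class.
    check = _parse_check('role:admin')   # kind='role', match='admin'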
@@ -380,10 +410,246 @@ def _parse_list_rule(rule):
return OrCheck(or_list)
+# Used for tokenizing the policy language
+_tokenize_re = re.compile(r'\s+')
+
+
+def _parse_tokenize(rule):
+ """
+ Tokenizer for the policy language.
+
+    Tokens are separated by whitespace (split out by _tokenize_re);
+    however, parentheses need to be handled specially,
+ because they can appear inside a check string. Thankfully, those
+ parentheses that appear inside a check string can never occur at
+ the very beginning or end ("%(variable)s" is the correct syntax).
+ """
+
+ for tok in _tokenize_re.split(rule):
+ # Skip empty tokens
+ if not tok or tok.isspace():
+ continue
+
+ # Handle leading parens on the token
+ clean = tok.lstrip('(')
+ for i in range(len(tok) - len(clean)):
+ yield '(', '('
+
+ # If it was only parentheses, continue
+ if not clean:
+ continue
+ else:
+ tok = clean
+
+ # Handle trailing parens on the token
+ clean = tok.rstrip(')')
+ trail = len(tok) - len(clean)
+
+ # Yield the cleaned token
+ lowered = clean.lower()
+ if lowered in ('and', 'or'):
+ # Special tokens
+ yield lowered, clean
+ elif clean:
+ # Not a special token, but not composed solely of ')'
+ if len(tok) >= 2 and ((tok[0], tok[-1]) in
+ [('"', '"'), ("'", "'")]):
+ # It's a quoted string
+ yield 'string', tok[1:-1]
+ else:
+ yield 'check', _parse_check(clean)
+
+ # Yield the trailing parens
+ for i in range(trail):
+ yield ')', ')'
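To make the parenthesis handling concrete, this is roughly what the
tokenizer yields for the docstring's example rule; the Check reprs are
abbreviated, since their exact classes depend on the registered checks:

    rule = ('role:admin or '
            '(project_id:%(project_id)s and role:projectadmin)')
    list(_parse_tokenize(rule))
    # Roughly:
    # [('check', <role:admin>),
    #  ('or', 'or'),
    #  ('(', '('),                   # stripped from the next token
    #  ('check', <project_id:...>),  # inner "%(...)s" parens survive
    #  ('and', 'and'),
    #  ('check', <role:projectadmin>),
    #  (')', ')')]                   # stripped trailing paren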
+
+
+class ParseStateMeta(type):
+ """
+ Metaclass for the ParseState class. Facilitates identifying
+ reduction methods.
+ """
+
+ def __new__(mcs, name, bases, cls_dict):
+ """
+ Create the class. Injects the 'reducers' list, a list of
+ tuples matching token sequences to the names of the
+ corresponding reduction methods.
+ """
+
+ reducers = []
+
+ for key, value in cls_dict.items():
+ if not hasattr(value, 'reducers'):
+ continue
+ for reduction in value.reducers:
+ reducers.append((reduction, key))
+
+ cls_dict['reducers'] = reducers
+
+ return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
+
+
+def reducer(*tokens):
+ """
+ Decorator for reduction methods. Arguments are a sequence of
+ tokens, in order, which should trigger running this reduction
+ method.
+ """
+
+ def decorator(func):
+ # Make sure we have a list of reducer sequences
+ if not hasattr(func, 'reducers'):
+ func.reducers = []
+
+ # Add the tokens to the list of reducer sequences
+ func.reducers.append(list(tokens))
+
+ return func
+
+ return decorator
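For reference, a sketch of what the decorator and metaclass together
produce for the ParseState class defined below: each decorated method
carries its trigger sequences, and ParseStateMeta flattens them into a
(token sequence, method name) table:

    # Illustrative contents of ParseState.reducers (the order is not
    # guaranteed, since it follows dict iteration over the class body):
    # [(['(', 'check', ')'], '_wrap_check'),
    #  (['(', 'and_expr', ')'], '_wrap_check'),
    #  (['(', 'or_expr', ')'], '_wrap_check'),
    #  (['check', 'and', 'check'], '_make_and_expr'),
    #  (['and_expr', 'and', 'check'], '_extend_and_expr'),
    #  (['check', 'or', 'check'], '_make_or_expr'),
    #  (['or_expr', 'or', 'check'], '_extend_or_expr')]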
+
+
+class ParseState(object):
+ """
+ Implement the core of parsing the policy language. Uses a greedy
+ reduction algorithm to reduce a sequence of tokens into a single
+ terminal, the value of which will be the root of the Check tree.
+
+ Note: error reporting is rather lacking. The best we can get with
+ this parser formulation is an overall "parse failed" error.
+ Fortunately, the policy language is simple enough that this
+ shouldn't be that big a problem.
+ """
+
+ __metaclass__ = ParseStateMeta
+
+ def __init__(self):
+ """Initialize the ParseState."""
+
+ self.tokens = []
+ self.values = []
+
+ def reduce(self):
+ """
+ Perform a greedy reduction of the token stream. If a reducer
+ method matches, it will be executed, then the reduce() method
+ will be called recursively to search for any more possible
+ reductions.
+ """
+
+ for reduction, methname in self.reducers:
+ if (len(self.tokens) >= len(reduction) and
+ self.tokens[-len(reduction):] == reduction):
+ # Get the reduction method
+ meth = getattr(self, methname)
+
+ # Reduce the token stream
+ results = meth(*self.values[-len(reduction):])
+
+ # Update the tokens and values
+ self.tokens[-len(reduction):] = [r[0] for r in results]
+ self.values[-len(reduction):] = [r[1] for r in results]
+
+ # Check for any more reductions
+ return self.reduce()
+
+ def shift(self, tok, value):
+ """Adds one more token to the state. Calls reduce()."""
+
+ self.tokens.append(tok)
+ self.values.append(value)
+
+ # Do a greedy reduce...
+ self.reduce()
+
+ @property
+ def result(self):
+ """
+ Obtain the final result of the parse. Raises ValueError if
+ the parse failed to reduce to a single result.
+ """
+
+ if len(self.values) != 1:
+ raise ValueError("Could not parse rule")
+ return self.values[0]
+
+ @reducer('(', 'check', ')')
+ @reducer('(', 'and_expr', ')')
+ @reducer('(', 'or_expr', ')')
+ def _wrap_check(self, _p1, check, _p2):
+ """Turn parenthesized expressions into a 'check' token."""
+
+ return [('check', check)]
+
+ @reducer('check', 'and', 'check')
+ def _make_and_expr(self, check1, _and, check2):
+ """
+ Create an 'and_expr' from two checks joined by the 'and'
+ operator.
+ """
+
+ return [('and_expr', AndCheck([check1, check2]))]
+
+ @reducer('and_expr', 'and', 'check')
+ def _extend_and_expr(self, and_expr, _and, check):
+ """
+ Extend an 'and_expr' by adding one more check.
+ """
+
+ return [('and_expr', and_expr.add_check(check))]
+
+ @reducer('check', 'or', 'check')
+ def _make_or_expr(self, check1, _or, check2):
+ """
+ Create an 'or_expr' from two checks joined by the 'or'
+ operator.
+ """
+
+ return [('or_expr', OrCheck([check1, check2]))]
+
+ @reducer('or_expr', 'or', 'check')
+ def _extend_or_expr(self, or_expr, _or, check):
+ """
+ Extend an 'or_expr' by adding one more check.
+ """
+
+ return [('or_expr', or_expr.add_check(check))]
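A hedged trace of the shift/reduce machinery for a two-check rule, to
show when the reducers fire:

    state = ParseState()
    state.shift('check', _parse_check('role:admin'))
    # state.tokens == ['check']        -- nothing to reduce yet
    state.shift('or', 'or')
    # state.tokens == ['check', 'or']  -- still no match
    state.shift('check', _parse_check('role:member'))
    # tail matches ('check', 'or', 'check'); _make_or_expr fires:
    # state.tokens == ['or_expr'], state.values == [OrCheck([...])]
    root = state.result                # the root of the Check tree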
+
+
+def _parse_text_rule(rule):
+ """
+ Translates a policy written in the policy language into a tree of
+ Check objects.
+ """
+
+ # Empty rule means always accept
+ if not rule:
+ return TrueCheck()
+
+ # Parse the token stream
+ state = ParseState()
+ for tok, value in _parse_tokenize(rule):
+ state.shift(tok, value)
+
+ try:
+ return state.result
+ except ValueError:
+ # Couldn't parse the rule
+ LOG.exception(_("Failed to understand rule %(rule)r") % locals())
+
+ # Fail closed
+ return FalseCheck()
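Pulling it together, roughly how _parse_text_rule behaves for a few
inputs (a sketch, not exhaustive):

    _parse_text_rule('')    # TrueCheck: empty rule always accepts
    _parse_text_rule('@')   # TrueCheck, via _parse_check
    _parse_text_rule('!')   # FalseCheck: explicit deny
    _parse_text_rule('role:admin or role:member')
    # OrCheck over two role checks
    _parse_text_rule('role:admin or')
    # cannot reduce to a single value: the failure is logged,
    # and the rule fails closed with FalseCheck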
+
+
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
+
+ # If the rule is a string, it's in the policy language
+ if isinstance(rule, basestring):
+ return _parse_text_rule(rule)
return _parse_list_rule(rule)
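With that, the top-level entry point accepts either representation; a
brief hedged usage sketch:

    # These two calls should produce equivalent Check trees:
    parse_rule([["role:admin"],
                ["project_id:%(project_id)s", "role:projectadmin"]])
    parse_rule("role:admin or "
               "(project_id:%(project_id)s and role:projectadmin)")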