-rw-r--r--   openstack/common/policy.py   | 274
-rw-r--r--   tests/unit/test_policy.py    | 278
2 files changed, 548 insertions, 4 deletions
diff --git a/openstack/common/policy.py b/openstack/common/policy.py
index 8b5f463..f3efe55 100644
--- a/openstack/common/policy.py
+++ b/openstack/common/policy.py
@@ -18,19 +18,42 @@
"""
Common Policy Engine Implementation
-Policies are be expressed as a list-of-lists where each check inside the
-innermost list is combined as with an "and" conjunction--for that check to
-pass, all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction.
+Policies can be expressed in one of two forms: a list of lists, or a
+string written in the new policy language.
+
+In the list-of-lists representation, each check inside the innermost
+list is combined as with an "and" conjunction--for that check to pass,
+all the specified checks must pass. These innermost lists are then
+combined as with an "or" conjunction. This is the original way of
+expressing policies; the policy language described below is the newer
+alternative.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct code to perform that check. However, conjunction
+operators are available, allowing for more expressiveness in crafting
+policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
+
+In the policy language, this becomes::
+
+ role:admin or (project_id:%(project_id)s and role:projectadmin)
+
+Finally, two special policy checks should be mentioned: the policy
+check "@" will always accept an access, and the policy check "!" will
+always reject an access. (Note that if a rule is either the empty
+list ("[]") or the empty string, this is equivalent to the "@" policy
+check.) Of these, the "!" policy check is probably the most useful,
+as it allows particular rules to be explicitly disabled.
"""
import abc
import logging
+import re
import urllib
import urllib2
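For orientation (a minimal sketch, not part of the patch itself), this is how the two representations from the docstring are expected to behave once this change lands. It assumes the module is importable as openstack.common.policy and that parsed Check trees are callable with a (target, creds) pair, as the rest of this file does::

    from openstack.common import policy

    list_form = [["role:admin"],
                 ["project_id:%(project_id)s", "role:projectadmin"]]
    text_form = ("role:admin or "
                 "(project_id:%(project_id)s and role:projectadmin)")

    # parse_rule() accepts either form and returns a Check tree.
    list_check = policy.parse_rule(list_form)
    text_check = policy.parse_rule(text_form)

    target = {"project_id": "1234"}
    creds = {"roles": ["projectadmin"], "project_id": "1234"}

    # Both trees are expected to evaluate identically for the same input.
    same_result = list_check(target, creds) == text_check(target, creds)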
@@ -324,6 +347,13 @@ def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
+
+ # Handle the special checks
+ if rule == '!':
+ return FalseCheck()
+ elif rule == '@':
+ return TrueCheck()
+
try:
kind, match = rule.split(':', 1)
except Exception:
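With this hunk, _parse_check() short-circuits on the two special rules before falling back to the usual "kind:match" split. Illustrative expectations only (hedged; a "role" check is assumed to be registered, as elsewhere in this module)::

    policy._parse_check('!')           # -> FalseCheck(): always rejects
    policy._parse_check('@')           # -> TrueCheck(): always accepts
    policy._parse_check('role:admin')  # -> the check registered for "role",
                                       #    matched against "admin"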
@@ -380,10 +410,246 @@ def _parse_list_rule(rule):
return OrCheck(or_list)
+# Used for tokenizing the policy language
+_tokenize_re = re.compile(r'\s+')
+
+
+def _parse_tokenize(rule):
+ """
+ Tokenizer for the policy language.
+
+ Most of the single-character tokens are specified in the
+ _tokenize_re; however, parentheses need to be handled specially,
+ because they can appear inside a check string. Thankfully, those
+ parentheses that appear inside a check string can never occur at
+ the very beginning or end ("%(variable)s" is the correct syntax).
+ """
+
+ for tok in _tokenize_re.split(rule):
+ # Skip empty tokens
+ if not tok or tok.isspace():
+ continue
+
+ # Handle leading parens on the token
+ clean = tok.lstrip('(')
+ for i in range(len(tok) - len(clean)):
+ yield '(', '('
+
+ # If it was only parentheses, continue
+ if not clean:
+ continue
+ else:
+ tok = clean
+
+ # Handle trailing parens on the token
+ clean = tok.rstrip(')')
+ trail = len(tok) - len(clean)
+
+ # Yield the cleaned token
+ lowered = clean.lower()
+ if lowered in ('and', 'or'):
+ # Special tokens
+ yield lowered, clean
+ elif clean:
+ # Not a special token, but not composed solely of ')'
+ if len(tok) >= 2 and ((tok[0], tok[-1]) in
+ [('"', '"'), ("'", "'")]):
+ # It's a quoted string
+ yield 'string', tok[1:-1]
+ else:
+ yield 'check', _parse_check(clean)
+
+ # Yield the trailing parens
+ for i in range(trail):
+ yield ')', ')'
+
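To make the tokenizer's behaviour concrete, here is a hedged sketch of the token stream it should produce for a small rule; parentheses become their own tokens even though they are glued to the checks in the source string::

    list(policy._parse_tokenize("(role:admin or role:member)"))
    # Expected shape of the result:
    #   [('(', '('),
    #    ('check', <check for "role:admin">),
    #    ('or', 'or'),
    #    ('check', <check for "role:member">),
    #    (')', ')')]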
+
+class ParseStateMeta(type):
+ """
+ Metaclass for the ParseState class. Facilitates identifying
+ reduction methods.
+ """
+
+ def __new__(mcs, name, bases, cls_dict):
+ """
+ Create the class. Injects the 'reducers' list, a list of
+ tuples matching token sequences to the names of the
+ corresponding reduction methods.
+ """
+
+ reducers = []
+
+ for key, value in cls_dict.items():
+ if not hasattr(value, 'reducers'):
+ continue
+ for reduction in value.reducers:
+ reducers.append((reduction, key))
+
+ cls_dict['reducers'] = reducers
+
+ return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
+
+
+def reducer(*tokens):
+ """
+ Decorator for reduction methods. Arguments are a sequence of
+ tokens, in order, which should trigger running this reduction
+ method.
+ """
+
+ def decorator(func):
+ # Make sure we have a list of reducer sequences
+ if not hasattr(func, 'reducers'):
+ func.reducers = []
+
+ # Add the tokens to the list of reducer sequences
+ func.reducers.append(list(tokens))
+
+ return func
+
+ return decorator
+
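As a hedged sketch of how the decorator and the metaclass cooperate: each decorated method advertises the token pattern it can reduce, and ParseStateMeta gathers those patterns into a class-level 'reducers' list of (pattern, method_name) tuples that reduce() later walks. The Toy class below is hypothetical::

    class Toy(object):
        __metaclass__ = policy.ParseStateMeta

        @policy.reducer('check', 'and', 'check')
        def join(self, *values):
            return [('and_expr', values)]

    # Toy.reducers is expected to contain
    # (['check', 'and', 'check'], 'join').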
+
+class ParseState(object):
+ """
+ Implement the core of parsing the policy language. Uses a greedy
+ reduction algorithm to reduce a sequence of tokens into a single
+ terminal, the value of which will be the root of the Check tree.
+
+ Note: error reporting is rather lacking. The best we can get with
+ this parser formulation is an overall "parse failed" error.
+ Fortunately, the policy language is simple enough that this
+ shouldn't be that big a problem.
+ """
+
+ __metaclass__ = ParseStateMeta
+
+ def __init__(self):
+ """Initialize the ParseState."""
+
+ self.tokens = []
+ self.values = []
+
+ def reduce(self):
+ """
+ Perform a greedy reduction of the token stream. If a reducer
+ method matches, it will be executed, then the reduce() method
+ will be called recursively to search for any more possible
+ reductions.
+ """
+
+ for reduction, methname in self.reducers:
+ if (len(self.tokens) >= len(reduction) and
+ self.tokens[-len(reduction):] == reduction):
+ # Get the reduction method
+ meth = getattr(self, methname)
+
+ # Reduce the token stream
+ results = meth(*self.values[-len(reduction):])
+
+ # Update the tokens and values
+ self.tokens[-len(reduction):] = [r[0] for r in results]
+ self.values[-len(reduction):] = [r[1] for r in results]
+
+ # Check for any more reductions
+ return self.reduce()
+
+ def shift(self, tok, value):
+ """Adds one more token to the state. Calls reduce()."""
+
+ self.tokens.append(tok)
+ self.values.append(value)
+
+ # Do a greedy reduce...
+ self.reduce()
+
+ @property
+ def result(self):
+ """
+ Obtain the final result of the parse. Raises ValueError if
+ the parse failed to reduce to a single result.
+ """
+
+ if len(self.values) != 1:
+ raise ValueError("Could not parse rule")
+ return self.values[0]
+
+ @reducer('(', 'check', ')')
+ @reducer('(', 'and_expr', ')')
+ @reducer('(', 'or_expr', ')')
+ def _wrap_check(self, _p1, check, _p2):
+ """Turn parenthesized expressions into a 'check' token."""
+
+ return [('check', check)]
+
+ @reducer('check', 'and', 'check')
+ def _make_and_expr(self, check1, _and, check2):
+ """
+ Create an 'and_expr' from two checks joined by the 'and'
+ operator.
+ """
+
+ return [('and_expr', AndCheck([check1, check2]))]
+
+ @reducer('and_expr', 'and', 'check')
+ def _extend_and_expr(self, and_expr, _and, check):
+ """
+ Extend an 'and_expr' by adding one more check.
+ """
+
+ return [('and_expr', and_expr.add_check(check))]
+
+ @reducer('check', 'or', 'check')
+ def _make_or_expr(self, check1, _or, check2):
+ """
+ Create an 'or_expr' from two checks joined by the 'or'
+ operator.
+ """
+
+ return [('or_expr', OrCheck([check1, check2]))]
+
+ @reducer('or_expr', 'or', 'check')
+ def _extend_or_expr(self, or_expr, _or, check):
+ """
+ Extend an 'or_expr' by adding one more check.
+ """
+
+ return [('or_expr', or_expr.add_check(check))]
+
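A hedged walkthrough of the shift/reduce loop for the rule "role:admin or role:member"; the comments describe the parser state after each shift() call::

    c1 = policy._parse_check('role:admin')
    c2 = policy._parse_check('role:member')

    state = policy.ParseState()
    state.shift('check', c1)  # tokens: ['check'] -- nothing reduces yet
    state.shift('or', 'or')   # tokens: ['check', 'or'] -- still nothing
    state.shift('check', c2)  # tokens: ['check', 'or', 'check'];
                              # _make_or_expr fires and collapses them
                              # to ['or_expr']
    root = state.result       # -> OrCheck([c1, c2])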
+
+def _parse_text_rule(rule):
+ """
+ Translates a policy written in the policy language into a tree of
+ Check objects.
+ """
+
+ # Empty rule means always accept
+ if not rule:
+ return TrueCheck()
+
+ # Parse the token stream
+ state = ParseState()
+ for tok, value in _parse_tokenize(rule):
+ state.shift(tok, value)
+
+ try:
+ return state.result
+ except ValueError:
+ # Couldn't parse the rule
+ LOG.exception(_("Failed to understand rule %(rule)r") % locals())
+
+ # Fail closed
+ return FalseCheck()
+
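Hedged examples of _parse_text_rule()'s expected behaviour, including the fail-closed path taken when the token stream cannot be reduced to a single value::

    policy._parse_text_rule('')                  # -> TrueCheck()
    policy._parse_text_rule('role:a or role:b')  # -> OrCheck tree
    policy._parse_text_rule('role:a or')         # -> cannot be reduced;
                                                 #    logs the failure and
                                                 #    returns FalseCheck()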
+
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
+
+ # If the rule is a string, it's in the policy language
+ if isinstance(rule, basestring):
+ return _parse_text_rule(rule)
return _parse_list_rule(rule)
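For completeness, a small hedged sketch of the dispatch: strings go through the new text parser, while anything else keeps the existing list-of-lists path::

    policy.parse_rule("role:admin or role:member")        # -> _parse_text_rule
    policy.parse_rule([["role:admin"], ["role:member"]])  # -> _parse_list_rule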
diff --git a/tests/unit/test_policy.py b/tests/unit/test_policy.py
index d7284a4..fd9f9fb 100644
--- a/tests/unit/test_policy.py
+++ b/tests/unit/test_policy.py
@@ -84,6 +84,16 @@ class RulesTestCase(unittest.TestCase):
default=[],
))
+ def test_str(self):
+ exemplar = """{
+ "admin_or_owner": "role:admin or project_id:%(project_id)s"
+}"""
+ rules = policy.Rules(dict(
+ admin_or_owner="role:admin or project_id:%(project_id)s",
+ ))
+
+ self.assertEqual(str(rules), exemplar)
+
def test_str_true(self):
exemplar = """{
"admin_or_owner": ""
@@ -256,6 +266,16 @@ class OrCheckTestCase(unittest.TestCase):
class ParseCheckTestCase(unittest.TestCase):
+ def test_false(self):
+ result = policy._parse_check('!')
+
+ self.assertTrue(isinstance(result, policy.FalseCheck))
+
+ def test_true(self):
+ result = policy._parse_check('@')
+
+ self.assertTrue(isinstance(result, policy.TrueCheck))
+
def test_bad_rule(self):
result = policy._parse_check('foobar')
@@ -358,6 +378,264 @@ class ParseListRuleTestCase(unittest.TestCase):
'((rule1 and rule2) or (rule3 and rule4))')
+class ParseTokenizeTestCase(unittest.TestCase):
+ @mock.patch.object(policy, '_parse_check', lambda x: x)
+ def test_tokenize(self):
+ exemplar = ("(( ( ((() And)) or ) (check:%(miss)s))) "
+ "'a-string' \"another-string\"")
+ expected = [
+ ('(', '('), ('(', '('), ('(', '('), ('(', '('), ('(', '('),
+ ('(', '('), (')', ')'), ('and', 'And'),
+ (')', ')'), (')', ')'), ('or', 'or'), (')', ')'), ('(', '('),
+ ('check', 'check:%(miss)s'), (')', ')'),
+ (')', ')'), (')', ')'),
+ ('string', 'a-string'),
+ ('string', 'another-string'),
+ ]
+
+ result = list(policy._parse_tokenize(exemplar))
+
+ self.assertEqual(result, expected)
+
+
+class ParseStateMetaTestCase(unittest.TestCase):
+ def test_reducer(self):
+ @policy.reducer('a', 'b', 'c')
+ @policy.reducer('d', 'e', 'f')
+ def spam():
+ pass
+
+ self.assertTrue(hasattr(spam, 'reducers'))
+ self.assertEqual(spam.reducers, [['d', 'e', 'f'], ['a', 'b', 'c']])
+
+ def test_parse_state_meta(self):
+ class FakeState(object):
+ __metaclass__ = policy.ParseStateMeta
+
+ @policy.reducer('a', 'b', 'c')
+ @policy.reducer('d', 'e', 'f')
+ def reduce1(self):
+ pass
+
+ @policy.reducer('g', 'h', 'i')
+ def reduce2(self):
+ pass
+
+ self.assertTrue(hasattr(FakeState, 'reducers'))
+ for reduction, reducer in FakeState.reducers:
+ if (reduction == ['a', 'b', 'c'] or
+ reduction == ['d', 'e', 'f']):
+ self.assertEqual(reducer, 'reduce1')
+ elif reduction == ['g', 'h', 'i']:
+ self.assertEqual(reducer, 'reduce2')
+ else:
+ self.fail("Unrecognized reducer discovered")
+
+
+class ParseStateTestCase(unittest.TestCase):
+ def test_init(self):
+ state = policy.ParseState()
+
+ self.assertEqual(state.tokens, [])
+ self.assertEqual(state.values, [])
+
+ @mock.patch.object(policy.ParseState, 'reducers', [(['tok1'], 'meth')])
+ @mock.patch.object(policy.ParseState, 'meth', create=True)
+ def test_reduce_none(self, mock_meth):
+ state = policy.ParseState()
+ state.tokens = ['tok2']
+ state.values = ['val2']
+
+ state.reduce()
+
+ self.assertEqual(state.tokens, ['tok2'])
+ self.assertEqual(state.values, ['val2'])
+ self.assertFalse(mock_meth.called)
+
+ @mock.patch.object(policy.ParseState, 'reducers',
+ [(['tok1', 'tok2'], 'meth')])
+ @mock.patch.object(policy.ParseState, 'meth', create=True)
+ def test_reduce_short(self, mock_meth):
+ state = policy.ParseState()
+ state.tokens = ['tok1']
+ state.values = ['val1']
+
+ state.reduce()
+
+ self.assertEqual(state.tokens, ['tok1'])
+ self.assertEqual(state.values, ['val1'])
+ self.assertFalse(mock_meth.called)
+
+ @mock.patch.object(policy.ParseState, 'reducers',
+ [(['tok1', 'tok2'], 'meth')])
+ @mock.patch.object(policy.ParseState, 'meth', create=True,
+ return_value=[('tok3', 'val3')])
+ def test_reduce_one(self, mock_meth):
+ state = policy.ParseState()
+ state.tokens = ['tok1', 'tok2']
+ state.values = ['val1', 'val2']
+
+ state.reduce()
+
+ self.assertEqual(state.tokens, ['tok3'])
+ self.assertEqual(state.values, ['val3'])
+ mock_meth.assert_called_once_with('val1', 'val2')
+
+ @mock.patch.object(policy.ParseState, 'reducers', [
+ (['tok1', 'tok4'], 'meth2'),
+ (['tok2', 'tok3'], 'meth1'),
+ ])
+ @mock.patch.object(policy.ParseState, 'meth1', create=True,
+ return_value=[('tok4', 'val4')])
+ @mock.patch.object(policy.ParseState, 'meth2', create=True,
+ return_value=[('tok5', 'val5')])
+ def test_reduce_two(self, mock_meth2, mock_meth1):
+ state = policy.ParseState()
+ state.tokens = ['tok1', 'tok2', 'tok3']
+ state.values = ['val1', 'val2', 'val3']
+
+ state.reduce()
+
+ self.assertEqual(state.tokens, ['tok5'])
+ self.assertEqual(state.values, ['val5'])
+ mock_meth1.assert_called_once_with('val2', 'val3')
+ mock_meth2.assert_called_once_with('val1', 'val4')
+
+ @mock.patch.object(policy.ParseState, 'reducers',
+ [(['tok1', 'tok2'], 'meth')])
+ @mock.patch.object(policy.ParseState, 'meth', create=True,
+ return_value=[('tok3', 'val3'), ('tok4', 'val4')])
+ def test_reduce_multi(self, mock_meth):
+ state = policy.ParseState()
+ state.tokens = ['tok1', 'tok2']
+ state.values = ['val1', 'val2']
+
+ state.reduce()
+
+ self.assertEqual(state.tokens, ['tok3', 'tok4'])
+ self.assertEqual(state.values, ['val3', 'val4'])
+ mock_meth.assert_called_once_with('val1', 'val2')
+
+ def test_shift(self):
+ state = policy.ParseState()
+
+ with mock.patch.object(policy.ParseState, 'reduce') as mock_reduce:
+ state.shift('token', 'value')
+
+ self.assertEqual(state.tokens, ['token'])
+ self.assertEqual(state.values, ['value'])
+ mock_reduce.assert_called_once_with()
+
+ def test_result_empty(self):
+ state = policy.ParseState()
+
+ self.assertRaises(ValueError, lambda: state.result)
+
+ def test_result_unreduced(self):
+ state = policy.ParseState()
+ state.tokens = ['tok1', 'tok2']
+ state.values = ['val1', 'val2']
+
+ self.assertRaises(ValueError, lambda: state.result)
+
+ def test_result(self):
+ state = policy.ParseState()
+ state.tokens = ['token']
+ state.values = ['value']
+
+ self.assertEqual(state.result, 'value')
+
+ def test_wrap_check(self):
+ state = policy.ParseState()
+
+ result = state._wrap_check('(', 'the_check', ')')
+
+ self.assertEqual(result, [('check', 'the_check')])
+
+ @mock.patch.object(policy, 'AndCheck', lambda x: x)
+ def test_make_and_expr(self):
+ state = policy.ParseState()
+
+ result = state._make_and_expr('check1', 'and', 'check2')
+
+ self.assertEqual(result, [('and_expr', ['check1', 'check2'])])
+
+ def test_extend_and_expr(self):
+ state = policy.ParseState()
+ mock_expr = mock.Mock()
+ mock_expr.add_check.return_value = 'newcheck'
+
+ result = state._extend_and_expr(mock_expr, 'and', 'check')
+
+ self.assertEqual(result, [('and_expr', 'newcheck')])
+ mock_expr.add_check.assert_called_once_with('check')
+
+ @mock.patch.object(policy, 'OrCheck', lambda x: x)
+ def test_make_or_expr(self):
+ state = policy.ParseState()
+
+ result = state._make_or_expr('check1', 'or', 'check2')
+
+ self.assertEqual(result, [('or_expr', ['check1', 'check2'])])
+
+ def test_extend_or_expr(self):
+ state = policy.ParseState()
+ mock_expr = mock.Mock()
+ mock_expr.add_check.return_value = 'newcheck'
+
+ result = state._extend_or_expr(mock_expr, 'or', 'check')
+
+ self.assertEqual(result, [('or_expr', 'newcheck')])
+ mock_expr.add_check.assert_called_once_with('check')
+
+
+class ParseTextRuleTestCase(unittest.TestCase):
+ def test_empty(self):
+ result = policy._parse_text_rule('')
+
+ self.assertTrue(isinstance(result, policy.TrueCheck))
+
+ @mock.patch.object(policy, '_parse_tokenize',
+ return_value=[('tok1', 'val1'), ('tok2', 'val2')])
+ @mock.patch.object(policy.ParseState, 'shift')
+ @mock.patch.object(policy.ParseState, 'result', 'result')
+ def test_shifts(self, mock_shift, mock_parse_tokenize):
+ result = policy._parse_text_rule('test rule')
+
+ self.assertEqual(result, 'result')
+ mock_parse_tokenize.assert_called_once_with('test rule')
+ mock_shift.assert_has_calls(
+ [mock.call('tok1', 'val1'), mock.call('tok2', 'val2')])
+
+ @mock.patch.object(policy, '_parse_tokenize', return_value=[])
+ def test_fail(self, mock_parse_tokenize):
+ result = policy._parse_text_rule('test rule')
+
+ self.assertTrue(isinstance(result, policy.FalseCheck))
+ mock_parse_tokenize.assert_called_once_with('test rule')
+
+
+class ParseRuleTestCase(unittest.TestCase):
+ @mock.patch.object(policy, '_parse_text_rule', return_value='text rule')
+ @mock.patch.object(policy, '_parse_list_rule', return_value='list rule')
+ def test_parse_rule_string(self, mock_parse_list_rule,
+ mock_parse_text_rule):
+ result = policy.parse_rule("a string")
+
+ self.assertEqual(result, 'text rule')
+ self.assertFalse(mock_parse_list_rule.called)
+ mock_parse_text_rule.assert_called_once_with('a string')
+
+ @mock.patch.object(policy, '_parse_text_rule', return_value='text rule')
+ @mock.patch.object(policy, '_parse_list_rule', return_value='list rule')
+ def test_parse_rule_list(self, mock_parse_list_rule, mock_parse_text_rule):
+ result = policy.parse_rule([['a'], ['list']])
+
+ self.assertEqual(result, 'list rule')
+ self.assertFalse(mock_parse_text_rule.called)
+ mock_parse_list_rule.assert_called_once_with([['a'], ['list']])
+
+
class CheckRegisterTestCase(unittest.TestCase):
@mock.patch.object(policy, '_checks', {})
def test_register_check(self):