Diffstat:
-rw-r--r--   .testr.conf                                   |   9
-rw-r--r--   HACKING.rst                                   |  10
-rw-r--r--   MAINTAINERS                                   |   6
-rw-r--r--   TESTING.rst                                   |  88
-rwxr-xr-x   openstack/common/config/generator.py          |   1
-rw-r--r--   openstack/common/exception.py                 |   4
-rw-r--r--   openstack/common/excutils.py                  |  31
-rw-r--r--   openstack/common/network_utils.py             |  17
-rw-r--r--   openstack/common/plugin/__init__.py           |  14
-rw-r--r--   openstack/common/plugin/callbackplugin.py     |  93
-rw-r--r--   openstack/common/plugin/plugin.py             |  86
-rw-r--r--   openstack/common/plugin/pluginmanager.py      |  78
-rw-r--r--   openstack/common/rootwrap/filters.py          | 113
-rw-r--r--   openstack/common/rootwrap/wrapper.py          |  14
-rw-r--r--   openstack/common/rpc/amqp.py                  |  13
-rw-r--r--   openstack/common/rpc/impl_kombu.py            |  53
-rw-r--r--   openstack/common/rpc/impl_qpid.py             |  30
-rw-r--r--   requirements.txt                              |   4
-rw-r--r--   test-requirements.txt                         |   8
-rw-r--r--   tests/unit/db/sqlalchemy/test_sqlalchemy.py   |  57
-rw-r--r--   tests/unit/db/test_api.py                     |  12
-rw-r--r--   tests/unit/plugin/__init__.py                 |  14
-rw-r--r--   tests/unit/plugin/test_callback_plugin.py     |  92
-rw-r--r--   tests/unit/rpc/test_common.py                 |   2
-rw-r--r--   tests/unit/rpc/test_kombu.py                  | 171
-rw-r--r--   tests/unit/rpc/test_qpid.py                   |  31
-rw-r--r--   tests/unit/rpc/test_zmq.py                    |   1
-rw-r--r--   tests/unit/test_excutils.py                   | 111
-rw-r--r--   tests/unit/test_network_utils.py              |  17
-rw-r--r--   tests/unit/test_plugin.py                     | 118
-rw-r--r--   tests/unit/test_rootwrap.py                   | 127
-rw-r--r--   tests/utils.py                                |  11
-rw-r--r--   tox.ini                                       |  18
33 files changed, 859 insertions, 595 deletions
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 0000000..d54ffb8
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,9 @@
+[DEFAULT]
+TESTS_PATH=./tests
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
+ ${PYTHON:-python} -m subunit.run discover -t ./ $TESTS_PATH $LISTOPT $IDOPTION
+
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/HACKING.rst b/HACKING.rst
index 3cea316..846c1b1 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -46,6 +46,16 @@ General
pass
+- Use 'raise' instead of 'raise e' to preserve the original traceback of the exception being reraised::
+
+ except Exception as e:
+ ...
+ raise e # BAD
+
+ except Exception:
+ ...
+ raise # OKAY
+
TODO vs FIXME
-------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 128cb22..0500c53 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -199,12 +199,6 @@ M: Michael Still <mikal@stillhq.com>
S: Maintained
F: periodic_task.py
-== plugins ==
-
-M:
-S: Orphan
-F: plugin/
-
== policy ==
M:
diff --git a/TESTING.rst b/TESTING.rst
new file mode 100644
index 0000000..4191b1b
--- /dev/null
+++ b/TESTING.rst
@@ -0,0 +1,88 @@
+===========================
+Testing Your OpenStack Code
+===========================
+------------
+A Quickstart
+------------
+
+This is designed to be enough information for you to run your first tests.
+Detailed information on testing can be found here: https://wiki.openstack.org/wiki/Testing
+
+*Install pip*::
+
+ [apt-get | yum] install python-pip
+More information on pip here: http://www.pip-installer.org/en/latest/
+
+*Use pip to install tox*::
+
+ pip install tox
+
+Run The Tests
+-------------
+
+*Navigate to the project's root directory and execute*::
+
+ tox
+Note: completing this command may take a long time (depending on system
+resources); also, you might not see any output until tox is complete.
+
+Information about tox can be found here: http://testrun.org/tox/latest/
+
+
+Run The Tests in One Environment
+--------------------------------
+
+Tox will run your entire test suite in the environments specified in the project tox.ini::
+
+ [tox]
+
+ envlist = <list of available environments>
+
+To run the test suite in just one of the environments in envlist execute::
+
+ tox -e <env>
+So, for example, *run the test suite in py26*::
+
+ tox -e py26
+
+Run One Test
+------------
+
+To run individual tests with tox:
+
+If testr is in tox.ini, for example::
+
+ [testenv]
+
+ includes "python setup.py testr --slowest --testr-args='{posargs}'"
+
+Run individual tests with the following syntax::
+
+ tox -e <env> -- path.to.module:Class.test
+So, for example, *run the cpu_unlimited test in Nova*::
+
+ tox -e py27 -- nova.tests.test_claims:ClaimTestCase.test_cpu_unlimited
+
+If nose is in tox.ini, for example::
+
+ [testenv]
+
+ includes "nosetests {posargs}"
+
+Run individual tests with the following syntax::
+
+ tox -e <env> -- --tests path.to.module:Class.test
+So, for example, *run the list test in Glance*::
+
+ tox -e py27 -- --tests glance.tests.unit.test_auth.py:TestImageRepoProxy.test_list
+
+Need More Info?
+---------------
+
+More information about testr: https://wiki.openstack.org/wiki/Testr
+
+More information about nose: https://nose.readthedocs.org/en/latest/
+
+
+More information about testing OpenStack code can be found here:
+https://wiki.openstack.org/wiki/Testing
diff --git a/openstack/common/config/generator.py b/openstack/common/config/generator.py
index 09649e7..8ebfba1 100755
--- a/openstack/common/config/generator.py
+++ b/openstack/common/config/generator.py
@@ -205,6 +205,7 @@ def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
+ opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
diff --git a/openstack/common/exception.py b/openstack/common/exception.py
index cdf40f3..f6c8463 100644
--- a/openstack/common/exception.py
+++ b/openstack/common/exception.py
@@ -122,9 +122,9 @@ class OpenstackException(Exception):
try:
self._error_string = self.message % kwargs
- except Exception as e:
+ except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
- raise e
+ raise
else:
# at least get the core message out if something happened
self._error_string = self.message
diff --git a/openstack/common/excutils.py b/openstack/common/excutils.py
index 06d6e29..d40d46c 100644
--- a/openstack/common/excutils.py
+++ b/openstack/common/excutils.py
@@ -22,6 +22,7 @@ Exception related utilities.
import contextlib
import logging
import sys
+import time
import traceback
from openstack.common.gettextutils import _
@@ -49,3 +50,33 @@ def save_and_reraise_exception():
traceback.format_exception(type_, value, tb))
raise
raise type_, value, tb
+
+
+def forever_retry_uncaught_exceptions(infunc):
+ def inner_func(*args, **kwargs):
+ last_log_time = 0
+ last_exc_message = None
+ exc_count = 0
+ while True:
+ try:
+ return infunc(*args, **kwargs)
+ except Exception as exc:
+ if exc.message == last_exc_message:
+ exc_count += 1
+ else:
+ exc_count = 1
+ # Do not log any more frequently than once a minute unless
+ # the exception message changes
+ cur_time = int(time.time())
+ if (cur_time - last_log_time > 60 or
+ exc.message != last_exc_message):
+ logging.exception(
+ _('Unexpected exception occurred %d time(s)... '
+ 'retrying.') % exc_count)
+ last_log_time = cur_time
+ last_exc_message = exc.message
+ exc_count = 0
+ # This should be a very rare event. In case it isn't, do
+ # a sleep.
+ time.sleep(1)
+ return inner_func
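As a hedged illustration of how the new forever_retry_uncaught_exceptions decorator is meant to be used (the flaky_consumer function and attempt counter below are invented for this sketch; only the decorator itself comes from excutils)::

    from openstack.common import excutils

    attempts = {'n': 0}

    @excutils.forever_retry_uncaught_exceptions
    def flaky_consumer():
        # Fails twice, then succeeds; each failure is logged at most once a
        # minute per distinct message and retried after a one-second sleep.
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise RuntimeError('broker connection lost')
        return 'consumed after %d attempts' % attempts['n']

    print(flaky_consumer())   # -> 'consumed after 3 attempts' (~2s later)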
diff --git a/openstack/common/network_utils.py b/openstack/common/network_utils.py
index 0fbf171..dbed1ce 100644
--- a/openstack/common/network_utils.py
+++ b/openstack/common/network_utils.py
@@ -19,6 +19,8 @@
Network-related utilities and helper functions.
"""
+import urlparse
+
def parse_host_port(address, default_port=None):
"""Interpret a string as a host:port pair.
@@ -62,3 +64,18 @@ def parse_host_port(address, default_port=None):
port = default_port
return (host, None if port is None else int(port))
+
+
+def urlsplit(url, scheme='', allow_fragments=True):
+ """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
+ This function papers over Python issue9374 when needed.
+
+ The parameters are the same as urlparse.urlsplit.
+ """
+ scheme, netloc, path, query, fragment = urlparse.urlsplit(
+ url, scheme, allow_fragments)
+ if allow_fragments and '#' in path:
+ path, fragment = path.split('#', 1)
+ if '?' in path:
+ path, query = path.split('?', 1)
+ return urlparse.SplitResult(scheme, netloc, path, query, fragment)
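As a quick example of what the new urlsplit() helper returns for a non-standard scheme (this mirrors the unit test added to tests/unit/test_network_utils.py later in this change)::

    from openstack.common import network_utils

    result = network_utils.urlsplit('rpc://myhost?someparam#somefragment')
    # Unlike urlparse.urlsplit() on Pythons affected by issue9374, the query
    # and fragment are split out even for the unknown 'rpc' scheme.
    assert result.netloc == 'myhost'
    assert result.query == 'someparam'
    assert result.fragment == 'somefragment'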
diff --git a/openstack/common/plugin/__init__.py b/openstack/common/plugin/__init__.py
deleted file mode 100644
index b706747..0000000
--- a/openstack/common/plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/openstack/common/plugin/callbackplugin.py b/openstack/common/plugin/callbackplugin.py
deleted file mode 100644
index 2de7fb0..0000000
--- a/openstack/common/plugin/callbackplugin.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack.common import log as logging
-from openstack.common.plugin import plugin
-
-
-LOG = logging.getLogger(__name__)
-
-
-class _CallbackNotifier(object):
- """Manages plugin-defined notification callbacks.
-
- For each Plugin, a CallbackNotifier will be added to the
- notification driver list. Calls to notify() with appropriate
- messages will be hooked and prompt callbacks.
-
- A callback should look like this:
- def callback(context, message, user_data)
- """
-
- def __init__(self):
- self._callback_dict = {}
-
- def _add_callback(self, event_type, callback, user_data):
- callback_list = self._callback_dict.get(event_type, [])
- callback_list.append({'function': callback,
- 'user_data': user_data})
- self._callback_dict[event_type] = callback_list
-
- def _remove_callback(self, callback):
- for callback_list in self._callback_dict.values():
- for entry in callback_list:
- if entry['function'] == callback:
- callback_list.remove(entry)
-
- def notify(self, context, message):
- if message.get('event_type') not in self._callback_dict:
- return
-
- for entry in self._callback_dict[message.get('event_type')]:
- entry['function'](context, message, entry.get('user_data'))
-
- def callbacks(self):
- return self._callback_dict
-
-
-class CallbackPlugin(plugin.Plugin):
- """Plugin with a simple callback interface.
-
- This class is provided as a convenience for producing a simple
- plugin that only watches a couple of events. For example, here's
- a subclass which prints a line the first time an instance is created.
-
- class HookInstanceCreation(CallbackPlugin):
-
- def __init__(self, _service_name):
- super(HookInstanceCreation, self).__init__()
- self._add_callback(self.magic, 'compute.instance.create.start')
-
- def magic(self):
- print "An instance was created!"
- self._remove_callback(self, self.magic)
- """
-
- def __init__(self, service_name):
- super(CallbackPlugin, self).__init__(service_name)
- self._callback_notifier = _CallbackNotifier()
- self._add_notifier(self._callback_notifier)
-
- def _add_callback(self, callback, event_type, user_data=None):
- """Add callback for a given event notification.
-
- Subclasses can call this as an alternative to implementing
- a fullblown notify notifier.
- """
- self._callback_notifier._add_callback(event_type, callback, user_data)
-
- def _remove_callback(self, callback):
- """Remove all notification callbacks to specified function."""
- self._callback_notifier._remove_callback(callback)
diff --git a/openstack/common/plugin/plugin.py b/openstack/common/plugin/plugin.py
deleted file mode 100644
index d2be0b3..0000000
--- a/openstack/common/plugin/plugin.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Plugin(object):
- """Defines an interface for adding functionality to an OpenStack service.
-
- A plugin interacts with a service via the following pathways:
-
- - An optional set of notifiers, managed by calling add_notifier()
- or by overriding _notifiers()
-
- - A set of api extensions, managed via add_api_extension_descriptor()
-
- - Direct calls to service functions.
-
- - Whatever else the plugin wants to do on its own.
-
- This is the reference implementation.
- """
-
- # The following functions are provided as convenience methods
- # for subclasses. Subclasses should call them but probably not
- # override them.
- def _add_api_extension_descriptor(self, descriptor):
- """Subclass convenience method which adds an extension descriptor.
-
- Subclass constructors should call this method when
- extending a project's REST interface.
-
- Note that once the api service has loaded, the
- API extension set is more-or-less fixed, so
- this should mainly be called by subclass constructors.
- """
- self._api_extension_descriptors.append(descriptor)
-
- def _add_notifier(self, notifier):
- """Subclass convenience method which adds a notifier.
-
- Notifier objects should implement the function notify(message).
- Each notifier receives a notify() call whenever an openstack
- service broadcasts a notification.
-
- Best to call this during construction. Notifiers are enumerated
- and registered by the pluginmanager at plugin load time.
- """
- self._notifiers.append(notifier)
-
- # The following methods are called by OpenStack services to query
- # plugin features. Subclasses should probably not override these.
- def _notifiers(self):
- """Returns list of notifiers for this plugin."""
- return self._notifiers
-
- notifiers = property(_notifiers)
-
- def _api_extension_descriptors(self):
- """Return a list of API extension descriptors.
-
- Called by a project API during its load sequence.
- """
- return self._api_extension_descriptors
-
- api_extension_descriptors = property(_api_extension_descriptors)
-
- # Most plugins will override this:
- def __init__(self, service_name):
- self._notifiers = []
- self._api_extension_descriptors = []
diff --git a/openstack/common/plugin/pluginmanager.py b/openstack/common/plugin/pluginmanager.py
deleted file mode 100644
index 3962447..0000000
--- a/openstack/common/plugin/pluginmanager.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkg_resources
-
-from oslo.config import cfg
-
-from openstack.common.gettextutils import _
-from openstack.common import log as logging
-from openstack.common.notifier import api as notifier_api
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-class PluginManager(object):
- """Manages plugin entrypoints and loading.
-
- For a service to implement this plugin interface for callback purposes:
-
- - Make use of the openstack-common notifier system
- - Instantiate this manager in each process (passing in
- project and service name)
-
- For an API service to extend itself using this plugin interface,
- it needs to query the plugin_extension_factory provided by
- the already-instantiated PluginManager.
- """
-
- def __init__(self, project_name, service_name):
- """Construct Plugin Manager; load and initialize plugins.
-
- project_name (e.g. 'nova' or 'glance') is used
- to construct the entry point that identifies plugins.
-
- The service_name (e.g. 'compute') is passed on to
- each plugin as a raw string for it to do what it will.
- """
- self._project_name = project_name
- self._service_name = service_name
- self.plugins = []
-
- def load_plugins(self):
- self.plugins = []
-
- for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
- self._project_name):
- try:
- pluginclass = entrypoint.load()
- plugin = pluginclass(self._service_name)
- self.plugins.append(plugin)
- except Exception as exc:
- LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") %
- {'plug': entrypoint, 'exc': exc})
-
- # Register individual notifiers.
- for plugin in self.plugins:
- for notifier in plugin.notifiers:
- notifier_api.add_driver(notifier)
-
- def plugin_extension_factory(self, ext_mgr):
- for plugin in self.plugins:
- descriptors = plugin.api_extension_descriptors
- for descriptor in descriptors:
- ext_mgr.load_extension(descriptor)
diff --git a/openstack/common/rootwrap/filters.py b/openstack/common/rootwrap/filters.py
index 0cc55ce..dfec412 100644
--- a/openstack/common/rootwrap/filters.py
+++ b/openstack/common/rootwrap/filters.py
@@ -235,3 +235,116 @@ class ReadFileFilter(CommandFilter):
if len(userargs) != 2:
return False
return True
+
+
+class IpFilter(CommandFilter):
+ """Specific filter for the ip utility to that does not match exec."""
+
+ def match(self, userargs):
+ if userargs[0] == 'ip':
+ if userargs[1] == 'netns':
+ return (userargs[2] in ('list', 'add', 'delete'))
+ else:
+ return True
+
+
+class EnvFilter(CommandFilter):
+ """Specific filter for the env utility.
+
+ Behaves like CommandFilter, except that it handles
+ leading env A=B.. strings appropriately.
+ """
+
+ def _extract_env(self, arglist):
+ """Extract all leading NAME=VALUE arguments from arglist."""
+
+ envs = set()
+ for arg in arglist:
+ if '=' not in arg:
+ break
+ envs.add(arg.partition('=')[0])
+ return envs
+
+ def __init__(self, exec_path, run_as, *args):
+ super(EnvFilter, self).__init__(exec_path, run_as, *args)
+
+ env_list = self._extract_env(self.args)
+ # Set exec_path to X when args are in the form of
+ # env A=a B=b C=c X Y Z
+ if "env" in exec_path and len(env_list) < len(self.args):
+ self.exec_path = self.args[len(env_list)]
+
+ def match(self, userargs):
+ # ignore leading 'env'
+ if userargs[0] == 'env':
+ userargs.pop(0)
+
+ # require one additional argument after configured ones
+ if len(userargs) < len(self.args):
+ return False
+
+ # extract all env args
+ user_envs = self._extract_env(userargs)
+ filter_envs = self._extract_env(self.args)
+ user_command = userargs[len(user_envs):len(user_envs) + 1]
+
+ # match first non-env argument with CommandFilter
+ return (super(EnvFilter, self).match(user_command)
+ and len(filter_envs) and user_envs == filter_envs)
+
+ def exec_args(self, userargs):
+ args = userargs[:]
+
+ # ignore leading 'env'
+ if args[0] == 'env':
+ args.pop(0)
+
+ # Throw away leading NAME=VALUE arguments
+ while args and '=' in args[0]:
+ args.pop(0)
+
+ return args
+
+ def get_command(self, userargs, exec_dirs=[]):
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
+ return [to_exec] + self.exec_args(userargs)[1:]
+
+ def get_environment(self, userargs):
+ env = os.environ.copy()
+
+ # ignore leading 'env'
+ if userargs[0] == 'env':
+ userargs.pop(0)
+
+ # Handle leading NAME=VALUE pairs
+ for a in userargs:
+ env_name, equals, env_value = a.partition('=')
+ if not equals:
+ break
+ if env_name and env_value:
+ env[env_name] = env_value
+
+ return env
+
+
+class ChainingFilter(CommandFilter):
+ def exec_args(self, userargs):
+ return []
+
+
+class IpNetnsExecFilter(ChainingFilter):
+ """Specific filter for the ip utility to that does match exec."""
+
+ def match(self, userargs):
+ # Network namespaces currently require root
+ # require <ns> argument
+ if self.run_as != "root" or len(userargs) < 4:
+ return False
+
+ return (userargs[:3] == ['ip', 'netns', 'exec'])
+
+ def exec_args(self, userargs):
+ args = userargs[4:]
+ if args:
+ args[0] = os.path.basename(args[0])
+ return args
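To make the EnvFilter matching rules concrete, here is a small hedged sketch; the filter definition and command line are invented for illustration, and it assumes CommandFilter.match() compares the first remaining argument against the filter's executable name, as in the existing rootwrap code::

    from openstack.common.rootwrap import filters

    # 'env A=... B=... ls': EnvFilter.__init__() collapses exec_path to 'ls'
    f = filters.EnvFilter('env', 'root', 'A=', 'B=', 'ls')

    f.match(['env', 'A=1', 'B=2', 'ls', '-l'])      # True: envs and command match
    f.match(['env', 'A=1', 'ls', '-l'])             # False: B= is missing
    f.exec_args(['env', 'A=1', 'B=2', 'ls', '-l'])  # ['ls', '-l']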
diff --git a/openstack/common/rootwrap/wrapper.py b/openstack/common/rootwrap/wrapper.py
index 5390c1b..df1a9f4 100644
--- a/openstack/common/rootwrap/wrapper.py
+++ b/openstack/common/rootwrap/wrapper.py
@@ -131,6 +131,20 @@ def match_filter(filter_list, userargs, exec_dirs=[]):
for f in filter_list:
if f.match(userargs):
+ if isinstance(f, filters.ChainingFilter):
+ # This command calls exec; verify that the remaining args
+ # match another filter.
+ def non_chain_filter(fltr):
+ return (fltr.run_as == f.run_as
+ and not isinstance(fltr, filters.ChainingFilter))
+
+ leaf_filters = [fltr for fltr in filter_list
+ if non_chain_filter(fltr)]
+ args = f.exec_args(userargs)
+ if (not args or not match_filter(leaf_filters,
+ args, exec_dirs=exec_dirs)):
+ continue
+
# Try other filters if executable is absent
if not f.get_exec(exec_dirs=exec_dirs):
if not first_not_executable_filter:
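A hedged sketch of how the chaining check above plays out with the new IpNetnsExecFilter; the command line is invented, and only the match()/exec_args() behaviour shown in filters.py is relied upon::

    from openstack.common.rootwrap import filters

    f = filters.IpNetnsExecFilter('ip', 'root')
    userargs = ['ip', 'netns', 'exec', 'ns1', 'ip', 'link', 'list']

    f.match(userargs)      # True: run as root and starts with 'ip netns exec'
    f.exec_args(userargs)  # ['ip', 'link', 'list'], which match_filter() then
                           # requires some non-chaining root filter to accept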
diff --git a/openstack/common/rpc/amqp.py b/openstack/common/rpc/amqp.py
index 22e01d7..c3e4e26 100644
--- a/openstack/common/rpc/amqp.py
+++ b/openstack/common/rpc/amqp.py
@@ -151,11 +151,13 @@ class ConnectionContext(rpc_common.Connection):
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
- def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+ def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
+ ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
- exchange_name)
+ exchange_name,
+ ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
@@ -219,12 +221,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
- try:
- msg = {'result': reply, 'failure': failure}
- except TypeError:
- msg = {'result': dict((k, repr(v))
- for k, v in reply.__dict__.iteritems()),
- 'failure': failure}
+ msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
diff --git a/openstack/common/rpc/impl_kombu.py b/openstack/common/rpc/impl_kombu.py
index c062d9a..8fb3504 100644
--- a/openstack/common/rpc/impl_kombu.py
+++ b/openstack/common/rpc/impl_kombu.py
@@ -30,6 +30,7 @@ import kombu.entity
import kombu.messaging
from oslo.config import cfg
+from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import network_utils
from openstack.common.rpc import amqp as rpc_amqp
@@ -129,6 +130,7 @@ class ConsumerBase(object):
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
+ self.ack_on_error = kwargs.get('ack_on_error', True)
self.reconnect(channel)
def reconnect(self, channel):
@@ -138,6 +140,36 @@ class ConsumerBase(object):
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
+ def _callback_handler(self, message, callback):
+ """Call callback with deserialized message.
+
+ Messages that are processed without exception are ack'ed.
+
+ If the message processing generates an exception, it will be
+ ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
+ Rejection is better than waiting for the message to timeout.
+ Rejected messages are immediately requeued.
+ """
+
+ ack_msg = False
+ try:
+ msg = rpc_common.deserialize_msg(message.payload)
+ callback(msg)
+ ack_msg = True
+ except Exception:
+ if self.ack_on_error:
+ ack_msg = True
+ LOG.exception(_("Failed to process message"
+ " ... skipping it."))
+ else:
+ LOG.exception(_("Failed to process message"
+ " ... will requeue."))
+ finally:
+ if ack_msg:
+ message.ack()
+ else:
+ message.reject()
+
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
@@ -150,8 +182,6 @@ class ConsumerBase(object):
If kwargs['nowait'] is True, then this call will block until
a message is read.
- Messages will automatically be acked if the callback doesn't
- raise an exception
"""
options = {'consumer_tag': self.tag}
@@ -162,13 +192,7 @@ class ConsumerBase(object):
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
- try:
- msg = rpc_common.deserialize_msg(message.payload)
- callback(msg)
- except Exception:
- LOG.exception(_("Failed to process message... skipping it."))
- finally:
- message.ack()
+ self._callback_handler(message, callback)
self.queue.consume(*args, callback=_callback, **options)
@@ -635,8 +659,8 @@ class Connection(object):
def _consume():
if info['do_consume']:
- queues_head = self.consumers[:-1]
- queues_tail = self.consumers[-1]
+ queues_head = self.consumers[:-1] # not fanout.
+ queues_tail = self.consumers[-1] # fanout
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
@@ -685,11 +709,12 @@ class Connection(object):
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
- exchange_name=None):
+ exchange_name=None, ack_on_error=True):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
+ ack_on_error=ack_on_error,
),
topic, callback)
@@ -724,6 +749,7 @@ class Connection(object):
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
+ @excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
@@ -754,7 +780,7 @@ class Connection(object):
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
- exchange_name=None):
+ exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
@@ -775,6 +801,7 @@ class Connection(object):
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
+ ack_on_error=ack_on_error,
)
diff --git a/openstack/common/rpc/impl_qpid.py b/openstack/common/rpc/impl_qpid.py
index 7352517..c988ae8 100644
--- a/openstack/common/rpc/impl_qpid.py
+++ b/openstack/common/rpc/impl_qpid.py
@@ -24,6 +24,7 @@ import eventlet
import greenlet
from oslo.config import cfg
+from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
@@ -118,10 +119,17 @@ class ConsumerBase(object):
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
- self.reconnect(session)
+ self.connect(session)
+
+ def connect(self, session):
+ """Declare the reciever on connect."""
+ self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
+ self._declare_receiver(session)
+
+ def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
@@ -152,11 +160,15 @@ class ConsumerBase(object):
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
+ # TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
+ def get_node_name(self):
+ return self.address.split(';')[0]
+
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
@@ -206,6 +218,7 @@ class FanoutConsumer(ConsumerBase):
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
+ self.conf = conf
super(FanoutConsumer, self).__init__(
session, callback,
@@ -214,6 +227,18 @@ class FanoutConsumer(ConsumerBase):
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
+ def reconnect(self, session):
+ topic = self.get_node_name()
+ params = {
+ 'session': session,
+ 'topic': topic,
+ 'callback': self.callback,
+ }
+
+ self.__init__(conf=self.conf, **params)
+
+ super(FanoutConsumer, self).reconnect(session)
+
class Publisher(object):
"""Base Publisher class."""
@@ -575,6 +600,7 @@ class Connection(object):
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
+ @excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
@@ -615,7 +641,7 @@ class Connection(object):
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
- exchange_name=None):
+ exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
diff --git a/requirements.txt b/requirements.txt
index d7a33c6..ec6dbdd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,8 +7,8 @@ greenlet>=0.3.2
lxml
routes==1.12.3
iso8601>=0.1.4
-anyjson==0.2.4
-kombu==1.0.4
+anyjson>=0.3.3
+kombu>2.4.7
argparse
stevedore
SQLAlchemy>=0.7.8,<=0.7.9
diff --git a/test-requirements.txt b/test-requirements.txt
index a19b4af..7ffabfe 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,15 +1,11 @@
coverage
+discover
fixtures>=0.3.12
flake8==2.0
hacking>=0.5.3,<0.6
mock
mox==0.5.3
mysql-python
-nose
-nose-exclude
-nosexcover
-openstack.nose_plugin
-nosehtmloutput
pep8==1.4.5
pyflakes==0.7.2
pylint
@@ -17,5 +13,5 @@ pyzmq==2.2.0.1
redis
setuptools-git>=0.4
sphinx
+testrepository>=0.0.13
testtools>=0.9.22
-webtest
diff --git a/tests/unit/db/sqlalchemy/test_sqlalchemy.py b/tests/unit/db/sqlalchemy/test_sqlalchemy.py
index 46a0cb2..48d6cf7 100644
--- a/tests/unit/db/sqlalchemy/test_sqlalchemy.py
+++ b/tests/unit/db/sqlalchemy/test_sqlalchemy.py
@@ -52,15 +52,15 @@ sql_max_overflow=50
sql_connection_debug=60
sql_connection_trace=True
""")])
- test_utils.CONF(['--config-file', paths[0]])
- self.assertEquals(test_utils.CONF.database.connection, 'x://y.z')
- self.assertEquals(test_utils.CONF.database.min_pool_size, 10)
- self.assertEquals(test_utils.CONF.database.max_pool_size, 20)
- self.assertEquals(test_utils.CONF.database.max_retries, 30)
- self.assertEquals(test_utils.CONF.database.retry_interval, 40)
- self.assertEquals(test_utils.CONF.database.max_overflow, 50)
- self.assertEquals(test_utils.CONF.database.connection_debug, 60)
- self.assertEquals(test_utils.CONF.database.connection_trace, True)
+ self.conf(['--config-file', paths[0]])
+ self.assertEquals(self.conf.database.connection, 'x://y.z')
+ self.assertEquals(self.conf.database.min_pool_size, 10)
+ self.assertEquals(self.conf.database.max_pool_size, 20)
+ self.assertEquals(self.conf.database.max_retries, 30)
+ self.assertEquals(self.conf.database.retry_interval, 40)
+ self.assertEquals(self.conf.database.max_overflow, 50)
+ self.assertEquals(self.conf.database.connection_debug, 60)
+ self.assertEquals(self.conf.database.connection_trace, True)
def test_session_parameters(self):
paths = self.create_tempfiles([('test', """[database]
@@ -74,16 +74,16 @@ connection_debug=60
connection_trace=True
pool_timeout=7
""")])
- test_utils.CONF(['--config-file', paths[0]])
- self.assertEquals(test_utils.CONF.database.connection, 'x://y.z')
- self.assertEquals(test_utils.CONF.database.min_pool_size, 10)
- self.assertEquals(test_utils.CONF.database.max_pool_size, 20)
- self.assertEquals(test_utils.CONF.database.max_retries, 30)
- self.assertEquals(test_utils.CONF.database.retry_interval, 40)
- self.assertEquals(test_utils.CONF.database.max_overflow, 50)
- self.assertEquals(test_utils.CONF.database.connection_debug, 60)
- self.assertEquals(test_utils.CONF.database.connection_trace, True)
- self.assertEquals(test_utils.CONF.database.pool_timeout, 7)
+ self.conf(['--config-file', paths[0]])
+ self.assertEquals(self.conf.database.connection, 'x://y.z')
+ self.assertEquals(self.conf.database.min_pool_size, 10)
+ self.assertEquals(self.conf.database.max_pool_size, 20)
+ self.assertEquals(self.conf.database.max_retries, 30)
+ self.assertEquals(self.conf.database.retry_interval, 40)
+ self.assertEquals(self.conf.database.max_overflow, 50)
+ self.assertEquals(self.conf.database.connection_debug, 60)
+ self.assertEquals(self.conf.database.connection_trace, True)
+ self.assertEquals(self.conf.database.pool_timeout, 7)
def test_dbapi_database_deprecated_parameters(self):
paths = self.create_tempfiles([('test',
@@ -97,17 +97,16 @@ pool_timeout=7
'sqlalchemy_max_overflow=101\n'
'sqlalchemy_pool_timeout=5\n'
)])
-
- test_utils.CONF(['--config-file', paths[0]])
- self.assertEquals(test_utils.CONF.database.connection,
+ self.conf(['--config-file', paths[0]])
+ self.assertEquals(self.conf.database.connection,
'fake_connection')
- self.assertEquals(test_utils.CONF.database.idle_timeout, 100)
- self.assertEquals(test_utils.CONF.database.min_pool_size, 99)
- self.assertEquals(test_utils.CONF.database.max_pool_size, 199)
- self.assertEquals(test_utils.CONF.database.max_retries, 22)
- self.assertEquals(test_utils.CONF.database.retry_interval, 17)
- self.assertEquals(test_utils.CONF.database.max_overflow, 101)
- self.assertEquals(test_utils.CONF.database.pool_timeout, 5)
+ self.assertEquals(self.conf.database.idle_timeout, 100)
+ self.assertEquals(self.conf.database.min_pool_size, 99)
+ self.assertEquals(self.conf.database.max_pool_size, 199)
+ self.assertEquals(self.conf.database.max_retries, 22)
+ self.assertEquals(self.conf.database.retry_interval, 17)
+ self.assertEquals(self.conf.database.max_overflow, 101)
+ self.assertEquals(self.conf.database.pool_timeout, 5)
class SessionErrorWrapperTestCase(test_base.DbTestCase):
diff --git a/tests/unit/db/test_api.py b/tests/unit/db/test_api.py
index f6e0d4c..2a8db3b 100644
--- a/tests/unit/db/test_api.py
+++ b/tests/unit/db/test_api.py
@@ -40,9 +40,9 @@ class DBAPITestCase(test_utils.BaseTestCase):
'dbapi_use_tpool=True\n'
)])
- test_utils.CONF(['--config-file', paths[0]])
- self.assertEquals(test_utils.CONF.database.backend, 'test_123')
- self.assertEquals(test_utils.CONF.database.use_tpool, True)
+ self.conf(['--config-file', paths[0]])
+ self.assertEquals(self.conf.database.backend, 'test_123')
+ self.assertEquals(self.conf.database.use_tpool, True)
def test_dbapi_parameters(self):
paths = self.create_tempfiles([('test',
@@ -51,9 +51,9 @@ class DBAPITestCase(test_utils.BaseTestCase):
'use_tpool=True\n'
)])
- test_utils.CONF(['--config-file', paths[0]])
- self.assertEquals(test_utils.CONF.database.backend, 'test_123')
- self.assertEquals(test_utils.CONF.database.use_tpool, True)
+ self.conf(['--config-file', paths[0]])
+ self.assertEquals(self.conf.database.backend, 'test_123')
+ self.assertEquals(self.conf.database.use_tpool, True)
def test_dbapi_api_class_method_and_tpool_false(self):
backend_mapping = {'test_known': 'tests.unit.db.test_api'}
diff --git a/tests/unit/plugin/__init__.py b/tests/unit/plugin/__init__.py
deleted file mode 100644
index b706747..0000000
--- a/tests/unit/plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/tests/unit/plugin/test_callback_plugin.py b/tests/unit/plugin/test_callback_plugin.py
deleted file mode 100644
index 3f3fd63..0000000
--- a/tests/unit/plugin/test_callback_plugin.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkg_resources
-
-from openstack.common.notifier import api as notifier_api
-from openstack.common.plugin import callbackplugin
-from openstack.common.plugin import pluginmanager
-from tests import utils as test_utils
-
-userdatastring = "magic user data string"
-
-
-class TestCBP(callbackplugin.CallbackPlugin):
-
- def callback1(self, context, message, userdata):
- self.callback1count += 1
-
- def callback2(self, context, message, userdata):
- self.callback2count += 1
-
- def callback3(self, context, message, userdata):
- self.callback3count += 1
- self.userdata = userdata
-
- def __init__(self, service_name):
- super(TestCBP, self).__init__(service_name)
- self.callback1count = 0
- self.callback2count = 0
- self.callback3count = 0
-
- self._add_callback(self.callback1, 'type1', None)
- self._add_callback(self.callback2, 'type1', None)
- self._add_callback(self.callback3, 'type2', 'magic user data string')
-
-
-class CallbackTestCase(test_utils.BaseTestCase):
- """Tests for the callback plugin convenience class."""
-
- def test_callback_plugin_subclass(self):
-
- class MockEntrypoint(pkg_resources.EntryPoint):
- def load(self):
- return TestCBP
-
- def mock_iter_entry_points(_t):
- return [MockEntrypoint("fake", "fake", ["fake"])]
-
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- mock_iter_entry_points)
-
- plugmgr = pluginmanager.PluginManager("testproject", "testservice")
- plugmgr.load_plugins()
- self.assertEqual(len(plugmgr.plugins), 1)
- plugin = plugmgr.plugins[0]
- self.assertEqual(len(plugin.notifiers), 1)
-
- notifier_api.notify('contextarg', 'publisher_id', 'type1',
- notifier_api.WARN, dict(a=3))
-
- self.assertEqual(plugin.callback1count, 1)
- self.assertEqual(plugin.callback2count, 1)
- self.assertEqual(plugin.callback3count, 0)
-
- notifier_api.notify('contextarg', 'publisher_id', 'type2',
- notifier_api.WARN, dict(a=3))
-
- self.assertEqual(plugin.callback1count, 1)
- self.assertEqual(plugin.callback2count, 1)
- self.assertEqual(plugin.callback3count, 1)
- self.assertEqual(plugin.userdata, userdatastring)
-
- plugin._remove_callback(plugin.callback1)
-
- notifier_api.notify('contextarg', 'publisher_id', 'type1',
- notifier_api.WARN, dict(a=3))
-
- self.assertEqual(plugin.callback1count, 1)
- self.assertEqual(plugin.callback2count, 2)
- self.assertEqual(plugin.callback3count, 1)
diff --git a/tests/unit/rpc/test_common.py b/tests/unit/rpc/test_common.py
index c2432f4..6f32005 100644
--- a/tests/unit/rpc/test_common.py
+++ b/tests/unit/rpc/test_common.py
@@ -108,7 +108,7 @@ class RpcCommonTestCase(test_utils.BaseTestCase):
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % e.__class__.__module__
e.__class__ = new_ex_type
- raise e
+ raise
try:
raise_remote_exception()
diff --git a/tests/unit/rpc/test_kombu.py b/tests/unit/rpc/test_kombu.py
index 159fefb..54f8389 100644
--- a/tests/unit/rpc/test_kombu.py
+++ b/tests/unit/rpc/test_kombu.py
@@ -23,11 +23,13 @@ import eventlet
eventlet.monkey_patch()
import contextlib
+import functools
import logging
import mock
from oslo.config import cfg
import six
+import time
from openstack.common import exception
from openstack.common.rpc import amqp as rpc_amqp
@@ -69,6 +71,7 @@ class KombuStubs:
@staticmethod
def setUp(self):
if kombu:
+ self.conf = FLAGS
self.config(fake_rabbit=True)
self.config(rpc_response_timeout=5)
self.rpc = impl_kombu
@@ -77,6 +80,20 @@ class KombuStubs:
self.rpc = None
+class FakeMessage(object):
+ acked = False
+ rejected = False
+
+ def __init__(self, payload):
+ self.payload = payload
+
+ def ack(self):
+ self.acked = True
+
+ def reject(self):
+ self.rejected = True
+
+
class RpcKombuTestCase(amqp.BaseRpcAMQPTestCase):
def setUp(self):
KombuStubs.setUp(self)
@@ -112,6 +129,74 @@ class RpcKombuTestCase(amqp.BaseRpcAMQPTestCase):
self.assertEqual(self.received_message, message)
+ def test_callback_handler_ack_on_error(self):
+ """The default case will ack on error. Same as before.
+ """
+ def _callback(msg):
+ pass
+
+ conn = self.rpc.create_connection(FLAGS)
+ consumer = conn.declare_consumer(functools.partial(
+ impl_kombu.TopicConsumer,
+ name=None,
+ exchange_name=None),
+ "a_topic", _callback)
+ message = FakeMessage("some message")
+ consumer._callback_handler(message, _callback)
+ self.assertTrue(message.acked)
+ self.assertFalse(message.rejected)
+
+ def test_callback_handler_ack_on_error_exception(self):
+
+ def _callback(msg):
+ raise MyException()
+
+ conn = self.rpc.create_connection(FLAGS)
+ consumer = conn.declare_consumer(functools.partial(
+ impl_kombu.TopicConsumer,
+ name=None,
+ exchange_name=None,
+ ack_on_error=True),
+ "a_topic", _callback)
+ message = FakeMessage("some message")
+ consumer._callback_handler(message, _callback)
+ self.assertTrue(message.acked)
+ self.assertFalse(message.rejected)
+
+ def test_callback_handler_no_ack_on_error_exception(self):
+
+ def _callback(msg):
+ raise MyException()
+
+ conn = self.rpc.create_connection(FLAGS)
+ consumer = conn.declare_consumer(functools.partial(
+ impl_kombu.TopicConsumer,
+ name=None,
+ exchange_name=None,
+ ack_on_error=False),
+ "a_topic", _callback)
+ message = FakeMessage("some message")
+ consumer._callback_handler(message, _callback)
+ self.assertFalse(message.acked)
+ self.assertTrue(message.rejected)
+
+ def test_callback_handler_no_ack_on_error(self):
+
+ def _callback(msg):
+ pass
+
+ conn = self.rpc.create_connection(FLAGS)
+ consumer = conn.declare_consumer(functools.partial(
+ impl_kombu.TopicConsumer,
+ name=None,
+ exchange_name=None,
+ ack_on_error=False),
+ "a_topic", _callback)
+ message = FakeMessage("some message")
+ consumer._callback_handler(message, _callback)
+ self.assertTrue(message.acked)
+ self.assertFalse(message.rejected)
+
def test_message_ttl_on_timeout(self):
"""Test message ttl being set by request timeout. The message
should die on the vine and never arrive.
@@ -308,6 +393,22 @@ class RpcKombuTestCase(amqp.BaseRpcAMQPTestCase):
impl_kombu.cast_to_server(FLAGS, ctxt, server_params,
'fake_topic', {'msg': 'fake'})
+ def test_fanout_success(self):
+ # Overriding the method in the base class because the test
+ # seems to fail for the same reason as
+ # test_fanout_send_receive().
+ self.skipTest("kombu memory transport seems buggy with "
+ "fanout queues as this test passes when "
+ "you use rabbit (fake_rabbit=False)")
+
+ def test_cast_success_despite_missing_args(self):
+ # Overriding the method in the base class because the test
+ # seems to fail for the same reason as
+ # test_fanout_send_receive().
+ self.skipTest("kombu memory transport seems buggy with "
+ "fanout queues as this test passes when "
+ "you use rabbit (fake_rabbit=False)")
+
def test_fanout_send_receive(self):
"""Test sending to a fanout exchange and consuming from 2 queues."""
@@ -514,7 +615,7 @@ class RpcKombuTestCase(amqp.BaseRpcAMQPTestCase):
'pool.name',
)
- def test_join_consumer_pool(self):
+ def test_join_consumer_pool_default(self):
meth = 'declare_topic_consumer'
with mock.patch.object(self.rpc.Connection, meth) as p:
conn = self.rpc.create_connection(FLAGS)
@@ -529,8 +630,76 @@ class RpcKombuTestCase(amqp.BaseRpcAMQPTestCase):
queue_name='pool.name',
exchange_name='exchange.name',
topic='topic.name',
+ ack_on_error=True,
)
+ def test_join_consumer_pool_no_ack(self):
+ meth = 'declare_topic_consumer'
+ with mock.patch.object(self.rpc.Connection, meth) as p:
+ conn = self.rpc.create_connection(FLAGS)
+ conn.join_consumer_pool(
+ callback=lambda *a, **k: (a, k),
+ pool_name='pool.name',
+ topic='topic.name',
+ exchange_name='exchange.name',
+ ack_on_error=False,
+ )
+ p.assert_called_with(
+ callback=mock.ANY, # the callback wrapper
+ queue_name='pool.name',
+ exchange_name='exchange.name',
+ topic='topic.name',
+ ack_on_error=False,
+ )
+
+ # used to make unexpected exception tests run faster
+ def my_time_sleep(self, sleep_time):
+ return
+
+ def test_service_consume_thread_unexpected_exceptions(self):
+
+ def my_TopicConsumer_consume(myself, *args, **kwargs):
+ self.consume_calls += 1
+ # see if it can sustain three failures
+ if self.consume_calls < 3:
+ raise Exception('unexpected exception')
+ else:
+ self.orig_TopicConsumer_consume(myself, *args, **kwargs)
+
+ self.consume_calls = 0
+ self.orig_TopicConsumer_consume = impl_kombu.TopicConsumer.consume
+ self.stubs.Set(impl_kombu.TopicConsumer, 'consume',
+ my_TopicConsumer_consume)
+ self.stubs.Set(time, 'sleep', self.my_time_sleep)
+
+ value = 42
+ result = self.rpc.call(FLAGS, self.context, self.topic,
+ {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
+ def test_replyproxy_consume_thread_unexpected_exceptions(self):
+
+ def my_DirectConsumer_consume(myself, *args, **kwargs):
+ self.consume_calls += 1
+ # see if it can sustain three failures
+ if self.consume_calls < 3:
+ raise Exception('unexpected exception')
+ else:
+ self.orig_DirectConsumer_consume(myself, *args, **kwargs)
+
+ self.consume_calls = 1
+ self.orig_DirectConsumer_consume = impl_kombu.DirectConsumer.consume
+ self.stubs.Set(impl_kombu.DirectConsumer, 'consume',
+ my_DirectConsumer_consume)
+ self.stubs.Set(time, 'sleep', self.my_time_sleep)
+
+ value = 42
+ result = self.rpc.call(FLAGS, self.context, self.topic,
+ {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
class RpcKombuHATestCase(utils.BaseTestCase):
def setUp(self):
diff --git a/tests/unit/rpc/test_qpid.py b/tests/unit/rpc/test_qpid.py
index 0bad387..5d51a4b 100644
--- a/tests/unit/rpc/test_qpid.py
+++ b/tests/unit/rpc/test_qpid.py
@@ -26,6 +26,7 @@ eventlet.monkey_patch()
import fixtures
import mox
from oslo.config import cfg
+import time
import uuid
from openstack.common import context
@@ -218,7 +219,7 @@ class RpcQpidTestCase(utils.BaseTestCase):
)
connection.close()
- def test_topic_consumer(self):
+ def test_topic_consumer(self, consume_thread_exc=False):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_receiver = self.mox.CreateMock(self.orig_receiver)
@@ -235,6 +236,9 @@ class RpcQpidTestCase(utils.BaseTestCase):
self.mock_session.receiver(expected_address).AndReturn(
self.mock_receiver)
self.mock_receiver.capacity = 1
+ if consume_thread_exc:
+ self.mock_session.next_receiver(timeout=None).AndRaise(
+ Exception('unexpected exception'))
self.mock_connection.close()
self.mox.ReplayAll()
@@ -244,8 +248,14 @@ class RpcQpidTestCase(utils.BaseTestCase):
lambda *_x, **_y: None,
queue_name='impl.qpid.test.workers',
exchange_name='foobar')
+ if consume_thread_exc:
+ connection.consume_in_thread()
+ time.sleep(0)
connection.close()
+ def test_consume_thread_exception(self):
+ self.test_topic_consumer(consume_thread_exc=True)
+
def _test_cast(self, fanout, server_params=None):
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
@@ -338,7 +348,11 @@ class RpcQpidTestCase(utils.BaseTestCase):
self._setup_to_server_tests(server_params)
self._test_cast(fanout=True, server_params=server_params)
+ def my_time_sleep(self, arg):
+ pass
+
def _test_call_mock_common(self):
+ self.stubs.Set(time, 'sleep', self.my_time_sleep)
self.mock_connection = self.mox.CreateMock(self.orig_connection)
self.mock_session = self.mox.CreateMock(self.orig_session)
self.mock_sender = self.mox.CreateMock(self.orig_sender)
@@ -367,9 +381,12 @@ class RpcQpidTestCase(utils.BaseTestCase):
self.mock_session.close()
self.mock_connection.session().AndReturn(self.mock_session)
- def _test_call(self, multi):
+ def _test_call(self, multi, reply_proxy_exc):
self._test_call_mock_common()
+ if reply_proxy_exc:
+ self.mock_session.next_receiver(timeout=None).AndRaise(
+ Exception('unexpected exception'))
self.mock_session.next_receiver(timeout=None).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
@@ -393,6 +410,9 @@ class RpcQpidTestCase(utils.BaseTestCase):
"failure": False,
"ending": False}))
self.mock_session.acknowledge(mox.IgnoreArg())
+ if reply_proxy_exc:
+ self.mock_session.next_receiver(timeout=None).AndRaise(
+ Exception('unexpected exception'))
self.mock_session.next_receiver(timeout=None).AndReturn(
self.mock_receiver)
self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
@@ -425,7 +445,10 @@ class RpcQpidTestCase(utils.BaseTestCase):
self.uuid4 = uuid.uuid4()
def test_call(self):
- self._test_call(multi=False)
+ self._test_call(multi=False, reply_proxy_exc=False)
+
+ def test_replyproxy_consume_thread_unexpected_exceptions(self):
+ self._test_call(multi=False, reply_proxy_exc=True)
def _test_call_with_timeout(self, timeout, expect_failure):
self._test_call_mock_common()
@@ -483,7 +506,7 @@ class RpcQpidTestCase(utils.BaseTestCase):
self._test_call_with_timeout(timeout=0.1, expect_failure=True)
def test_multicall(self):
- self._test_call(multi=True)
+ self._test_call(multi=True, reply_proxy_exc=False)
def _test_publisher(self, message=True):
"""Test that messages containing long strings are correctly serialized
diff --git a/tests/unit/rpc/test_zmq.py b/tests/unit/rpc/test_zmq.py
index b0f0262..c87a040 100644
--- a/tests/unit/rpc/test_zmq.py
+++ b/tests/unit/rpc/test_zmq.py
@@ -60,6 +60,7 @@ class _RpcZmqBaseTestCase(common.BaseRpcTestCase):
self.reactor = None
self.rpc = impl_zmq
+ self.conf = FLAGS
self.config(rpc_zmq_bind_address='127.0.0.1')
self.config(rpc_zmq_host='127.0.0.1')
self.config(rpc_response_timeout=5)
diff --git a/tests/unit/test_excutils.py b/tests/unit/test_excutils.py
index 8c8137a..b8f9b96 100644
--- a/tests/unit/test_excutils.py
+++ b/tests/unit/test_excutils.py
@@ -14,6 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+import mox
+import time
+
from openstack.common import excutils
from tests import utils
@@ -47,3 +51,110 @@ class SaveAndReraiseTest(utils.BaseTestCase):
e = _e
self.assertEqual(str(e), msg)
+
+
+class ForeverRetryUncaughtExceptionsTest(utils.BaseTestCase):
+
+ @excutils.forever_retry_uncaught_exceptions
+ def exception_generator(self):
+ exc = self.exception_to_raise()
+ while exc is not None:
+ raise exc
+ exc = self.exception_to_raise()
+
+ def exception_to_raise(self):
+ return None
+
+ def my_time_sleep(self, arg):
+ pass
+
+ def exc_retrier_common_start(self):
+ self.stubs.Set(time, 'sleep', self.my_time_sleep)
+ self.mox.StubOutWithMock(logging, 'exception')
+ self.mox.StubOutWithMock(time, 'time')
+ self.mox.StubOutWithMock(self, 'exception_to_raise')
+
+ def exc_retrier_sequence(self, exc_id=None, timestamp=None,
+ exc_count=None):
+ self.exception_to_raise().AndReturn(
+ Exception('unexpected %d' % exc_id))
+ time.time().AndReturn(timestamp)
+ if exc_count != 0:
+ logging.exception(mox.In(
+ 'Unexpected exception occurred %d time(s)' % exc_count))
+
+ def exc_retrier_common_end(self):
+ self.exception_to_raise().AndReturn(None)
+ self.mox.ReplayAll()
+ self.exception_generator()
+ self.addCleanup(self.stubs.UnsetAll)
+
+ def test_exc_retrier_1exc_gives_1log(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_same_10exc_1min_gives_1log(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ # By design, the following exceptions don't get logged because they
+ # are within the same minute.
+ for i in range(2, 11):
+ self.exc_retrier_sequence(exc_id=1, timestamp=i, exc_count=0)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_same_2exc_2min_gives_2logs(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ self.exc_retrier_sequence(exc_id=1, timestamp=65, exc_count=1)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_same_10exc_2min_gives_2logs(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ self.exc_retrier_sequence(exc_id=1, timestamp=12, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=23, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=34, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=45, exc_count=0)
+ # The previous 4 exceptions are counted here
+ self.exc_retrier_sequence(exc_id=1, timestamp=106, exc_count=5)
+ # Again, the following are not logged due to being within
+ # the same minute
+ self.exc_retrier_sequence(exc_id=1, timestamp=117, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=128, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=139, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=150, exc_count=0)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_mixed_4exc_1min_gives_2logs(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ # By design, this second 'unexpected 1' exception is not counted. This
+ # is likely a rare thing and is a sacrifice for code simplicity.
+ self.exc_retrier_sequence(exc_id=1, timestamp=10, exc_count=0)
+ self.exc_retrier_sequence(exc_id=2, timestamp=20, exc_count=1)
+ # Again, trailing exceptions within a minute are not counted.
+ self.exc_retrier_sequence(exc_id=2, timestamp=30, exc_count=0)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_mixed_4exc_2min_gives_2logs(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ # Again, this second exception of the same type is not counted
+ # for the sake of code simplicity.
+ self.exc_retrier_sequence(exc_id=1, timestamp=10, exc_count=0)
+ # The difference between this and the previous case is the log
+ # is also triggered by more than a minute expiring.
+ self.exc_retrier_sequence(exc_id=2, timestamp=100, exc_count=1)
+ self.exc_retrier_sequence(exc_id=2, timestamp=110, exc_count=0)
+ self.exc_retrier_common_end()
+
+ def test_exc_retrier_mixed_4exc_2min_gives_3logs(self):
+ self.exc_retrier_common_start()
+ self.exc_retrier_sequence(exc_id=1, timestamp=1, exc_count=1)
+ # This time the second 'unexpected 1' exception is counted due
+ # to the same exception still occurring when the minute expires.
+ self.exc_retrier_sequence(exc_id=1, timestamp=10, exc_count=0)
+ self.exc_retrier_sequence(exc_id=1, timestamp=100, exc_count=2)
+ self.exc_retrier_sequence(exc_id=2, timestamp=110, exc_count=1)
+ self.exc_retrier_common_end()
diff --git a/tests/unit/test_network_utils.py b/tests/unit/test_network_utils.py
index 2783e70..4ac0222 100644
--- a/tests/unit/test_network_utils.py
+++ b/tests/unit/test_network_utils.py
@@ -40,3 +40,20 @@ class NetworkUtilsTest(utils.BaseTestCase):
network_utils.parse_host_port(
'2001:db8:85a3::8a2e:370:7334',
default_port=1234))
+
+ def test_urlsplit(self):
+ result = network_utils.urlsplit('rpc://myhost?someparam#somefragment')
+ self.assertEqual(result.scheme, 'rpc')
+ self.assertEqual(result.netloc, 'myhost')
+ self.assertEqual(result.path, '')
+ self.assertEqual(result.query, 'someparam')
+ self.assertEqual(result.fragment, 'somefragment')
+
+ result = network_utils.urlsplit(
+ 'rpc://myhost/mypath?someparam#somefragment',
+ allow_fragments=False)
+ self.assertEqual(result.scheme, 'rpc')
+ self.assertEqual(result.netloc, 'myhost')
+ self.assertEqual(result.path, '/mypath')
+ self.assertEqual(result.query, 'someparam#somefragment')
+ self.assertEqual(result.fragment, '')
diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py
deleted file mode 100644
index fd653d7..0000000
--- a/tests/unit/test_plugin.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pkg_resources
-
-from openstack.common.notifier import api as notifier_api
-from openstack.common.plugin import plugin
-from openstack.common.plugin import pluginmanager
-from tests import utils
-
-
-class SimpleNotifier(object):
- def __init__(self):
- self.message_list = []
-
- def notify(self, context, message):
- self.context = context
- self.message_list.append(message)
-
-
-class ManagerTestCase(utils.BaseTestCase):
- def test_constructs(self):
- manager1 = pluginmanager.PluginManager("testproject", "testservice")
- self.assertNotEqual(manager1, False)
-
-
-class NotifyTestCase(utils.BaseTestCase):
- """Test case for the plugin notification interface."""
-
- def test_add_notifier(self):
- notifier1 = SimpleNotifier()
- notifier2 = SimpleNotifier()
- notifier3 = SimpleNotifier()
-
- testplugin = plugin.Plugin('service')
- testplugin._add_notifier(notifier1)
- testplugin._add_notifier(notifier2)
- self.assertEqual(len(testplugin.notifiers), 2)
-
- testplugin._add_notifier(notifier3)
- self.assertEqual(len(testplugin.notifiers), 3)
-
- def test_notifier_action(self):
- def mock_iter_entry_points(_t):
- return [MockEntrypoint("fake", "fake", ["fake"])]
-
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- mock_iter_entry_points)
-
- plugmgr = pluginmanager.PluginManager("testproject", "testservice")
- plugmgr.load_plugins()
- self.assertEqual(len(plugmgr.plugins), 1)
- self.assertEqual(len(plugmgr.plugins[0].notifiers), 1)
- notifier = plugmgr.plugins[0].notifiers[0]
-
- notifier_api.notify('contextarg', 'publisher_id', 'event_type',
- notifier_api.WARN, dict(a=3))
-
- self.assertEqual(len(notifier.message_list), 1)
-
-
-class StubControllerExtension(object):
- name = 'stubextension'
- alias = 'stubby'
-
-
-class TestPluginClass(plugin.Plugin):
-
- def __init__(self, service_name):
- super(TestPluginClass, self).__init__(service_name)
- self._add_api_extension_descriptor(StubControllerExtension)
- notifier1 = SimpleNotifier()
- self._add_notifier(notifier1)
-
-
-class MockEntrypoint(pkg_resources.EntryPoint):
- def load(self):
- return TestPluginClass
-
-
-class MockExtManager():
- def __init__(self):
- self.descriptors = []
-
- def load_extension(self, descriptor):
- self.descriptors.append(descriptor)
-
-
-class APITestCase(utils.BaseTestCase):
- """Test case for the plugin api extension interface."""
- def test_add_extension(self):
- def mock_load(_s):
- return TestPluginClass()
-
- def mock_iter_entry_points(_t):
- return [MockEntrypoint("fake", "fake", ["fake"])]
-
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- mock_iter_entry_points)
-
- mgr = MockExtManager()
- plugmgr = pluginmanager.PluginManager("testproject", "testservice")
- plugmgr.load_plugins()
- plugmgr.plugin_extension_factory(mgr)
-
- self.assertTrue(StubControllerExtension in mgr.descriptors)
diff --git a/tests/unit/test_rootwrap.py b/tests/unit/test_rootwrap.py
index 0e08b5e..02789ec 100644
--- a/tests/unit/test_rootwrap.py
+++ b/tests/unit/test_rootwrap.py
@@ -61,10 +61,11 @@ class RootwrapTestCase(utils.BaseTestCase):
self.assertRaises(wrapper.NoFilterMatched,
wrapper.match_filter, self.filters, invalid)
- def _test_DnsmasqFilter(self, filter_class, config_file_arg):
+ def _test_EnvFilter_as_DnsMasq(self, config_file_arg):
usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
'dnsmasq', 'foo']
- f = filter_class("/usr/bin/dnsmasq", "root")
+ f = filters.EnvFilter("env", "root", config_file_arg + '=A',
+ 'NETWORK_ID=', "/usr/bin/dnsmasq")
self.assertTrue(f.match(usercmd))
self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
env = f.get_environment(usercmd)
@@ -72,10 +73,68 @@ class RootwrapTestCase(utils.BaseTestCase):
self.assertEqual(env.get('NETWORK_ID'), 'foobar')
def test_DnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DnsmasqFilter, 'CONFIG_FILE')
+ self._test_EnvFilter_as_DnsMasq('CONFIG_FILE')
def test_DeprecatedDnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DeprecatedDnsmasqFilter, 'FLAGFILE')
+ self._test_EnvFilter_as_DnsMasq('FLAGFILE')
+
+ def test_EnvFilter(self):
+ envset = ['A=/some/thing', 'B=somethingelse']
+ envcmd = ['env'] + envset
+ realcmd = ['sleep', '10']
+ usercmd = envcmd + realcmd
+
+ f = filters.EnvFilter("env", "root", "A=", "B=ignored", "sleep")
+ # accept with leading env
+ self.assertTrue(f.match(envcmd + ["sleep"]))
+ # accept without leading env
+ self.assertTrue(f.match(envset + ["sleep"]))
+
+ # any other command does not match
+ self.assertFalse(f.match(envcmd + ["sleep2"]))
+ self.assertFalse(f.match(envset + ["sleep2"]))
+
+ # accept any trailing arguments
+ self.assertTrue(f.match(usercmd))
+
+ # require given environment variables to match
+ self.assertFalse(f.match([envcmd, 'C=ELSE']))
+ self.assertFalse(f.match(['env', 'C=xx']))
+ self.assertFalse(f.match(['env', 'A=xx']))
+
+ # require the env command to be given
+ # (otherwise CommandFilters should match)
+ self.assertFalse(f.match(realcmd))
+ # require command to match
+ self.assertFalse(f.match(envcmd))
+ self.assertFalse(f.match(envcmd[1:]))
+
+ # ensure that the env command is stripped when executing
+ self.assertEqual(f.exec_args(usercmd), realcmd)
+ env = f.get_environment(usercmd)
+ # check that environment variables are set
+ self.assertEqual(env.get('A'), '/some/thing')
+ self.assertEqual(env.get('B'), 'somethingelse')
+ self.assertFalse('sleep' in env.keys())
+
+ def test_EnvFilter_without_leading_env(self):
+ envset = ['A=/some/thing', 'B=somethingelse']
+ envcmd = ['env'] + envset
+ realcmd = ['sleep', '10']
+
+ f = filters.EnvFilter("sleep", "root", "A=", "B=ignored")
+
+ # accept without leading env
+ self.assertTrue(f.match(envset + ["sleep"]))
+
+ self.assertEqual(f.get_command(envcmd + realcmd), realcmd)
+ self.assertEqual(f.get_command(envset + realcmd), realcmd)
+
+ env = f.get_environment(envset + realcmd)
+ # check that environment variables are set
+ self.assertEqual(env.get('A'), '/some/thing')
+ self.assertEqual(env.get('B'), 'somethingelse')
+ self.assertFalse('sleep' in env.keys())
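
A sketch of how an ``EnvFilter`` like the ones above might be dispatched through ``wrapper.match_filter``; the filter arguments mirror the dnsmasq fixture and the command line is illustrative only::

    from openstack.common.rootwrap import filters, wrapper

    filter_list = [
        filters.EnvFilter("env", "root", "CONFIG_FILE=", "NETWORK_ID=",
                          "/usr/bin/dnsmasq"),
    ]
    usercmd = ['env', 'CONFIG_FILE=/etc/dnsmasq.conf', 'NETWORK_ID=net1',
               'dnsmasq', '--no-hosts']

    matched = wrapper.match_filter(filter_list, usercmd)
    # the leading 'env' and the variable assignments are stripped before exec
    assert matched.get_command(usercmd) == ['/usr/bin/dnsmasq', '--no-hosts']
    # the declared variables are exported into the command's environment
    assert matched.get_environment(usercmd)['NETWORK_ID'] == 'net1'
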
def test_KillFilter(self):
if not os.path.exists("/proc/%d" % os.getpid()):
@@ -169,6 +228,66 @@ class RootwrapTestCase(utils.BaseTestCase):
self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
self.assertTrue(f.match(usercmd))
+ def test_IpFilter_non_netns(self):
+ f = filters.IpFilter('/sbin/ip', 'root')
+ self.assertTrue(f.match(['ip', 'link', 'list']))
+
+ def _test_IpFilter_netns_helper(self, action):
+ f = filters.IpFilter('/sbin/ip', 'root')
+ self.assertTrue(f.match(['ip', 'link', action]))
+
+ def test_IpFilter_netns_add(self):
+ self._test_IpFilter_netns_helper('add')
+
+ def test_IpFilter_netns_delete(self):
+ self._test_IpFilter_netns_helper('delete')
+
+ def test_IpFilter_netns_list(self):
+ self._test_IpFilter_netns_helper('list')
+
+ def test_IpNetnsExecFilter_match(self):
+ f = filters.IpNetnsExecFilter('/sbin/ip', 'root')
+ self.assertTrue(
+ f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']))
+
+ def test_IpNetnsExecFilter_nomatch(self):
+ f = filters.IpNetnsExecFilter('/sbin/ip', 'root')
+ self.assertFalse(f.match(['ip', 'link', 'list']))
+
+ # verify that at least a namespace is given
+ self.assertFalse(f.match(['ip', 'netns', 'exec']))
+
+ def test_IpNetnsExecFilter_nomatch_nonroot(self):
+ f = filters.IpNetnsExecFilter('/sbin/ip', 'user')
+ self.assertFalse(
+ f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']))
+
+ def test_match_filter_recurses_exec_command_filter_matches(self):
+ filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
+ filters.IpFilter('/sbin/ip', 'root')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']
+
+ self.assertIsNotNone(wrapper.match_filter(filter_list, args))
+
+ def test_match_filter_recurses_exec_command_matches_user(self):
+ filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
+ filters.IpFilter('/sbin/ip', 'user')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']
+
+ # Currently ip netns exec requires root, so verify that
+ # no non-root filter is matched, as that would escalate privileges
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, filter_list, args)
+
+ def test_match_filter_recurses_exec_command_filter_does_not_match(self):
+ filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
+ filters.IpFilter('/sbin/ip', 'root')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'netns', 'exec', 'bar',
+ 'ip', 'link', 'list']
+
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, filter_list, args)
+
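
The recursion under test can be read as: ``match_filter`` strips the ``ip netns exec <ns>`` prefix and re-matches the inner command against the same filter list, refusing any match that would run the inner command as a non-root user. A hedged sketch of that behaviour, reusing the filters from the tests above::

    from openstack.common.rootwrap import filters, wrapper

    filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
                   filters.IpFilter('/sbin/ip', 'user')]
    args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']

    try:
        wrapper.match_filter(filter_list, args)
    except wrapper.NoFilterMatched:
        # expected: the inner 'ip link list' only matches a non-root filter,
        # and allowing it would escalate that command to root
        pass
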
def test_exec_dirs_search(self):
# This test supposes you have /bin/cat or /usr/bin/cat locally
f = filters.CommandFilter("cat", "root")
diff --git a/tests/utils.py b/tests/utils.py
index 794a3d2..e93c278 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -27,17 +27,16 @@ import testtools
from openstack.common import exception
from openstack.common.fixture import moxstubout
-CONF = cfg.CONF
-
class BaseTestCase(testtools.TestCase):
- def setUp(self):
+ def setUp(self, conf=cfg.CONF):
super(BaseTestCase, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.mox = moxfixture.mox
self.stubs = moxfixture.stubs
- self.addCleanup(CONF.reset)
+ self.conf = conf
+ self.addCleanup(self.conf.reset)
self.useFixture(fixtures.FakeLogger('openstack.common'))
self.useFixture(fixtures.Timeout(30, True))
self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
@@ -46,7 +45,7 @@ class BaseTestCase(testtools.TestCase):
def tearDown(self):
super(BaseTestCase, self).tearDown()
- CONF.reset()
+ self.conf.reset()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
@@ -79,4 +78,4 @@ class BaseTestCase(testtools.TestCase):
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
- CONF.set_override(k, v, group)
+ self.conf.set_override(k, v, group)
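
With the new ``conf`` hook, a consuming project can point ``BaseTestCase`` at its own ``ConfigOpts`` instead of the global ``cfg.CONF``; a small sketch (class name is illustrative)::

    from oslo.config import cfg

    from tests import utils


    class MyDriverTestCase(utils.BaseTestCase):
        def setUp(self):
            # an isolated ConfigOpts keeps overrides from leaking between
            # test cases; BaseTestCase resets it again on cleanup/tearDown
            self.my_conf = cfg.ConfigOpts()
            super(MyDriverTestCase, self).setUp(conf=self.my_conf)
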
diff --git a/tox.ini b/tox.ini
index 88ee6d1..9c64629 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,26 +2,22 @@
envlist = py26,py27,py33,pep8,pylint
[testenv]
+sitepackages = False
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=0.05
- NOSE_OPENSTACK_YELLOW=0.025
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
python tools/patch_tox_venv.py
- nosetests --with-doctest --exclude-dir=tests/testmods {posargs}
+ # due to dependencies between tests (bug 1192207) we use the `--concurrency=1` option
+ python setup.py testr --slowest --testr-args='--subunit --concurrency=1 {posargs}'
[flake8]
show-source = True
-ignore = H202,H302,H304
+ignore = H302,H304
exclude = .venv,.tox,dist,doc,*.egg,.update-venv
[testenv:pep8]
-commands = flake8
+commands = flake8 {posargs}
[testenv:pylint]
deps = pylint>=0.26.0
@@ -30,7 +26,9 @@ commands = python ./tools/lint.py ./openstack
[testenv:cover]
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_COVERAGE=1
+commands =
+ python tools/patch_tox_venv.py
+ python setup.py testr --coverage --testr-args='{posargs}'
[testenv:venv]
commands = {posargs}