author     Jenkins <jenkins@review.openstack.org>     2013-02-20 21:17:02 +0000
committer  Gerrit Code Review <review@openstack.org>  2013-02-20 21:17:02 +0000
commit     7e6513dedc58577c187d69d576659bc7f2ed7a87 (patch)
tree       cdf9621bd7f99076ae0e42a4c80470168bb5e815 /openstack/common
parent     a6260da3e3696e77e3e7fcd13acae416dc34809d (diff)
parent     02c12aade7a0c28c66cb45b54786c90c0ae8fb09 (diff)
download   oslo-7e6513dedc58577c187d69d576659bc7f2ed7a87.tar.gz
           oslo-7e6513dedc58577c187d69d576659bc7f2ed7a87.tar.xz
           oslo-7e6513dedc58577c187d69d576659bc7f2ed7a87.zip
Merge "Move DB thread pooling to DB API loader"
Diffstat (limited to 'openstack/common')
-rw-r--r--  openstack/common/db/api.py                  101
-rw-r--r--  openstack/common/db/sqlalchemy/session.py    35
2 files changed, 101 insertions(+), 35 deletions(-)
diff --git a/openstack/common/db/api.py b/openstack/common/db/api.py
new file mode 100644
index 0000000..407790d
--- /dev/null
+++ b/openstack/common/db/api.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Multiple DB API backend support.
+
+Supported configuration options:
+
+`db_backend`: DB backend name or full module path to DB backend module.
+`dbapi_use_tpool`: Enable thread pooling of DB API calls.
+
+A DB backend module should implement a method named 'get_backend' which
+takes no arguments. The method can return any object that implements DB
+API methods.
+
+*NOTE*: There are bugs in eventlet when using tpool combined with
+threading locks. The python logging module happens to use such locks. To
+work around this issue, be sure to specify thread=False with
+eventlet.monkey_patch().
+
+A bug for eventlet has been filed here:
+
+https://bitbucket.org/eventlet/eventlet/issue/137/
+"""
+import functools
+
+from oslo.config import cfg
+
+from openstack.common import lockutils
+from openstack.common import importutils
+
+
+db_opts = [
+ cfg.StrOpt('db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ cfg.BoolOpt('dbapi_use_tpool',
+ default=False,
+ help='Enable the experimental use of thread pooling for '
+ 'all DB API calls')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+
+
+class DBAPI(object):
+ def __init__(self, backend_mapping=None):
+ if backend_mapping is None:
+ backend_mapping = {}
+ self.__backend = None
+ self.__backend_mapping = backend_mapping
+
+ @lockutils.synchronized('dbapi_backend', 'oslo-')
+ def __get_backend(self):
+ """Get the actual backend. May be a module or an instance of
+ a class. Doesn't matter to us. We do this synchronized as it's
+ possible multiple greenthreads started very quickly trying to do
+ DB calls and eventlet can switch threads before self.__backend gets
+ assigned.
+ """
+ if self.__backend:
+ # Another thread assigned it
+ return self.__backend
+ backend_name = CONF.db_backend
+ self.__use_tpool = CONF.dbapi_use_tpool
+ if self.__use_tpool:
+ from eventlet import tpool
+ self.__tpool = tpool
+ # Import the untranslated name if we don't have a
+ # mapping.
+ backend_path = self.__backend_mapping.get(backend_name,
+ backend_name)
+ backend_mod = importutils.import_module(backend_path)
+ self.__backend = backend_mod.get_backend()
+ return self.__backend
+
+ def __getattr__(self, key):
+ backend = self.__backend or self.__get_backend()
+ attr = getattr(backend, key)
+ if not self.__use_tpool or not hasattr(attr, '__call__'):
+ return attr
+
+ def tpool_wrapper(*args, **kwargs):
+ return self.__tpool.execute(attr, *args, **kwargs)
+
+ functools.update_wrapper(tpool_wrapper, attr)
+ return tpool_wrapper
diff --git a/openstack/common/db/sqlalchemy/session.py b/openstack/common/db/sqlalchemy/session.py
index 1e9e123..96f582f 100644
--- a/openstack/common/db/sqlalchemy/session.py
+++ b/openstack/common/db/sqlalchemy/session.py
@@ -244,7 +244,6 @@ import os.path
import re
import time
-from eventlet import db_pool
from eventlet import greenthread
from oslo.config import cfg
from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
@@ -253,14 +252,10 @@ import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-from openstack.common import importutils
from openstack.common import log as logging
from openstack.common.gettextutils import _
from openstack.common import timeutils
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
- from MySQLdb.constants import CLIENT as mysql_client_constants
sql_opts = [
cfg.StrOpt('sql_connection',
@@ -303,9 +298,6 @@ sql_opts = [
cfg.BoolOpt('sql_connection_trace',
default=False,
help='Add python stack traces to SQL as comment strings'),
- cfg.BoolOpt('sql_dbpool_enable',
- default=False,
- help="enable the use of eventlet's db_pool for MySQL"),
]
CONF = cfg.CONF
@@ -517,33 +509,6 @@ def create_engine(sql_connection):
if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
- elif all((CONF.sql_dbpool_enable, MySQLdb,
- "mysql" in connection_dict.drivername)):
- LOG.info(_("Using mysql/eventlet db_pool."))
- # MySQLdb won't accept 'None' in the password field
- password = connection_dict.password or ''
- pool_args = {
- 'db': connection_dict.database,
- 'passwd': password,
- 'host': connection_dict.host,
- 'user': connection_dict.username,
- 'min_size': CONF.sql_min_pool_size,
- 'max_size': CONF.sql_max_pool_size,
- 'max_idle': CONF.sql_idle_timeout,
- 'client_flag': mysql_client_constants.FOUND_ROWS}
-
- pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
- def creator():
- conn = pool.create()
- if isinstance(conn, tuple):
- # NOTE(belliott) eventlet >= 0.10 returns a tuple
- now, now, conn = conn
-
- return conn
-
- engine_args['creator'] = creator
-
else:
engine_args['pool_size'] = CONF.sql_max_pool_size
if CONF.sql_max_overflow is not None:
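
With the eventlet db_pool creator gone from create_engine(), MySQL connections go through SQLAlchemy's ordinary connection pool (sql_max_pool_size / sql_max_overflow above), and offloading blocking DB calls to OS threads is now opted into through the loader in db/api.py rather than per-engine. A rough, hypothetical config migration for a deployment that had set the removed option (values shown are illustrative):

[DEFAULT]
# Previously: sql_dbpool_enable = True (option removed by this change).
# Now: keep SQLAlchemy's own pooling and, if desired, offload DB API
# calls to eventlet's tpool through the DB API loader instead:
db_backend = sqlalchemy
dbapi_use_tpool = True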