summary | refs | log | tree | commit | diff | stats
path: root/openstack
diff options
context:
space:
mode:
authorChris Behrens <cbehrens@codestud.com>2013-02-18 02:35:34 +0000
committerChris Behrens <cbehrens@codestud.com>2013-02-18 17:52:16 +0000
commit02c12aade7a0c28c66cb45b54786c90c0ae8fb09 (patch)
tree1373a0e88aa6fda796f28205bef89c20dc21231b /openstack
parent615394e6dec650e3e9a94aaac8143f9cea88b0f5 (diff)
downloadoslo-02c12aade7a0c28c66cb45b54786c90c0ae8fb09.tar.gz
oslo-02c12aade7a0c28c66cb45b54786c90c0ae8fb09.tar.xz
oslo-02c12aade7a0c28c66cb45b54786c90c0ae8fb09.zip
Move DB thread pooling to DB API loader
Fixes bug 1128605 The dbpool code in sqlalchemy session is the wrong place to implement thread pooling as it wraps each individual SQL call to run in its own thread. When combined with SQL server locking, all threads can be eaten waiting on locks with none available to run a 'COMMIT'. The correct place to do thread pooling is around each DB API call. This patch removes dbpool from sqlalchemy and creates a common DB API loader for all openstack projects which implements the following configuration options: db_backend: Full path to DB API backend module (or a known short name if a project chooses to implement a mapping) dbapi_use_tpool: True or False whether to use thread pooling around all DB API calls. DB backend modules must implement a 'get_backend()' method. Example usage for nova/db/api.py would be: """ from nova.openstack.common.db import api as db_api _KNOWN_BACKENDS = {'sqlalchemy': 'nova.db.sqlalchemy.api'} IMPL = db_api.DBAPI(backend_mapping=_KNOWN_BACKENDS) """ NOTE: Enabling thread pooling will be broken until this issue is resolved in eventlet _OR_ until we modify our eventlet.monkey_patch() calls to include 'thread=False': https://bitbucket.org/eventlet/eventlet/issue/137/ Change-Id: Idf14563ea07cf8ccf2a77b3f53659d8528927fc7
Diffstat (limited to 'openstack')
-rw-r--r--  openstack/common/db/api.py               101
-rw-r--r--  openstack/common/db/sqlalchemy/session.py  35
2 files changed, 101 insertions, 35 deletions
diff --git a/openstack/common/db/api.py b/openstack/common/db/api.py
new file mode 100644
index 0000000..407790d
--- /dev/null
+++ b/openstack/common/db/api.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Multiple DB API backend support.
+
+Supported configuration options:
+
+`db_backend`: DB backend name or full module path to DB backend module.
+`dbapi_use_tpool`: Enable thread pooling of DB API calls.
+
+A DB backend module should implement a method named 'get_backend' which
+takes no arguments. The method can return any object that implements DB
+API methods.
+
+*NOTE*: There are bugs in eventlet when using tpool combined with
+threading locks. The python logging module happens to use such locks. To
+work around this issue, be sure to specify thread=False with
+eventlet.monkey_patch().
+
+A bug for eventlet has been filed here:
+
+https://bitbucket.org/eventlet/eventlet/issue/137/
+"""
+import functools
+
+from oslo.config import cfg
+
+from openstack.common import lockutils
+from openstack.common import importutils
+
+
+db_opts = [
+ cfg.StrOpt('db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ cfg.BoolOpt('dbapi_use_tpool',
+ default=False,
+ help='Enable the experimental use of thread pooling for '
+ 'all DB API calls')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+
+
+class DBAPI(object):
+ def __init__(self, backend_mapping=None):
+ if backend_mapping is None:
+ backend_mapping = {}
+ self.__backend = None
+ self.__backend_mapping = backend_mapping
+
+ @lockutils.synchronized('dbapi_backend', 'oslo-')
+ def __get_backend(self):
+ """Get the actual backend. May be a module or an instance of
+ a class. Doesn't matter to us. We do this synchronized as it's
+ possible multiple greenthreads started very quickly trying to do
+ DB calls and eventlet can switch threads before self.__backend gets
+ assigned.
+ """
+ if self.__backend:
+ # Another thread assigned it
+ return self.__backend
+ backend_name = CONF.db_backend
+ self.__use_tpool = CONF.dbapi_use_tpool
+ if self.__use_tpool:
+ from eventlet import tpool
+ self.__tpool = tpool
+ # Import the untranslated name if we don't have a
+ # mapping.
+ backend_path = self.__backend_mapping.get(backend_name,
+ backend_name)
+ backend_mod = importutils.import_module(backend_path)
+ self.__backend = backend_mod.get_backend()
+ return self.__backend
+
+ def __getattr__(self, key):
+ backend = self.__backend or self.__get_backend()
+ attr = getattr(backend, key)
+ if not self.__use_tpool or not hasattr(attr, '__call__'):
+ return attr
+
+ def tpool_wrapper(*args, **kwargs):
+ return self.__tpool.execute(attr, *args, **kwargs)
+
+ functools.update_wrapper(tpool_wrapper, attr)
+ return tpool_wrapper
diff --git a/openstack/common/db/sqlalchemy/session.py b/openstack/common/db/sqlalchemy/session.py
index 1e9e123..96f582f 100644
--- a/openstack/common/db/sqlalchemy/session.py
+++ b/openstack/common/db/sqlalchemy/session.py
@@ -244,7 +244,6 @@ import os.path
import re
import time
-from eventlet import db_pool
from eventlet import greenthread
from oslo.config import cfg
from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
@@ -253,14 +252,10 @@ import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-from openstack.common import importutils
from openstack.common import log as logging
from openstack.common.gettextutils import _
from openstack.common import timeutils
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
- from MySQLdb.constants import CLIENT as mysql_client_constants
sql_opts = [
cfg.StrOpt('sql_connection',
@@ -303,9 +298,6 @@ sql_opts = [
cfg.BoolOpt('sql_connection_trace',
default=False,
help='Add python stack traces to SQL as comment strings'),
- cfg.BoolOpt('sql_dbpool_enable',
- default=False,
- help="enable the use of eventlet's db_pool for MySQL"),
]
CONF = cfg.CONF
@@ -517,33 +509,6 @@ def create_engine(sql_connection):
if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
- elif all((CONF.sql_dbpool_enable, MySQLdb,
- "mysql" in connection_dict.drivername)):
- LOG.info(_("Using mysql/eventlet db_pool."))
- # MySQLdb won't accept 'None' in the password field
- password = connection_dict.password or ''
- pool_args = {
- 'db': connection_dict.database,
- 'passwd': password,
- 'host': connection_dict.host,
- 'user': connection_dict.username,
- 'min_size': CONF.sql_min_pool_size,
- 'max_size': CONF.sql_max_pool_size,
- 'max_idle': CONF.sql_idle_timeout,
- 'client_flag': mysql_client_constants.FOUND_ROWS}
-
- pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
- def creator():
- conn = pool.create()
- if isinstance(conn, tuple):
- # NOTE(belliott) eventlet >= 0.10 returns a tuple
- now, now, conn = conn
-
- return conn
-
- engine_args['creator'] = creator
-
else:
engine_args['pool_size'] = CONF.sql_max_pool_size
if CONF.sql_max_overflow is not None: