path: root/openstack/common/db/sqlalchemy
author     Chris Behrens <cbehrens@codestud.com>    2013-02-18 02:35:34 +0000
committer  Chris Behrens <cbehrens@codestud.com>    2013-02-18 17:52:16 +0000
commit     02c12aade7a0c28c66cb45b54786c90c0ae8fb09 (patch)
tree       1373a0e88aa6fda796f28205bef89c20dc21231b /openstack/common/db/sqlalchemy
parent     615394e6dec650e3e9a94aaac8143f9cea88b0f5 (diff)
Move DB thread pooling to DB API loader
Fixes bug 1128605

The dbpool code in the sqlalchemy session module is the wrong place to
implement thread pooling, as it wraps each individual SQL call to run in its
own thread. When combined with SQL server locking, all threads can be eaten
waiting on locks, with none left available to run a 'COMMIT'. The correct
place to do thread pooling is around each DB API call.

This patch removes dbpool from sqlalchemy and creates a common DB API loader
for all openstack projects, which implements the following configuration
options:

  db_backend: Full path to the DB API backend module (or a known short name,
      if a project chooses to implement a mapping)
  dbapi_use_tpool: Whether to use thread pooling around all DB API calls
      (True or False)

DB backend modules must implement a 'get_backend()' method.

Example usage for nova/db/api.py would be:

"""
from nova.openstack.common.db import api as db_api

_KNOWN_BACKENDS = {'sqlalchemy': 'nova.db.sqlalchemy.api'}

IMPL = db_api.DBAPI(backend_mapping=_KNOWN_BACKENDS)
"""

NOTE: Enabling thread pooling will be broken until this issue is resolved in
eventlet _OR_ until we modify our eventlet.monkey_patch() calls to include
'thread=False':
https://bitbucket.org/eventlet/eventlet/issue/137/

Change-Id: Idf14563ea07cf8ccf2a77b3f53659d8528927fc7
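For context, here is a minimal sketch of the loader idea described above, assuming eventlet is installed: resolve the backend module (optionally through a project-supplied short-name mapping), call its get_backend(), and, when thread pooling is enabled, run each DB API call in eventlet's OS thread pool via tpool.execute(). The constructor arguments and wrapper details are illustrative assumptions, not the exact oslo implementation (which reads the db_backend and dbapi_use_tpool settings from oslo.config):

# Illustrative sketch only -- not the actual oslo db/api.py module.
import importlib

from eventlet import tpool


class DBAPI(object):
    def __init__(self, backend_name, backend_mapping=None, use_tpool=False):
        # Translate a short name such as 'sqlalchemy' into a full module path
        # when the project supplies a mapping; otherwise treat the name as the
        # full path (the 'db_backend' option in the commit message).
        backend_mapping = backend_mapping or {}
        module_path = backend_mapping.get(backend_name, backend_name)
        backend_module = importlib.import_module(module_path)
        # Backend modules are expected to expose get_backend().
        self._backend = backend_module.get_backend()
        self._use_tpool = use_tpool  # mirrors 'dbapi_use_tpool'

    def __getattr__(self, name):
        func = getattr(self._backend, name)
        if not self._use_tpool or not callable(func):
            return func

        def tpool_wrapper(*args, **kwargs):
            # Run the whole DB API call in a real OS thread so a blocking SQL
            # driver cannot starve the eventlet hub, instead of wrapping each
            # individual SQL statement as the removed dbpool code did.
            return tpool.execute(func, *args, **kwargs)

        return tpool_wrapper

A project would then build its IMPL object much like the nova example above, e.g. DBAPI('sqlalchemy', backend_mapping=_KNOWN_BACKENDS, use_tpool=CONF.dbapi_use_tpool); the option names come from the commit message, but this exact wiring is an assumption for illustration.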
Diffstat (limited to 'openstack/common/db/sqlalchemy')
-rw-r--r--   openstack/common/db/sqlalchemy/session.py   35
1 file changed, 0 insertions, 35 deletions
diff --git a/openstack/common/db/sqlalchemy/session.py b/openstack/common/db/sqlalchemy/session.py
index 1e9e123..96f582f 100644
--- a/openstack/common/db/sqlalchemy/session.py
+++ b/openstack/common/db/sqlalchemy/session.py
@@ -244,7 +244,6 @@ import os.path
 import re
 import time
 
-from eventlet import db_pool
 from eventlet import greenthread
 from oslo.config import cfg
 from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
@@ -253,14 +252,10 @@ import sqlalchemy.orm
 from sqlalchemy.pool import NullPool, StaticPool
 from sqlalchemy.sql.expression import literal_column
 
-from openstack.common import importutils
 from openstack.common import log as logging
 from openstack.common.gettextutils import _
 from openstack.common import timeutils
 
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
-    from MySQLdb.constants import CLIENT as mysql_client_constants
 
 sql_opts = [
     cfg.StrOpt('sql_connection',
@@ -303,9 +298,6 @@ sql_opts = [
     cfg.BoolOpt('sql_connection_trace',
                 default=False,
                 help='Add python stack traces to SQL as comment strings'),
-    cfg.BoolOpt('sql_dbpool_enable',
-                default=False,
-                help="enable the use of eventlet's db_pool for MySQL"),
 ]
 
 CONF = cfg.CONF
@@ -517,33 +509,6 @@ def create_engine(sql_connection):
     if CONF.sql_connection == "sqlite://":
         engine_args["poolclass"] = StaticPool
         engine_args["connect_args"] = {'check_same_thread': False}
-    elif all((CONF.sql_dbpool_enable, MySQLdb,
-              "mysql" in connection_dict.drivername)):
-        LOG.info(_("Using mysql/eventlet db_pool."))
-        # MySQLdb won't accept 'None' in the password field
-        password = connection_dict.password or ''
-        pool_args = {
-            'db': connection_dict.database,
-            'passwd': password,
-            'host': connection_dict.host,
-            'user': connection_dict.username,
-            'min_size': CONF.sql_min_pool_size,
-            'max_size': CONF.sql_max_pool_size,
-            'max_idle': CONF.sql_idle_timeout,
-            'client_flag': mysql_client_constants.FOUND_ROWS}
-
-        pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
-        def creator():
-            conn = pool.create()
-            if isinstance(conn, tuple):
-                # NOTE(belliott) eventlet >= 0.10 returns a tuple
-                now, now, conn = conn
-
-            return conn
-
-        engine_args['creator'] = creator
-
     else:
         engine_args['pool_size'] = CONF.sql_max_pool_size
         if CONF.sql_max_overflow is not None:
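For readers unfamiliar with the hook being removed above: the deleted block handed connections from eventlet's db_pool to SQLAlchemy through the engine's 'creator' argument, a callable that returns a raw DB-API connection whenever the pool needs a new one. A tiny illustration of that mechanism follows, using sqlite3 instead of MySQLdb/db_pool purely for demonstration; it is not code from this patch.

# Demonstrates SQLAlchemy's 'creator' hook with sqlite3; the removed oslo code
# passed a similar callable that pulled connections from eventlet's db_pool.
import sqlite3

import sqlalchemy


def make_connection():
    # Any callable returning a raw DB-API connection can serve as 'creator';
    # SQLAlchemy skips its own connect logic and calls this instead.
    return sqlite3.connect(':memory:', check_same_thread=False)


engine = sqlalchemy.create_engine('sqlite://', creator=make_connection)

with engine.connect() as conn:
    print(conn.execute(sqlalchemy.text('SELECT 1')).scalar())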