From 02c12aade7a0c28c66cb45b54786c90c0ae8fb09 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 18 Feb 2013 02:35:34 +0000
Subject: Move DB thread pooling to DB API loader

Fixes bug 1128605

The dbpool code in sqlalchemy session is the wrong place to implement
thread pooling as it wraps each individual SQL call to run in its own
thread. When combined with SQL server locking, all threads can be eaten
waiting on locks with none available to run a 'COMMIT'.

The correct place to do thread pooling is around each DB API call.
This patch removes dbpool from sqlalchemy and creates a common DB API
loader for all openstack projects which implements the following
configuration options:

db_backend: Full path to DB API backend module (or a known short name
            if a project chooses to implement a mapping)
dbapi_use_tpool: True or False whether to use thread pooling around all
                 DB API calls.

DB backend modules must implement a 'get_backend()' method.

Example usage for nova/db/api.py would be:

"""
from nova.openstack.common.db import api as db_api

_KNOWN_BACKENDS = {'sqlalchemy': 'nova.db.sqlalchemy.api'}

IMPL = db_api.DBAPI(backend_mapping=_KNOWN_BACKENDS)
"""

NOTE: Enabling thread pooling will be broken until this issue is
resolved in eventlet _OR_ until we modify our eventlet.monkey_patch()
calls to include 'thread=False':

https://bitbucket.org/eventlet/eventlet/issue/137/

Change-Id: Idf14563ea07cf8ccf2a77b3f53659d8528927fc7
---
 openstack/common/db/sqlalchemy/session.py | 35 -------------------------------
 1 file changed, 35 deletions(-)

(limited to 'openstack/common/db/sqlalchemy')

diff --git a/openstack/common/db/sqlalchemy/session.py b/openstack/common/db/sqlalchemy/session.py
index 1e9e123..96f582f 100644
--- a/openstack/common/db/sqlalchemy/session.py
+++ b/openstack/common/db/sqlalchemy/session.py
@@ -244,7 +244,6 @@ import os.path
 import re
 import time
 
-from eventlet import db_pool
 from eventlet import greenthread
 from oslo.config import cfg
 from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
@@ -253,14 +252,10 @@ import sqlalchemy.orm
 from sqlalchemy.pool import NullPool, StaticPool
 from sqlalchemy.sql.expression import literal_column
 
-from openstack.common import importutils
 from openstack.common import log as logging
 from openstack.common.gettextutils import _
 from openstack.common import timeutils
 
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
-    from MySQLdb.constants import CLIENT as mysql_client_constants
 
 sql_opts = [
     cfg.StrOpt('sql_connection',
@@ -303,9 +298,6 @@ sql_opts = [
     cfg.BoolOpt('sql_connection_trace',
                 default=False,
                 help='Add python stack traces to SQL as comment strings'),
-    cfg.BoolOpt('sql_dbpool_enable',
-                default=False,
-                help="enable the use of eventlet's db_pool for MySQL"),
 ]
 
 CONF = cfg.CONF
@@ -517,33 +509,6 @@ def create_engine(sql_connection):
     if CONF.sql_connection == "sqlite://":
         engine_args["poolclass"] = StaticPool
         engine_args["connect_args"] = {'check_same_thread': False}
-    elif all((CONF.sql_dbpool_enable, MySQLdb,
-              "mysql" in connection_dict.drivername)):
-        LOG.info(_("Using mysql/eventlet db_pool."))
-        # MySQLdb won't accept 'None' in the password field
-        password = connection_dict.password or ''
-        pool_args = {
-            'db': connection_dict.database,
-            'passwd': password,
-            'host': connection_dict.host,
-            'user': connection_dict.username,
-            'min_size': CONF.sql_min_pool_size,
-            'max_size': CONF.sql_max_pool_size,
-            'max_idle': CONF.sql_idle_timeout,
-            'client_flag': mysql_client_constants.FOUND_ROWS}
-
-        pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
-        def creator():
-            conn = pool.create()
-            if isinstance(conn, tuple):
-                # NOTE(belliott) eventlet >= 0.10 returns a tuple
-                now, now, conn = conn
-
-            return conn
-
-        engine_args['creator'] = creator
-
     else:
         engine_args['pool_size'] = CONF.sql_max_pool_size
        if CONF.sql_max_overflow is not None:
--
cgit
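Note: the DB API loader itself is not part of this diff (only the dbpool removal is). For illustration, a minimal sketch of a loader matching the commit message's description might look like the example below. This is not the merged oslo code; the option defaults, the use of importlib, and dispatching through eventlet's tpool.execute() are assumptions based on the description above.

"""
import importlib

from eventlet import tpool
from oslo.config import cfg

db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='Full path to the DB API backend module, or a short '
                    'name resolved via the project backend mapping '
                    '(default is an assumption for this sketch)'),
    cfg.BoolOpt('dbapi_use_tpool',
                default=False,
                help='Whether to use thread pooling around all DB API '
                     'calls (default is an assumption for this sketch)'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)


class DBAPI(object):
    def __init__(self, backend_mapping=None):
        backend_mapping = backend_mapping or {}
        # Resolve a known short name (e.g. 'sqlalchemy') to a full module
        # path, otherwise treat db_backend as the module path itself.
        backend_name = CONF.db_backend
        backend_path = backend_mapping.get(backend_name, backend_name)
        backend_mod = importlib.import_module(backend_path)
        # Backend modules must implement get_backend(), per the commit
        # message above.
        self.__backend = backend_mod.get_backend()
        self.__use_tpool = CONF.dbapi_use_tpool

    def __getattr__(self, key):
        attr = getattr(self.__backend, key)
        if not self.__use_tpool or not callable(attr):
            return attr

        def tpool_wrapper(*args, **kwargs):
            # Run the whole DB API call in a real OS thread via eventlet's
            # tpool, rather than wrapping each individual SQL statement.
            return tpool.execute(attr, *args, **kwargs)

        return tpool_wrapper
"""

Used as in the nova example above, DBAPI(backend_mapping=_KNOWN_BACKENDS) would resolve 'sqlalchemy' to 'nova.db.sqlalchemy.api', and with dbapi_use_tpool=True each call on IMPL would be pushed to a worker thread so a blocked lock wait cannot starve the greenthread that needs to issue the 'COMMIT'.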