From 84d7be1a5cc4afc1c4304c6c8f47f54dcb0859b2 Mon Sep 17 00:00:00 2001 From: William Brown Date: Mon, 25 Jan 2016 09:39:52 +1000 Subject: [PATCH] Ticket 48342 - deadlock during DNA_EXTEND_EXOP_REQUEST_OID Bug Description: dna.c would deadlock during a range extension request. This is because of lock ordering issues. In the normal operation, we would take: * backend lock * dna_lock This is because *most* operations in dna are be_txn post operations. However, when another replica requests a range, they would call the exop request The issue with this is that the exop request is *not* a be_txn plugin. In fact exop plugins were never able to have a be_txn type. So the code would take: * dna_lock * backend lock This is how the dead lock starts. We have largely been lucky to not see this in production before. Fix Description: This patch fixes a number of issues: * We add a new plugin type, betxnextendedop. This will wrap the extended op in a be txn. It requires the addition of a helper from the the plugin to hint the backend that will be acted upon. * We actually abort the transaction in a failure case in update_config rather than leaving it dangling and open. https://fedorahosted.org/389/ticket/48342 Author: wibrown Review by: ??? --- dirsrvtests/tests/suites/dna_plugin/dna_test.py | 169 +++++++++++-- dirsrvtests/tests/tickets/ticket48342_test.py | 322 ++++++++++++++++++++++++ ldap/servers/plugins/dna/dna.c | 138 ++++++---- ldap/servers/slapd/extendop.c | 57 ++++- ldap/servers/slapd/pblock.c | 32 ++- ldap/servers/slapd/plugin.c | 180 ++++++++----- ldap/servers/slapd/proto-slap.h | 3 +- ldap/servers/slapd/slap.h | 31 +-- ldap/servers/slapd/slapi-plugin.h | 3 + 9 files changed, 794 insertions(+), 141 deletions(-) create mode 100644 dirsrvtests/tests/tickets/ticket48342_test.py diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py index 6b0ab8b..e6fb745 100644 --- a/dirsrvtests/tests/suites/dna_plugin/dna_test.py +++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py @@ -22,8 +22,18 @@ from lib389.utils import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) -installation1_prefix = None - +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +BUSER1_DN = 'uid=user1,ou=branch1,' + DEFAULT_SUFFIX +BUSER2_DN = 'uid=user2,ou=branch2,' + DEFAULT_SUFFIX +BUSER3_DN = 'uid=user3,ou=branch2,' + DEFAULT_SUFFIX +BRANCH1_DN = 'ou=branch1,' + DEFAULT_SUFFIX +BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX +GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX +PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX +CONFIG_AREA = 'nsslapd-pluginConfigArea' class TopologyStandalone(object): def __init__(self, standalone): @@ -33,10 +43,6 @@ class TopologyStandalone(object): @pytest.fixture(scope="module") def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - # Creating standalone instance ... standalone = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_STANDALONE @@ -51,6 +57,16 @@ def topology(request): standalone.create() standalone.open() + # Delete each instance in the end + def fin(): + # This is useful for analysing the test env. 
+ standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \ + repl_data=True, outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,SERVERID_STANDALONE )) + standalone.clearBackupFS() + standalone.backupFS() + standalone.delete() + request.addfinalizer(fin) + # Clear out the tmp dir standalone.clearTmpDir(__file__) @@ -70,18 +86,143 @@ def test_dna_(topology): Write a single test here... ''' - return - + # stop the plugin, and start it + topology.standalone.plugins.disable(name=PLUGIN_DNA) + topology.standalone.plugins.enable(name=PLUGIN_DNA) + + CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_DNA + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + try: + topology.standalone.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top dnaPluginConfig'.split(), + 'cn': 'config', + 'dnatype': 'uidNumber', + 'dnafilter': '(objectclass=top)', + 'dnascope': DEFAULT_SUFFIX, + 'dnaMagicRegen': '-1', + 'dnaMaxValue': '50000', + 'dnaNextValue': '1' + }))) + except ldap.ALREADY_EXISTS: + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'), + (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc']) + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc']) + assert False + + # Do we need to restart for the plugin? + + topology.standalone.restart() + + ############################################################################ + # Test plugin + ############################################################################ + + try: + topology.standalone.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to user1: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=1 + try: + entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=1)') + if not entries: + log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + # Test the magic regen value + try: + topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=2 + try: + entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)') + if not entries: + log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + ################################################################################ + # Change the config + ################################################################################ + + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc']) + assert 
False + + ################################################################################ + # Test plugin + ################################################################################ + + # Test the magic regen value + try: + topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=3 + try: + entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)') + if not entries: + log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + #test_dependency(inst, PLUGIN_AUTOMEMBER) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + topology.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc']) + assert False + + topology.standalone.plugins.disable(name=PLUGIN_DNA) + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_dna: PASS\n') -def test_dna_final(topology): - topology.standalone.delete() - log.info('dna test suite PASSED') + return def run_isolated(): - global installation1_prefix - installation1_prefix = None - topo = topology(True) test_dna_init(topo) test_dna_(topo) diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py new file mode 100644 index 0000000..104a938 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -0,0 +1,322 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +PEOPLE_OU='people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS=5 + +class TopologyReplication(object): + def __init__(self, master1, master2, master3): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + master3.open() + self.master3 = master3 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... 
+ master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... + master2 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Creating master 3... + master3 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_3 + args_instance[SER_PORT] = PORT_MASTER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master3.allocate(args_master) + instance_master3 = master3.exists() + if instance_master3: + master3.delete() + master3.create() + master3.open() + master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 1 to master 3 +# properties = {RA_NAME: r'meTo_$host:$port', +# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], +# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], +# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], +# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} +# m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) +# if not m1_m3_agmt: +# log.fatal("Fail to create a master -> master replica agreement") +# sys.exit(1) +# log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + 
log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from master 2 to master 3 + properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from master 3 to master 1 +# properties = {RA_NAME: r'meTo_$host:$port', +# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], +# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], +# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], +# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} +# m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) +# if not m3_m1_agmt: +# log.fatal("Fail to create a master -> master replica agreement") +# sys.exit(1) +# log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from master 3 to master 2 + properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + time.sleep(5) # just to be safe + master2.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) + master2.waitForReplInit(m2_m3_agmt) + + # Check replication is working... 
+ if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Delete each instance in the end + def fin(): + for master in (master1, master2, master3): + # master.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \ + # repl_data=True, outputfile='%s/ldif/%s.ldif' % (master.dbdir,SERVERID_STANDALONE )) + # master.clearBackupFS() + # master.backupFS() + master.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, master2, master3) + +def _dna_config(server, nextValue=500, maxValue=510): + log.info("Add dna plugin config entry...%s" % server) + + try: + server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMagicRegen': '-1', + 'dnaFilter': '(objectclass=posixAccount)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': str(nextValue), + 'dnaMaxValue' : str(nextValue+maxValue), + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: error ' + e.message['desc']) + assert False + + log.info("Enable the DNA plugin...") + try: + server.plugins.enable(name=PLUGIN_DNA) + except e: + log.error("Failed to enable DNA Plugin: error " + e.message['desc']) + assert False + + log.info("Restarting the server...") + server.stop(timeout=120) + time.sleep(1) + server.start(timeout=120) + time.sleep(3) + +def test_ticket4026(topology): + """Write your replication testcase here. + + To access each DirSrv instance use: topology.master1, topology.master2, + ..., topology.hub1, ..., topology.consumer1, ... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + try: + topology.master1.add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + + topology.master1.add_s(Entry(('ou=ranges,' + SUFFIX, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + + # make master3 having more free slots that master2 + # so master1 will contact master3 + _dna_config(topology.master1, nextValue=100, maxValue=10) + _dna_config(topology.master2, nextValue=200, maxValue=10) + _dna_config(topology.master3, nextValue=300, maxValue=3000) + + # Turn on lots of error logging now. 
+ + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')] + #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology.master1.modify_s('cn=config', mod) + topology.master2.modify_s('cn=config', mod) + topology.master3.modify_s('cn=config', mod) + + # We need to wait for the event in dna.c to fire to start the servers + # see dna.c line 899 + time.sleep(60) + + # add on master1 users with description DNA + for cpt in range(10): + name = "user_with_desc1_%d" % (cpt) + topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description' : '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + # give time to negociate master1 <--> master3 + time.sleep(10) + # add on master1 users with description DNA + for cpt in range(11,20): + name = "user_with_desc1_%d" % (cpt) + topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description' : '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + log.info('Test complete') + # add on master1 users with description DNA + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')] + #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology.master1.modify_s('cn=config', mod) + topology.master2.modify_s('cn=config', mod) + topology.master3.modify_s('cn=config', mod) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +# global installation1_prefix +# installation1_prefix=None +# topo = topology(True) +# test_ticket4026(topo) + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index b0ea2f4..268ac15 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -277,6 +277,7 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype); static int dna_mod_pre_op(Slapi_PBlock * pb); static int dna_add_pre_op(Slapi_PBlock * pb); static int dna_extend_exop(Slapi_PBlock *pb); +static int dna_extend_exop_backend(Slapi_PBlock *pb, Slapi_Backend **target); static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype); static int dna_be_txn_add_pre_op(Slapi_PBlock *pb); static int dna_be_txn_mod_pre_op(Slapi_PBlock *pb); @@ -483,7 +484,7 @@ dna_init(Slapi_PBlock *pb) if ((status == DNA_SUCCESS) && /* the range extension extended operation */ - slapi_register_plugin("extendedop", /* op type */ + slapi_register_plugin("betxnextendedop", /* op type */ 1, /* Enabled */ "dna_init", /* this function desc */ dna_exop_init, /* init func for exop */ @@ -557,7 +558,9 @@ dna_exop_init(Slapi_PBlock * pb) slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_OIDLIST, (void *) dna_extend_exop_oid_list) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_FN, - (void *) dna_extend_exop) != 0) { + (void *) dna_extend_exop) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_BACKEND_FN, + (void *) dna_extend_exop_backend) != 0) { slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_exop_init: failed to register plugin\n"); status = DNA_FAILURE; @@ -699,6 +702,64 @@ dna_close(Slapi_PBlock * pb) return DNA_SUCCESS; } +static int +dna_parse_exop_ber(Slapi_PBlock *pb, char **shared_dn) +{ + int ret = -1; /* What's a better default? 
*/ + char *oid = NULL; + struct berval *reqdata = NULL; + BerElement *tmp_bere = NULL; + + slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "----> dna_parse_exop_ber\n"); + + /* Fetch the request OID */ + slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid); + if (!oid) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_exop_ber: Unable to retrieve request OID.\n"); + goto out; + } + + /* Make sure the request OID is correct. */ + if (strcmp(oid, DNA_EXTEND_EXOP_REQUEST_OID) != 0) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_exop_ber: Received incorrect request OID.\n"); + goto out; + } + + /* Fetch the request data */ + slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &reqdata); + if (!BV_HAS_DATA(reqdata)) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_exop_ber: No request data received.\n"); + goto out; + } + + /* decode the exop */ + tmp_bere = ber_init(reqdata); + if (tmp_bere == NULL) { + goto out; + } + + if (ber_scanf(tmp_bere, "{a}", shared_dn) == LBER_ERROR) { + ret = LDAP_PROTOCOL_ERROR; + goto out; + } + + ret = LDAP_SUCCESS; + +out: + if (NULL != tmp_bere) { + ber_free(tmp_bere, 1); + tmp_bere = NULL; + } + + slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "<---- dna_parse_exop_ber %s\n", *shared_dn); + return ret; +} + /* * Free the global linkedl ist of shared servers */ @@ -832,6 +893,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) * looking for valid ones. */ dna_parse_config_entry(pb, entries[i], 1); } + dna_unlock(); if (use_eventq) { @@ -1562,6 +1624,10 @@ dna_update_config_event(time_t event_time, void *arg) if (dna_pb) { if (0 == rc) { slapi_back_transaction_commit(dna_pb); + } else { + if (slapi_back_transaction_abort(dna_pb) != 0) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_update_config_event: failed to abort transaction!\n"); + } } slapi_pblock_destroy(dna_pb); } @@ -4244,16 +4310,38 @@ static int dna_config_check_post_op(Slapi_PBlock * pb) /**************************************************** + * Pre Extended Operation, Backend selection + ***************************************************/ +static int dna_extend_exop_backend(Slapi_PBlock *pb, Slapi_Backend **target) +{ + slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "--> dna_parse_exop_backend\n"); + Slapi_DN *shared_sdn = NULL; + char *shared_dn = NULL; + int res = -1; + /* Parse the oid and what exop wants us to do */ + res = dna_parse_exop_ber(pb, &shared_dn); + if (shared_dn) { + shared_sdn = slapi_sdn_new_dn_byref(shared_dn); + *target = slapi_be_select(shared_sdn); + slapi_sdn_free(&shared_sdn); + } + res = LDAP_SUCCESS; + + slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "<-- dna_parse_exop_backend %d\n", res); + return res; +} + + +/**************************************************** * Range Extension Extended Operation ***************************************************/ static int dna_extend_exop(Slapi_PBlock *pb) { int ret = SLAPI_PLUGIN_EXTENDED_NOT_HANDLED; - struct berval *reqdata = NULL; - BerElement *tmp_bere = NULL; char *shared_dn = NULL; char *bind_dn = NULL; - char *oid = NULL; PRUint64 lower = 0; PRUint64 upper = 0; @@ -4264,39 +4352,8 @@ static int dna_extend_exop(Slapi_PBlock *pb) slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, "--> dna_extend_exop\n"); - /* Fetch the request OID */ - slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid); - if (!oid) { - slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, - "dna_extend_exop: Unable to retrieve 
request OID.\n"); - goto free_and_return; - } - - /* Make sure the request OID is correct. */ - if (strcmp(oid, DNA_EXTEND_EXOP_REQUEST_OID) != 0) { - slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, - "dna_extend_exop: Received incorrect request OID.\n"); - goto free_and_return; - } - - /* Fetch the request data */ - slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &reqdata); - if (!BV_HAS_DATA(reqdata)) { - slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, - "dna_extend_exop: No request data received.\n"); - goto free_and_return; - } - - /* decode the exop */ - tmp_bere = ber_init(reqdata); - if (tmp_bere == NULL) { - goto free_and_return; - } - - if (ber_scanf(tmp_bere, "{a}", &shared_dn) == LBER_ERROR) { - ret = LDAP_PROTOCOL_ERROR; - goto free_and_return; - } + // Check the return code. + dna_parse_exop_ber(pb, &shared_dn); slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, "dna_extend_exop: received range extension " @@ -4365,10 +4422,6 @@ static int dna_extend_exop(Slapi_PBlock *pb) free_and_return: slapi_ch_free_string(&shared_dn); slapi_ch_free_string(&bind_dn); - if (NULL != tmp_bere) { - ber_free(tmp_bere, 1); - tmp_bere = NULL; - } slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, "<-- dna_extend_exop\n"); @@ -4530,6 +4583,7 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper) if (ret == LDAP_SUCCESS) { /* Adjust maxval in our cached config and shared config */ config_entry->maxval = *lower - 1; + /* This is within the dna_lock, so okay */ dna_notice_allocation(config_entry, config_entry->nextval, 0); } } diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c index 8d0b8fb..840a898 100644 --- a/ldap/servers/slapd/extendop.c +++ b/ldap/servers/slapd/extendop.c @@ -333,8 +333,63 @@ do_extended( Slapi_PBlock *pb ) slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_OID, extoid ); slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_VALUE, &extval ); slapi_pblock_set( pb, SLAPI_REQUESTOR_ISROOT, &pb->pb_op->o_isroot); + + /* wibrown 201603 I want to rewrite this to get plugin p, and use that + * rather than all these plugin_call_, that loop over the plugin lists + * We do "get plugin (oid). + * then we just hand *p into the call functions. + * much more efficient! :) + */ - rc = plugin_call_exop_plugins( pb, extoid ); + slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c calling plugins ... \n"); + + rc = plugin_call_exop_plugins( pb, extoid, SLAPI_PLUGIN_EXTENDEDOP); + + slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c called exop, got %d \n", rc); + + if (rc == SLAPI_PLUGIN_EXTENDED_NOT_HANDLED) { + slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c calling betxn plugins ... \n"); + /* Look up the correct backend to use. */ + Slapi_Backend *be = plugin_extended_op_getbackend( pb, extoid ); + + if ( be == NULL ) { + slapi_log_error(SLAPI_LOG_FATAL, NULL, "extendop.c plugin_extended_op_getbackend was unable to retrieve a backend!!!\n"); + rc = SLAPI_PLUGIN_EXTENDED_NO_BACKEND_AVAILABLE; + } else { + /* We need to make a new be pb here because when you set SLAPI_BACKEND + * you overwrite the plg parts of the pb. So if we re-use pb + * you actually nuke the request, and everything hangs. 
(╯°□°)╯︵ ┻━┻ + */ + Slapi_PBlock *be_pb = NULL; + be_pb = slapi_pblock_new(); + slapi_pblock_set(be_pb, SLAPI_BACKEND, be); + + int txn_rc = slapi_back_transaction_begin(be_pb); + if (txn_rc) { + slapi_log_error(SLAPI_LOG_FATAL, NULL, "exendop.c Failed to start be_txn for plugin_call_exop_plugins %d\n", txn_rc); + } else { + rc = plugin_call_exop_plugins( pb, extoid, SLAPI_PLUGIN_BETXNEXTENDEDOP); + slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c called betxn exop, got %d \n", rc); + if (rc == LDAP_SUCCESS || rc == SLAPI_PLUGIN_EXTENDED_SENT_RESULT) { + /* commit */ + txn_rc = slapi_back_transaction_commit(be_pb); + if (txn_rc == 0) { + slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c commit with result %d \n", txn_rc); + } else { + slapi_log_error(SLAPI_LOG_FATAL, NULL, "extendop.c Unable to commit commit with result %d \n", txn_rc); + } + } else { + /* abort */ + txn_rc = slapi_back_transaction_abort(be_pb); + slapi_log_error(SLAPI_LOG_FATAL, NULL, "extendop.c abort with result %d \n", txn_rc); + } + } /* txn_rc */ + if (be_pb != NULL) { + slapi_pblock_destroy(be_pb); /* Clean up after ourselves */ + } + slapi_log_error(SLAPI_LOG_TRACE, NULL, "exendop.c plugin_call_exop_plugins rc final %d\n", rc); + } /* if be */ + } if ( SLAPI_PLUGIN_EXTENDED_SENT_RESULT != rc ) { if ( SLAPI_PLUGIN_EXTENDED_NOT_HANDLED == rc ) { diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index d373d99..d48c2d0 100644 --- a/ldap/servers/slapd/pblock.c +++ b/ldap/servers/slapd/pblock.c @@ -727,23 +727,33 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value ) /* extendedop plugin functions */ case SLAPI_PLUGIN_EXT_OP_FN: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } (*(IFP *)value) = pblock->pb_plugin->plg_exhandler; break; case SLAPI_PLUGIN_EXT_OP_OIDLIST: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } (*(char ***)value) = pblock->pb_plugin->plg_exoids; break; case SLAPI_PLUGIN_EXT_OP_NAMELIST: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } (*(char ***)value) = pblock->pb_plugin->plg_exnames; break; + case SLAPI_PLUGIN_EXT_OP_BACKEND_FN: + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_be_exhandler; + break; /* preoperation plugin functions */ case SLAPI_PLUGIN_PRE_BIND_FN: @@ -2353,24 +2363,34 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value ) /* extendedop plugin functions */ case SLAPI_PLUGIN_EXT_OP_FN: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } pblock->pb_plugin->plg_exhandler = (IFP) value; break; case SLAPI_PLUGIN_EXT_OP_OIDLIST: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } 
pblock->pb_plugin->plg_exoids = (char **) value; ldapi_register_extended_op( (char **)value ); break; case SLAPI_PLUGIN_EXT_OP_NAMELIST: - if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP ) { + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { return( -1 ); } pblock->pb_plugin->plg_exnames = (char **) value; break; + case SLAPI_PLUGIN_EXT_OP_BACKEND_FN: + if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP && + pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP ) { + return( -1 ); + } + pblock->pb_plugin->plg_be_exhandler = (IFP) value; + break; /* preoperation plugin functions */ case SLAPI_PLUGIN_PRE_BIND_FN: diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index ddf2631..96169e6 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -485,44 +485,54 @@ plugin_call_entryfetch_plugins(char **entrystr, uint *size) * returned by the plugins we called). */ int -plugin_call_exop_plugins( Slapi_PBlock *pb, char *oid ) +plugin_call_exop_plugins( Slapi_PBlock *pb, char *oid, int whichtype ) { - struct slapdplugin *p; - int i, rc; - int lderr = SLAPI_PLUGIN_EXTENDED_NOT_HANDLED; - - for ( p = global_plugin_list[PLUGIN_LIST_EXTENDED_OPERATION]; p != NULL; p = p->plg_next ) { - if ( p->plg_exhandler != NULL ) { - if ( p->plg_exoids != NULL ) { - for ( i = 0; p->plg_exoids[i] != NULL; i++ ) { - if ( strcasecmp( oid, p->plg_exoids[i] ) - == 0 ) { - break; - } - } - if ( p->plg_exoids[i] == NULL ) { - continue; - } - } + struct slapdplugin *p; + int i, rc; + int list_type; + int lderr = SLAPI_PLUGIN_EXTENDED_NOT_HANDLED; + + if (whichtype == SLAPI_PLUGIN_EXTENDEDOP) { + list_type = PLUGIN_LIST_EXTENDED_OPERATION; + } else if (whichtype == SLAPI_PLUGIN_BETXNEXTENDEDOP) { + list_type = PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION; + } else { + slapi_log_error(SLAPI_LOG_FATAL, NULL, "plugin_call_exop_plugins unknown plugin list type %d\n", whichtype); + return( lderr ); + } - slapi_pblock_set( pb, SLAPI_PLUGIN, p ); - set_db_default_result_handlers( pb ); - if ( (rc = (*p->plg_exhandler)( pb )) - == SLAPI_PLUGIN_EXTENDED_SENT_RESULT ) { - return( rc ); /* result sent */ - } else if ( rc != SLAPI_PLUGIN_EXTENDED_NOT_HANDLED ) { - /* - * simple merge: report last real error - */ - if ( lderr == SLAPI_PLUGIN_EXTENDED_NOT_HANDLED - || rc != LDAP_SUCCESS ) { - lderr = rc; - } - } - } - } + for ( p = global_plugin_list[list_type]; p != NULL; p = p->plg_next ) { + if ( p->plg_exhandler != NULL && p->plg_type == whichtype ) { + if ( p->plg_exoids != NULL ) { + for ( i = 0; p->plg_exoids[i] != NULL; i++ ) { + if ( strcasecmp( oid, p->plg_exoids[i] ) + == 0 ) { + break; + } + } + if ( p->plg_exoids[i] == NULL ) { + continue; + } + } + + slapi_pblock_set( pb, SLAPI_PLUGIN, p ); + set_db_default_result_handlers( pb ); + if ( (rc = (*p->plg_exhandler)( pb )) + == SLAPI_PLUGIN_EXTENDED_SENT_RESULT ) { + return( rc ); /* result sent */ + } else if ( rc != SLAPI_PLUGIN_EXTENDED_NOT_HANDLED ) { + /* + * simple merge: report last real error + */ + if ( lderr == SLAPI_PLUGIN_EXTENDED_NOT_HANDLED + || rc != LDAP_SUCCESS ) { + lderr = rc; + } + } + } + } - return( lderr ); + return( lderr ); } @@ -539,36 +549,77 @@ plugin_call_exop_plugins( Slapi_PBlock *pb, char *oid ) const char * plugin_extended_op_oid2string( const char *oid ) { - struct slapdplugin *p; - int i, j; - const char *rval = NULL; - - for ( p = global_plugin_list[PLUGIN_LIST_EXTENDED_OPERATION]; p != NULL; - p = 
p->plg_next ) { - if ( p->plg_exhandler != NULL && p->plg_exoids != NULL ) { - for ( i = 0; p->plg_exoids[i] != NULL; i++ ) { - if ( strcasecmp( oid, p->plg_exoids[i] ) == 0 ) { - if ( NULL != p->plg_exnames ) { - for ( j = 0; j < i && p->plg_exnames[j] != NULL; ++j ) { - ; - } - rval = p->plg_exnames[j]; /* OID-related name */ - } + struct slapdplugin *p; + int i, j, l, list_type; + const char *rval = NULL; + int list_types[] = {PLUGIN_LIST_EXTENDED_OPERATION, PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION}; + + /* I feel there may be a better way to achieve this, but it works. */ + for ( l = 0; l < 2; ++l ) { + list_type = list_types[l]; + for ( p = global_plugin_list[list_type]; p != NULL; p = p->plg_next ) { + if ( p->plg_exhandler != NULL && p->plg_exoids != NULL ) { + for ( i = 0; p->plg_exoids[i] != NULL; i++ ) { + if ( strcasecmp( oid, p->plg_exoids[i] ) == 0 ) { + if ( NULL != p->plg_exnames ) { + for ( j = 0; j < i && p->plg_exnames[j] != NULL; ++j ) { + ; + } + rval = p->plg_exnames[j]; /* OID-related name */ + } + + if ( NULL == rval ) { + if ( NULL != p->plg_desc.spd_id ) { + rval = p->plg_desc.spd_id; /* short name */ + } else { + rval = p->plg_name; /* RDN */ + } + } + break; + } + } /* for */ + } /* If */ + } /* for p in global_plugin list */ + } /* list type */ - if ( NULL == rval ) { - if ( NULL != p->plg_desc.spd_id ) { - rval = p->plg_desc.spd_id; /* short name */ - } else { - rval = p->plg_name; /* RDN */ - } - } - break; - } - } - } - } + return( rval ); +} + + +Slapi_Backend * +plugin_extended_op_getbackend( Slapi_PBlock *pb, char *oid ) +{ + struct slapdplugin *p; + int i; + int rc; + /* This could be an error type, but for now we expect the caller to check + * that it's not null + */ + Slapi_Backend *result = NULL; + + for ( p = global_plugin_list[PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION]; p != NULL; p = p->plg_next ) { + if ( p->plg_be_exhandler != NULL && p->plg_type == SLAPI_PLUGIN_BETXNEXTENDEDOP ) { + if ( p->plg_exoids != NULL ) { + for ( i = 0; p->plg_exoids[i] != NULL; i++ ) { + if ( strcasecmp( oid, p->plg_exoids[i] ) == 0 ) { + break; + } + } + if ( p->plg_exoids[i] == NULL ) { + continue; + } + } + + rc = (*p->plg_be_exhandler)( pb, &result ); + if (rc != LDAP_SUCCESS) { + /* Do we need to do anything? Or it is the parents job? 
*/ + result = NULL; + } + break; + } + } - return( rval ); + return( result ); } static int @@ -2264,6 +2315,9 @@ plugin_get_type_and_list( } else if ( strcasecmp( plugintype, "index" ) == 0 ) { *type = SLAPI_PLUGIN_INDEX; plugin_list_index= PLUGIN_LIST_INDEX; + } else if ( strcasecmp( plugintype, "betxnextendedop" ) == 0 ) { + *type = SLAPI_PLUGIN_BETXNEXTENDEDOP; + plugin_list_index= PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION; } else { return( 1 ); /* unknown plugin type - pass to backend */ } diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 8a0bdd3..b287e8e 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -871,7 +871,8 @@ void global_plugin_init(); int plugin_call_plugins( Slapi_PBlock *, int ); int plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group, slapi_plugin_init_fnptr initfunc, int add_to_dit, char *returntext); -int plugin_call_exop_plugins( Slapi_PBlock *pb, char *oid ); +int plugin_call_exop_plugins( Slapi_PBlock *pb, char *oid, int whichtype ); +Slapi_Backend * plugin_extended_op_getbackend( Slapi_PBlock *pb, char *oid); const char *plugin_extended_op_oid2string( const char *oid ); void plugin_closeall(int close_backends, int close_globals); void plugin_startall(int argc, char **argv, char **plugin_list); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 5bb9252..890b742 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -681,22 +681,23 @@ struct matchingRuleList { #define PLUGIN_LIST_INTERNAL_PREOPERATION 5 #define PLUGIN_LIST_INTERNAL_POSTOPERATION 6 #define PLUGIN_LIST_EXTENDED_OPERATION 7 -#define PLUGIN_LIST_BACKEND_MAX 8 +#define PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION 8 +#define PLUGIN_LIST_BACKEND_MAX 9 /* Global Plugins */ -#define PLUGIN_LIST_ACL 9 -#define PLUGIN_LIST_MATCHINGRULE 10 -#define PLUGIN_LIST_SYNTAX 11 -#define PLUGIN_LIST_ENTRY 12 -#define PLUGIN_LIST_OBJECT 13 -#define PLUGIN_LIST_PWD_STORAGE_SCHEME 14 -#define PLUGIN_LIST_VATTR_SP 15 /* DBDB */ -#define PLUGIN_LIST_REVER_PWD_STORAGE_SCHEME 16 -#define PLUGIN_LIST_LDBM_ENTRY_FETCH_STORE 17 -#define PLUGIN_LIST_INDEX 18 -#define PLUGIN_LIST_BETXNPREOPERATION 19 -#define PLUGIN_LIST_BETXNPOSTOPERATION 20 -#define PLUGIN_LIST_GLOBAL_MAX 21 +#define PLUGIN_LIST_ACL 10 +#define PLUGIN_LIST_MATCHINGRULE 11 +#define PLUGIN_LIST_SYNTAX 12 +#define PLUGIN_LIST_ENTRY 13 +#define PLUGIN_LIST_OBJECT 14 +#define PLUGIN_LIST_PWD_STORAGE_SCHEME 15 +#define PLUGIN_LIST_VATTR_SP 16 /* DBDB */ +#define PLUGIN_LIST_REVER_PWD_STORAGE_SCHEME 17 +#define PLUGIN_LIST_LDBM_ENTRY_FETCH_STORE 18 +#define PLUGIN_LIST_INDEX 19 +#define PLUGIN_LIST_BETXNPREOPERATION 20 +#define PLUGIN_LIST_BETXNPOSTOPERATION 21 +#define PLUGIN_LIST_GLOBAL_MAX 22 /* plugin configuration attributes */ #define ATTR_PLUGIN_PATH "nsslapd-pluginPath" @@ -900,10 +901,12 @@ struct slapdplugin { char **plg_un_pe_exoids; /* exop oids */ char **plg_un_pe_exnames; /* exop names (may be NULL) */ IFP plg_un_pe_exhandler; /* handler */ + IFP plg_un_pe_be_exhandler; /* handler to retrieve the be name for the operation */ } plg_un_pe; #define plg_exoids plg_un.plg_un_pe.plg_un_pe_exoids #define plg_exnames plg_un.plg_un_pe.plg_un_pe_exnames #define plg_exhandler plg_un.plg_un_pe.plg_un_pe_exhandler +#define plg_be_exhandler plg_un.plg_un_pe.plg_un_pe_be_exhandler /* pre-operation plugin structure */ diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index bf4e8c2..0dd10d9 100644 --- 
a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -6745,6 +6745,7 @@ time_t slapi_current_time( void ); #define SLAPI_PLUGIN_INDEX 18 #define SLAPI_PLUGIN_BETXNPREOPERATION 19 #define SLAPI_PLUGIN_BETXNPOSTOPERATION 20 +#define SLAPI_PLUGIN_BETXNEXTENDEDOP 21 /* * special return values for extended operation plugins (zero or positive @@ -6752,6 +6753,7 @@ time_t slapi_current_time( void ); */ #define SLAPI_PLUGIN_EXTENDED_SENT_RESULT -1 #define SLAPI_PLUGIN_EXTENDED_NOT_HANDLED -2 +#define SLAPI_PLUGIN_EXTENDED_NO_BACKEND_AVAILABLE -3 /* * Return values of plugins: @@ -6876,6 +6878,7 @@ typedef struct slapi_plugindesc { #define SLAPI_PLUGIN_EXT_OP_FN 300 #define SLAPI_PLUGIN_EXT_OP_OIDLIST 301 #define SLAPI_PLUGIN_EXT_OP_NAMELIST 302 +#define SLAPI_PLUGIN_EXT_OP_BACKEND_FN 1948 /* preoperation plugin functions */ #define SLAPI_PLUGIN_PRE_BIND_FN 401 -- 2.5.0
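Reviewer note (not part of the patch): a minimal sketch of how a plugin would adopt the new "betxnextendedop" type and the SLAPI_PLUGIN_EXT_OP_BACKEND_FN hook introduced above. The registration string, the pblock parameters, and the (Slapi_PBlock *, Slapi_Backend **) handler signature come from this patch; the plugin name, OID, suffix, and function names below are hypothetical placeholders, not an existing plugin.

/* Hypothetical example plugin -- a sketch only, mirroring what dna.c does in this patch. */
#include "slapi-plugin.h"

#define EXAMPLE_EXOP_OID "2.16.840.1.113730.3.5.999" /* placeholder OID */

static char *example_exop_oid_list[] = { EXAMPLE_EXOP_OID, NULL };
static Slapi_PluginDesc example_desc = { "example-betxn-exop", "example", "0.1",
                                         "betxn extended operation example" };

/* Backend hint, called by do_extended() before the handler: tell the core
 * which backend the exop will act on, so the transaction can be opened
 * there first (backend lock taken before any plugin lock). */
static int
example_exop_backend(Slapi_PBlock *pb, Slapi_Backend **target)
{
    /* A real plugin would decode SLAPI_EXT_OP_REQ_VALUE here (as
     * dna_parse_exop_ber does) and select the backend for that DN. */
    Slapi_DN *sdn = slapi_sdn_new_dn_byval("dc=example,dc=com"); /* placeholder suffix */
    *target = slapi_be_select(sdn);
    slapi_sdn_free(&sdn);
    return 0; /* LDAP_SUCCESS */
}

/* The handler now runs inside the backend transaction that do_extended()
 * opened on the hinted backend. Returning LDAP_SUCCESS or
 * SLAPI_PLUGIN_EXTENDED_SENT_RESULT commits it; anything else aborts it. */
static int
example_exop(Slapi_PBlock *pb)
{
    /* Stub: a real handler would perform its updates and send the result. */
    return SLAPI_PLUGIN_EXTENDED_NOT_HANDLED;
}

static int
example_exop_init(Slapi_PBlock *pb)
{
    if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *) &example_desc) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_OIDLIST, (void *) example_exop_oid_list) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_FN, (void *) example_exop) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_BACKEND_FN, (void *) example_exop_backend) != 0) {
        return -1;
    }
    return 0;
}

/* Registration uses the new type string, as dna_init does in this patch
 * (plugin_identity is a placeholder for the caller's component id):
 *   slapi_register_plugin("betxnextendedop", 1, "example_exop_init",
 *                         example_exop_init, "example exop", NULL, plugin_identity);
 */

With this in place, do_extended() tries the plain extendedop list first; when that returns SLAPI_PLUGIN_EXTENDED_NOT_HANDLED it asks the betxnextendedop plugin for the target backend, begins a transaction on it, dispatches the handler, and commits or aborts based on the result. That is what restores the backend-lock-then-dna-lock ordering and removes the deadlock described in the bug report.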