Diffstat (limited to 'src/back-sch.c')
-rw-r--r--	src/back-sch.c	59
1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/src/back-sch.c b/src/back-sch.c
index 63ee110..8359056 100644
--- a/src/back-sch.c
+++ b/src/back-sch.c
@@ -155,7 +155,8 @@ backend_set_config_read_config(struct plugin_state *state, Slapi_Entry *e,
const char *group, const char *container,
bool_t *flag, struct backend_shr_set_data **pret)
{
- char **bases, *entry_filter, **attributes, *rdn_format, *dn, *nsswitch_min_id, *check_nsswitch, *strp;
+ char **bases, *entry_filter, **attributes, *rdn_format, *dn;
+ char *nsswitch_min_id, *check_nsswitch, *strp;
bool_t check_access;
struct backend_set_data ret;
Slapi_DN *tmp_sdn;
@@ -238,7 +239,8 @@ backend_set_config_read_config(struct plugin_state *state, Slapi_Entry *e,
/* If we're adding nsswitch-based entries to this map, make
* sure that we copy the schema-compat-origin and SID
* attributes, so that we can read the former during the BIND
- * callback. */
+ * callback. FIXME: store that in the entry's backend_data to
+ * avoid surprising clients. */
backend_shr_add_strlist(&ret.attribute_format, "objectClass=extensibleObject");
backend_shr_add_strlist(&ret.attribute_format, "schema-compat-origin=%{schema-compat-origin}");
backend_shr_add_strlist(&ret.attribute_format, "ipaNTSecurityIdentifier=%{ipaNTSecurityIdentifier}");
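Note on the hunk above: the three format strings are attribute templates, where each `name=%{source-attr}` pair tells the compat plugin to copy `source-attr` from the original entry into the generated entry. Below is a minimal sketch of what a NULL-terminated string-list appender such as backend_shr_add_strlist() plausibly does; the helper name strlist_add and its exact behavior are our assumption, not the plugin's actual implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append a copy of "value" to a NULL-terminated list of strings,
 * growing the array by one slot. */
static void
strlist_add(char ***list, const char *value)
{
	size_t n = 0;
	char **tmp;

	if (*list != NULL) {
		while ((*list)[n] != NULL) {
			n++;
		}
	}
	tmp = realloc(*list, (n + 2) * sizeof(char *));
	if (tmp == NULL) {
		return; /* out of memory: leave the list unchanged */
	}
	tmp[n] = strdup(value);
	tmp[n + 1] = NULL;
	*list = tmp;
}

int
main(void)
{
	char **attrs = NULL;
	size_t i;

	strlist_add(&attrs, "objectClass=extensibleObject");
	strlist_add(&attrs, "schema-compat-origin=%{schema-compat-origin}");
	for (i = 0; (attrs != NULL) && (attrs[i] != NULL); i++) {
		printf("%s\n", attrs[i]);
	}
	for (i = 0; (attrs != NULL) && (attrs[i] != NULL); i++) {
		free(attrs[i]);
	}
	free(attrs);
	return 0;
}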
@@ -1005,7 +1007,7 @@ backend_search_set_cb(const char *group, const char *set, bool_t flag,
struct backend_set_data *set_data;
Slapi_Entry *set_entry;
int result, n_entries;
- int n_entries_nsswitch;
+ int n_entries_without_nsswitch;
const char *ndn;
cbdata = cb_data;
@@ -1013,9 +1015,10 @@ backend_search_set_cb(const char *group, const char *set, bool_t flag,
cbdata->check_access = set_data->check_access;
cbdata->check_nsswitch = set_data->check_nsswitch;
cbdata->nsswitch_min_id = set_data->nsswitch_min_id;
- /* If any entries were actually returned by the descending callback,
- * avoid to look up in nsswitch even if this set is marked to look up */
- n_entries_nsswitch = cbdata->n_entries;
+
+ /* Count the number of results that we've found before looking at this
+ * set of entries. */
+ n_entries_without_nsswitch = cbdata->n_entries;
/* Check the set itself, unless it's also the group, in which case we
* already evaluated it for this search. */
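The renamed variable and rewritten comment spell out a snapshot-and-compare idiom: save the running entry counter before walking a set, and if the counter has not moved afterwards, the set matched nothing. A compilable toy version of the idiom, with names of our own invention:

#include <stdio.h>

/* Stand-in for the per-set walk (map_data_foreach_entry_id() in the
 * diff): bump the counter once per matching element. */
static void
walk_set(const int *set, int len, int want, int *n_entries)
{
	int i;

	for (i = 0; i < len; i++) {
		if (set[i] == want) {
			(*n_entries)++;
		}
	}
}

int
main(void)
{
	const int set[] = {1, 2, 3};
	int n_entries = 0, n_before;

	n_before = n_entries;		/* snapshot before the walk */
	walk_set(set, 3, 42, &n_entries);
	if (n_entries == n_before) {	/* the set contributed nothing */
		printf("would fall back to nsswitch here\n");
	}
	return 0;
}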
@@ -1071,14 +1074,13 @@ backend_search_set_cb(const char *group, const char *set, bool_t flag,
map_data_foreach_entry_id(cbdata->state, group, set, NULL,
backend_search_entry_cb, cbdata);
#ifdef USE_NSSWITCH
- /* If we didn't find an exact match for the entry but asked to look up NSSWITCH,
- * then try to search NSSWITCH. If search filters would match, the search will be
- * staged for retrieval. The retrieval process is run after lookup is completed
- * for all maps as we need to ensure there is no contention for the global
- * map cache lock. The contention might occur if NSSWITCH would need to re-kinit
- * its kerberos credentials -- this would cause changes in the original LDAP tree
- * which, in turn, will trigger modification of the map cache entries. */
- if ((n_entries_nsswitch == cbdata->n_entries) &&
+ /* If we didn't find a matching entry in this set, but we're
+ * configured to also consult nsswitch, check if the search
+ * filter is one that should trigger an nsswitch lookup, and
+ * make a note if it would. We'll come back and actually
+ * perform the lookup later when we're not holding a lock that
+ * can stall other threads. */
+ if ((cbdata->n_entries == n_entries_without_nsswitch) &&
(cbdata->check_nsswitch != SCH_NSSWITCH_NONE)) {
backend_search_nsswitch(set_data, cbdata);
}
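The rewritten comment describes a two-phase design: while the map cache lock is held, a would-be nsswitch lookup is only recorded (staged), and the blocking lookup itself runs later, after the lock is dropped, so a slow operation inside nsswitch (such as a re-kinit) cannot stall other threads. A self-contained sketch of that stage-now, resolve-later pattern; the struct and function names here are illustrative, not the plugin's API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct staged_search {
	char *key;			/* what to ask nsswitch about */
	struct staged_search *next;
};

/* Phase 1: runs with the map cache lock held, so it must be cheap.
 * Just record the request. */
static void
stage_lookup(struct staged_search **head, const char *key)
{
	struct staged_search *s;

	s = calloc(1, sizeof(*s));
	if (s == NULL) {
		return;
	}
	s->key = strdup(key);
	s->next = *head;
	*head = s;
}

/* Phase 2: runs after the lock is released, so blocking here (network
 * I/O, credential refresh) cannot stall threads waiting on the lock. */
static void
run_staged_lookups(struct staged_search *head)
{
	struct staged_search *s;

	for (s = head; s != NULL; s = s->next) {
		printf("nsswitch lookup for \"%s\"\n", s->key);
	}
}

int
main(void)
{
	struct staged_search *staged = NULL, *s, *next;

	/* Under the lock: the set matched nothing, so stage a lookup. */
	stage_lookup(&staged, "uid=alice");

	/* Lock released: now do the slow part. */
	run_staged_lookups(staged);

	for (s = staged; s != NULL; s = next) {
		next = s->next;
		free(s->key);
		free(s);
	}
	return 0;
}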
@@ -1109,7 +1111,7 @@ backend_search_find_set_data_in_group_cb(const char *group, const char *set, boo
if ((0 == strcmp(group, cbdata->cur_staged->map_group)) &&
(0 == strcmp(set, cbdata->cur_staged->map_set))) {
- cbdata->cur_staged->set_data_fixup = set_data;
+ cbdata->cur_staged->set_data = set_data;
}
return TRUE;
@@ -1123,7 +1125,7 @@ backend_search_find_set_data_cb(const char *group, void *cb_data)
cbdata = cb_data;
map_data_foreach_map(cbdata->state, group,
- backend_search_find_set_data_in_group_cb, cbdata);
+ backend_search_find_set_data_in_group_cb, cb_data);
return TRUE;
}
@@ -1230,7 +1232,7 @@ static int
backend_search_cb(Slapi_PBlock *pb)
{
struct backend_search_cbdata cbdata;
- struct backend_staged_data *staged, *next;
+ struct backend_staged_search *staged, *next;
int i;
if (wrap_get_call_level() > 0) {
@@ -1298,7 +1300,7 @@ backend_search_cb(Slapi_PBlock *pb)
/* Go over the list of staged requests and retrieve entries.
* It is important to perform the retrieval *without* holding any locks to the map cache */
staged = cbdata.staged;
- while (staged) {
+ while (staged != NULL) {
if (staged->entries == NULL) {
backend_retrieve_from_nsswitch(staged, &cbdata);
}
@@ -1309,22 +1311,21 @@ backend_search_cb(Slapi_PBlock *pb)
/* Add the entries to the map cache */
wrap_inc_call_level();
map_wrlock();
- while (staged) {
+ while (staged != NULL) {
if (staged->entries) {
cbdata.cur_staged = staged;
/* We actually need to find the original set first */
map_data_foreach_domain(cbdata.state, backend_search_find_set_data_cb, &cbdata);
- if (cbdata.cur_staged->set_data_fixup != NULL) {
- for (i = 0; i < staged->count ; i++) {
- if (staged->entries[i] != NULL) {
- if (!map_data_check_entry(cbdata.state,
- staged->map_group, staged->map_set,
- slapi_sdn_get_ndn(staged->entries[i]))) {
- backend_set_entry(cbdata.pb, staged->entries[i], staged->set_data_fixup);
- }
- slapi_entry_free(staged->entries[i]);
- staged->entries[i] = NULL;
+ for (i = 0; i < staged->count ; i++) {
+ if (staged->entries[i] != NULL) {
+ if ((cbdata.cur_staged->set_data != NULL) &&
+ !map_data_check_entry(cbdata.state,
+ staged->map_group, staged->map_set,
+ slapi_sdn_get_ndn(staged->entries[i]))) {
+ backend_set_entry(cbdata.pb, staged->entries[i], staged->set_data);
}
+ slapi_entry_free(staged->entries[i]);
+ staged->entries[i] = NULL;
}
}
free(staged->entries);
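Beyond renaming set_data_fixup to set_data, the restructured loop in the final hunk fixes a leak: previously, when the original set could not be found again (set_data_fixup == NULL), the staged entries were never freed. In the new shape only the cache insertion is guarded by the set_data != NULL test, while slapi_entry_free() runs unconditionally. A compilable miniature of that guarded-add, always-free shape, with stand-in types and names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char *dn;
};

/* Stand-in for backend_set_entry(): hand the entry to the map cache. */
static void
add_to_cache(const struct entry *e)
{
	printf("cached %s\n", e->dn);
}

/* Add staged entries to the cache when we still know which set they
 * belong to, but free them in every case. */
static void
flush_staged(struct entry **entries, int count, int have_set_data)
{
	int i;

	for (i = 0; i < count; i++) {
		if (entries[i] == NULL) {
			continue;
		}
		if (have_set_data) {
			add_to_cache(entries[i]);	/* guarded */
		}
		free(entries[i]->dn);			/* always freed */
		free(entries[i]);
		entries[i] = NULL;
	}
}

int
main(void)
{
	struct entry **list;

	list = calloc(2, sizeof(*list));
	list[0] = calloc(1, sizeof(**list));
	list[0]->dn = strdup("uid=alice,cn=users");
	list[1] = calloc(1, sizeof(**list));
	list[1]->dn = strdup("uid=bob,cn=users");

	/* Even with no set data, the entries are still released. */
	flush_staged(list, 2, /* have_set_data = */ 0);
	free(list);
	return 0;
}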