author     Simo Sorce <ssorce@redhat.com>           2010-04-29 12:42:59 -0400
committer  Stephen Gallagher <sgallagh@redhat.com>  2010-05-03 13:53:19 -0400
commit     a4e5f3db252ec310276188349c584cadb1790256 (patch)
tree       37c26bee8dce4c18140881436950551662d7887c
parent     601545ddd3f65e819de307214fdde8a9181be366 (diff)
Avoid freeing sdap_handle too early
Prevent the sdap_handle from being freed too early: the destructor now fails (returns -1) when it is invoked while the release routine is already running, instead of recursing into it.
-rw-r--r--  src/providers/ldap/sdap.h        6
-rw-r--r--  src/providers/ldap/sdap_async.c  58
2 files changed, 46 insertions, 18 deletions
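
For context, here is a minimal standalone sketch of the pattern this patch introduces, using a hypothetical struct conn / conn_release() rather than the real sdap_handle code: the talloc destructor refuses to free the object while the release routine holds the lock (by returning -1), and the release routine frees the memory itself afterwards if the destructor fired in the meantime.

/* Sketch only: "struct conn" and conn_release() are hypothetical names,
 * not the SSSD sources.
 * Build assumption: gcc sketch.c $(pkg-config --cflags --libs talloc) */
#include <stdbool.h>
#include <talloc.h>

struct conn {
    bool destructor_lock;   /* release routine is currently running   */
    bool release_memory;    /* destructor fired while we were locked  */
};

static void conn_release(struct conn *c);

static int conn_destructor(struct conn *c)
{
    if (c->destructor_lock) {
        /* conn_release() is on the stack: remember that a free was
         * requested and make talloc_free() fail for now */
        c->release_memory = true;
        return -1;
    }
    conn_release(c);
    return 0;
}

static void conn_release(struct conn *c)
{
    if (c->destructor_lock) return;     /* already being released */
    c->destructor_lock = true;

    /* ... tear down callbacks, fds, the LDAP handle, etc.
     * Any callback that calls talloc_free(c) here only sets
     * release_memory, thanks to the destructor above ... */

    c->destructor_lock = false;

    if (c->release_memory) {
        /* a free was requested during teardown: disarm the
         * destructor and really free the memory now */
        talloc_set_destructor(c, NULL);
        talloc_free(c);
    }
}

int main(void)
{
    struct conn *c = talloc_zero(NULL, struct conn);
    talloc_set_destructor(c, conn_destructor);

    conn_release(c);    /* explicit teardown */
    talloc_free(c);     /* destructor fires, conn_release() runs again
                         * (nothing left to do), memory is freed */
    return 0;
}
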
diff --git a/src/providers/ldap/sdap.h b/src/providers/ldap/sdap.h
index aca98457..1445e8ee 100644
--- a/src/providers/ldap/sdap.h
+++ b/src/providers/ldap/sdap.h
@@ -74,6 +74,12 @@ struct sdap_handle {
     struct sdap_fd_events *sdap_fd_events;
     struct sdap_op *ops;
+
+    /* during release we need to lock access to the handler
+     * from the destructor to avoid recursion */
+    bool destructor_lock;
+    /* mark when it is safe to finally release the handler memory */
+    bool release_memory;
 };
 
 struct sdap_service {
diff --git a/src/providers/ldap/sdap_async.c b/src/providers/ldap/sdap_async.c
index a25c50bb..5135cb47 100644
--- a/src/providers/ldap/sdap_async.c
+++ b/src/providers/ldap/sdap_async.c
@@ -84,35 +84,57 @@ static int sdap_handle_destructor(void *mem)
 {
     struct sdap_handle *sh = talloc_get_type(mem, struct sdap_handle);
-    sdap_handle_release(sh);
+    /* if the structure is currently locked, then mark it to be released
+     * and prevent talloc from freeing the memory */
+    if (sh->destructor_lock) {
+        sh->release_memory = true;
+        return -1;
+    }
+    sdap_handle_release(sh);
     return 0;
 }
 
 static void sdap_handle_release(struct sdap_handle *sh)
 {
-    DEBUG(8, ("Trace: sh[%p], connected[%d], ops[%p], ldap[%p]\n",
-              sh, (int)sh->connected, sh->ops, sh->ldap));
+    struct sdap_op *op;
 
-    if (sh->connected) {
-        struct sdap_op *op;
+    DEBUG(8, ("Trace: sh[%p], connected[%d], ops[%p], ldap[%p], "
+              "destructor_lock[%d], release_memory[%d]\n",
+              sh, (int)sh->connected, sh->ops, sh->ldap,
+              (int)sh->destructor_lock, (int)sh->release_memory));
 
-        talloc_zfree(sh->sdap_fd_events);
+    if (sh->destructor_lock) return;
+    sh->destructor_lock = true;
 
-        while (sh->ops) {
-            op = sh->ops;
-            op->callback(op, NULL, EIO, op->data);
-            /* calling the callback may result in freeing the op */
-            /* check if it is still the same or avoid freeing */
-            if (op == sh->ops) talloc_free(op);
-        }
+    /* make sure nobody tries to reuse this connection from now on */
+    sh->connected = false;
 
-        if (sh->ldap) {
-            ldap_unbind_ext(sh->ldap, NULL, NULL);
-        }
-        sh->connected = false;
+    talloc_zfree(sh->sdap_fd_events);
+
+    while (sh->ops) {
+        op = sh->ops;
+        op->callback(op, NULL, EIO, op->data);
+        /* calling the callback may result in freeing the op */
+        /* check if it is still the same or avoid freeing */
+        if (op == sh->ops) talloc_free(op);
+    }
+
+    if (sh->ldap) {
+        ldap_unbind_ext(sh->ldap, NULL, NULL);
         sh->ldap = NULL;
-        sh->ops = NULL;
+    }
+
+    /* ok, we have done the job, unlock now */
+    sh->destructor_lock = false;
+
+    /* finally if a destructor was ever called, free sh before
+     * exiting */
+    if (sh->release_memory) {
+        /* neutralize the destructor as we already handled
+         * all was needed to be released */
+        talloc_set_destructor((TALLOC_CTX *)sh, NULL);
+        talloc_free(sh);
     }
 }
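
The patch leans on a documented piece of talloc behavior: when a destructor returns -1, the free is aborted, talloc_free() returns -1, and the memory stays valid until the destructor is cleared. A tiny check of that behavior (my example, not part of the commit):

#include <stdio.h>
#include <talloc.h>

static int refuse_free(char *p)
{
    (void)p;
    return -1;                       /* veto this talloc_free() */
}

int main(void)
{
    char *p = talloc_strdup(NULL, "still alive");
    talloc_set_destructor(p, refuse_free);

    int ret = talloc_free(p);        /* destructor returns -1 ...          */
    printf("talloc_free() = %d, p = \"%s\"\n", ret, p);  /* ret == -1, p untouched */

    talloc_set_destructor(p, NULL);  /* disarm, as sdap_handle_release() does */
    talloc_free(p);                  /* now the memory really goes away    */
    return 0;
}
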