path: root/lib/tdb/common/lock.c
author     Rusty Russell <rusty@rustcorp.com.au>  2010-02-24 13:23:58 +1030
committer  Rusty Russell <rusty@rustcorp.com.au>  2010-02-24 13:23:58 +1030
commit     ec96ea690edbe3398d690b4a953d487ca1773f1c (patch)
tree       24346da6a7faddd3b04478e58b08d8b809994ce1 /lib/tdb/common/lock.c
parent     1bf482b9ef9ec73dd7ee4387d7087aa3955503dd (diff)
tdb: handle processes dying during transaction commit.
tdb transactions were designed to be robust against the machine powering off, but interestingly were never designed to handle the case where an administrator kill -9's a process during commit. Because recovery is only done on tdb_open, processes with the tdb already mapped will simply use it despite it being corrupt and needing recovery.

The solution to this is to check for recovery every time we grab a data lock: we could have gained the lock because a process just died.

This has no measurable cost: here is the time for tdbtorture -s 0 -n 1 -l 10000:

Before: 2.75 2.50 2.81 3.19 2.91 2.53 2.72 2.50 2.78 2.77 = Avg 2.75
After:  2.81 2.57 3.42 2.49 3.02 2.49 2.84 2.48 2.80 2.43 = Avg 2.74

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
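[Editor's note: the heart of the change is visible in tdb_lock_list() in the diff below: only the transition from holding no data locks to holding one can mean we just inherited a lock from a process killed mid-commit, so that is the only point where recovery is checked. What follows is a minimal, compilable sketch of that check-on-first-lock pattern. All names in it (struct db, db_lock_slot(), db_run_recovery(), and friends) are hypothetical stand-ins for the tdb internals shown in the diff, not actual tdb API.]

/* A compilable sketch of the check-on-first-data-lock pattern described
 * above. Every identifier here is a hypothetical stand-in; the real
 * implementation is tdb_lock_list()/tdb_lock_and_recover() below. */
#include <stdbool.h>
#include <stdio.h>

struct db {
    unsigned int num_data_locks; /* like tdb->num_lockrecs, data locks only */
    bool needs_recovery;         /* like the state tdb_needs_recovery() reads */
};

static bool db_holds_any_data_lock(const struct db *db)
{
    return db->num_data_locks > 0;   /* like have_data_locks() */
}

static int db_grab_lock(struct db *db, int slot)
{
    (void)slot;
    db->num_data_locks++;            /* real code fcntl-locks a byte range */
    return 0;
}

static void db_drop_lock(struct db *db, int slot)
{
    (void)slot;
    db->num_data_locks--;
}

static int db_run_recovery(struct db *db)
{
    /* Real code first takes the freelist and open locks in the same order
     * as transaction commit (see tdb_lock_and_recover() in the diff),
     * then replays the recovery area. */
    db->needs_recovery = false;
    return 0;
}

static int db_lock_slot(struct db *db, int slot)
{
    /* Only the transition from zero to one data lock can mean we just
     * inherited a lock from a process killed mid-commit, so only check
     * for recovery then; later locks are covered by the first check. */
    bool check = !db_holds_any_data_lock(db);

    if (db_grab_lock(db, slot) != 0)
        return -1;

    if (check && db->needs_recovery) {
        db_drop_lock(db, slot);
        if (db_run_recovery(db) != 0)
            return -1;
        return db_lock_slot(db, slot); /* retry against the clean db */
    }
    return 0;
}

int main(void)
{
    struct db db = { .num_data_locks = 0, .needs_recovery = true };
    if (db_lock_slot(&db, 7) == 0)
        printf("lock acquired; recovery ran: %s\n",
               db.needs_recovery ? "no" : "yes");
    return 0;
}

The retry after recovery simply re-runs the original lock request against the now-consistent database, exactly as tdb_lock_list() re-calls itself in the patch.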
Diffstat (limited to 'lib/tdb/common/lock.c')
-rw-r--r--  lib/tdb/common/lock.c | 60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/lib/tdb/common/lock.c b/lib/tdb/common/lock.c
index adff8a65808..65d68432fe9 100644
--- a/lib/tdb/common/lock.c
+++ b/lib/tdb/common/lock.c
@@ -309,10 +309,44 @@ int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
return 0;
}
+static int tdb_lock_and_recover(struct tdb_context *tdb)
+{
+ int ret;
+
+ /* We need to match locking order in transaction commit. */
+ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ return -1;
+ }
+
+ ret = tdb_transaction_recover(tdb);
+
+ tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+
+ return ret;
+}
+
+static bool have_data_locks(const struct tdb_context *tdb)
+{
+ unsigned int i;
+
+ for (i = 0; i < tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off >= lock_offset(-1))
+ return true;
+ }
+ return false;
+}
+
static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
enum tdb_lock_flags waitflag)
{
int ret;
+ bool check = false;
/* a allrecord lock allows us to avoid per chain locks */
if (tdb->allrecord_lock.count &&
@@ -324,7 +358,18 @@ static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
tdb->ecode = TDB_ERR_LOCK;
ret = -1;
} else {
+ /* Only check when we grab first data lock. */
+ check = !have_data_locks(tdb);
ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);
+
+ if (ret == 0 && check && tdb_needs_recovery(tdb)) {
+ tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_lock_list(tdb, list, ltype, waitflag);
+ }
}
return ret;
}
@@ -488,6 +533,21 @@ int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
tdb->allrecord_lock.off = upgradable;
+ if (tdb_needs_recovery(tdb)) {
+ bool mark = flags & TDB_LOCK_MARK_ONLY;
+ tdb_allrecord_unlock(tdb, ltype, mark);
+ if (mark) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_lockall_mark cannot do recovery\n"));
+ return -1;
+ }
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
+ }
+
return 0;
}