author    Steven Whitehouse <swhiteho@redhat.com>    2007-01-17 15:33:23 +0000
committer Steven Whitehouse <swhiteho@redhat.com>    2007-02-05 13:37:11 -0500
commit    fee852e374fb367c5436b1226eb93b35f8355ed9 (patch)
tree      0e373afa25bd27582b2fc4fff8f2964ff0de6722    /fs/gfs2/glock.c
parent    330005c2b23e71e54931913e9b63d1712a19e444 (diff)
[GFS2] Shrink gfs2_inode memory by half
Here is something I spotted (while looking for something entirely different) the other day.

Rather than using a completion in each and every struct gfs2_holder, this removes it in favour of hashed wait queues, thus saving a considerable amount of memory both on the stack (where a number of gfs2_holder structures are allocated) and in particular in the gfs2_inode, which has 8 gfs2_holder structures embedded within it.

As a result, on x86_64 the gfs2_inode shrinks from 2488 bytes to 1912 bytes, a saving of 576 bytes per inode (no, that's not a typo!). In actual practice we get a much better result than that, since now that a gfs2_inode is under the 2048 byte barrier we get two per 4k slab page, effectively halving the amount of memory required to store gfs2_inodes.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
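The core of the change is visible in the diff below: each gfs2_holder loses its embedded struct completion and keeps only the HIF_WAIT bit in gh_iflags, with waiters parked on the kernel's shared, hashed bit-wait queues via wait_on_bit() and woken with clear_bit(); smp_mb(); wake_up_bit(). As a rough illustration of why that saves per-object memory, here is a minimal userspace sketch of the same idea; it is not the kernel implementation, and every name in it (wq_table, flag_waitqueue, wait_on_flag, clear_flag_and_wake, struct holder) is invented for the example.

/*
 * Userspace sketch: instead of embedding a whole synchronization object
 * (a completion: wait queue head + done counter) in every holder, keep a
 * single flag word and let waiters share a small hashed table of wait
 * queues selected from the flag's address -- the pattern that
 * wait_on_bit()/wake_up_bit() provide in the kernel.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_TABLE_SIZE 16		/* arbitrary size for the sketch */

struct bit_waitqueue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct bit_waitqueue wq_table[WQ_TABLE_SIZE];

static void wq_table_init(void)
{
	int i;

	for (i = 0; i < WQ_TABLE_SIZE; i++) {
		pthread_mutex_init(&wq_table[i].lock, NULL);
		pthread_cond_init(&wq_table[i].cond, NULL);
	}
}

/* Hash the flag's address to pick a shared wait queue. */
static struct bit_waitqueue *flag_waitqueue(int *flag)
{
	uintptr_t key = (uintptr_t)flag >> 4;

	return &wq_table[key % WQ_TABLE_SIZE];
}

/* Rough analogue of wait_on_bit(): sleep until the flag is cleared. */
static void wait_on_flag(int *flag)
{
	struct bit_waitqueue *wq = flag_waitqueue(flag);

	pthread_mutex_lock(&wq->lock);
	while (*flag)			/* re-check: the bucket is shared */
		pthread_cond_wait(&wq->cond, &wq->lock);
	pthread_mutex_unlock(&wq->lock);
}

/* Rough analogue of clear_bit(); smp_mb(); wake_up_bit(). */
static void clear_flag_and_wake(int *flag)
{
	struct bit_waitqueue *wq = flag_waitqueue(flag);

	pthread_mutex_lock(&wq->lock);
	*flag = 0;
	pthread_cond_broadcast(&wq->cond);
	pthread_mutex_unlock(&wq->lock);
}

/* Per-object state is now a single word, not a completion. */
struct holder {
	int waiting;
};

static struct holder h = { .waiting = 1 };

static void *releaser(void *arg)
{
	clear_flag_and_wake(&h.waiting);
	return NULL;
}

int main(void)
{
	pthread_t t;

	wq_table_init();
	pthread_create(&t, NULL, releaser, NULL);
	wait_on_flag(&h.waiting);	/* returns once releaser clears the flag */
	pthread_join(t, NULL);
	puts("holder released");
	return 0;
}

The trade-off of sharing hashed wait queues is that a wakeup can reach unrelated waiters in the same bucket; they simply re-check their own flag and go back to sleep, which is cheap compared with carrying a full wait queue in every holder.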
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--    fs/gfs2/glock.c    66
1 files changed, 43 insertions, 23 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index fb1960b7fdd..5341e03b873 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,6 +19,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
+#include <linux/wait.h>
#include <asm/uaccess.h>
#include "gfs2.h"
@@ -395,7 +396,6 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
gh->gh_flags = flags;
gh->gh_error = 0;
gh->gh_iflags = 0;
- init_completion(&gh->gh_wait);
if (gh->gh_state == LM_ST_EXCLUSIVE)
gh->gh_flags |= GL_LOCAL_EXCL;
@@ -479,6 +479,29 @@ static void gfs2_holder_put(struct gfs2_holder *gh)
kfree(gh);
}
+static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
+{
+ if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
+ gfs2_holder_put(gh);
+ return;
+ }
+ clear_bit(HIF_WAIT, &gh->gh_iflags);
+ smp_mb();
+ wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+}
+
+static int holder_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+static void wait_on_holder(struct gfs2_holder *gh)
+{
+ might_sleep();
+ wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
+}
+
/**
* rq_mutex - process a mutex request in the queue
* @gh: the glock holder
@@ -493,7 +516,9 @@ static int rq_mutex(struct gfs2_holder *gh)
list_del_init(&gh->gh_list);
/* gh->gh_error never examined. */
set_bit(GLF_LOCK, &gl->gl_flags);
- complete(&gh->gh_wait);
+ clear_bit(HIF_WAIT, &gh->gh_iflags);
+ smp_mb();
+ wake_up_bit(&gh->gh_iflags, HIF_WAIT);
return 1;
}
@@ -549,7 +574,7 @@ static int rq_promote(struct gfs2_holder *gh)
gh->gh_error = 0;
set_bit(HIF_HOLDER, &gh->gh_iflags);
- complete(&gh->gh_wait);
+ gfs2_holder_dispose_or_wake(gh);
return 0;
}
@@ -573,10 +598,7 @@ static int rq_demote(struct gfs2_holder *gh)
list_del_init(&gh->gh_list);
gh->gh_error = 0;
spin_unlock(&gl->gl_spin);
- if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
- gfs2_holder_put(gh);
- else
- complete(&gh->gh_wait);
+ gfs2_holder_dispose_or_wake(gh);
spin_lock(&gl->gl_spin);
} else {
gl->gl_req_gh = gh;
@@ -684,6 +706,8 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
gfs2_holder_init(gl, 0, 0, &gh);
set_bit(HIF_MUTEX, &gh.gh_iflags);
+ if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
+ BUG();
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
@@ -691,11 +715,13 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
} else {
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
- complete(&gh.gh_wait);
+ clear_bit(HIF_WAIT, &gh.gh_iflags);
+ smp_mb();
+ wake_up_bit(&gh.gh_iflags, HIF_WAIT);
}
spin_unlock(&gl->gl_spin);
- wait_for_completion(&gh.gh_wait);
+ wait_on_holder(&gh);
gfs2_holder_uninit(&gh);
}
@@ -774,6 +800,7 @@ restart:
return;
set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
+ set_bit(HIF_WAIT, &new_gh->gh_iflags);
goto restart;
}
@@ -908,12 +935,8 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
gfs2_glock_put(gl);
- if (gh) {
- if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
- gfs2_holder_put(gh);
- else
- complete(&gh->gh_wait);
- }
+ if (gh)
+ gfs2_holder_dispose_or_wake(gh);
}
/**
@@ -999,12 +1022,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
gfs2_glock_put(gl);
- if (gh) {
- if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
- gfs2_holder_put(gh);
- else
- complete(&gh->gh_wait);
- }
+ if (gh)
+ gfs2_holder_dispose_or_wake(gh);
}
/**
@@ -1105,8 +1124,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
if (gh->gh_flags & LM_FLAG_PRIORITY)
do_cancels(gh);
- wait_for_completion(&gh->gh_wait);
-
+ wait_on_holder(gh);
if (gh->gh_error)
return gh->gh_error;
@@ -1162,6 +1180,8 @@ static void add_to_queue(struct gfs2_holder *gh)
struct gfs2_holder *existing;
BUG_ON(!gh->gh_owner);
+ if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
+ BUG();
existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
if (existing) {