author     Tejun Heo <tj@kernel.org>                  2008-08-25 19:50:16 +0200
committer  Jens Axboe <jens.axboe@oracle.com>         2008-10-09 08:56:04 +0200
commit     a1ed5b0cffe4b16a93a6a3390e8cee0fbef94f86 (patch)
tree       1f07794d793124c0ab514378d9179f595769a813 /lib
parent     710027a48ede75428cc68eaa8ae2269b1e356e2c (diff)
klist: don't iterate over deleted entries
A klist entry is kept on the list until all of its current iterations are finished; however, a new iteration started after deletion also walks over deleted entries as long as their reference count stays above zero. This causes problems for users that iterate over the list while synchronized against list manipulations and naturally expect already deleted entries not to show up during iteration.

This patch implements a dead flag which is set on deletion so that iteration can skip already deleted entries. The dead flag piggybacks on the lowest bit of knode->n_klist and is only visible to the klist implementation proper.

While at it, drop klist_iter->i_head as it's redundant and offers nothing, either semantically or performance-wise, since klist_iter->i_klist is dereferenced on every iteration anyway.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
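For illustration, here is a minimal user-space sketch (not part of the patch) of the pointer-tagging trick used by the helpers this patch adds (knode_klist(), knode_dead(), knode_kill()): because a struct klist is always at least word-aligned, bit 0 of its address is guaranteed to be zero and can double as the dead flag. The simplified struct klist_node layout and the fake address below are invented for the example; only the helper logic mirrors the patch.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define KNODE_DEAD       1LU
#define KNODE_KLIST_MASK (~KNODE_DEAD)

struct klist;                            /* opaque stand-in for the kernel's struct klist */

struct klist_node {
        void *n_klist;                   /* owning klist; lowest bit doubles as the dead flag */
};

static struct klist *knode_klist(struct klist_node *knode)
{
        /* mask off the flag bit to recover the real pointer */
        return (struct klist *)((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

static bool knode_dead(struct klist_node *knode)
{
        return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static void knode_kill(struct klist_node *knode)
{
        /* set the flag in place; the pointer bits are left untouched */
        *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

int main(void)
{
        unsigned long fake_addr = 0x1000;        /* stands in for an aligned struct klist address */
        struct klist_node n = { .n_klist = (void *)fake_addr };

        assert(!knode_dead(&n));
        knode_kill(&n);                          /* mark the node deleted */
        assert(knode_dead(&n));                  /* iteration would now skip this node */
        assert((unsigned long)knode_klist(&n) == fake_addr);
        printf("dead flag set, owning-klist pointer still recoverable\n");
        return 0;
}

Stashing the flag in the existing n_klist pointer keeps struct klist_node the same size, which matters because the node is embedded in every device-model object that uses klists.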
Diffstat (limited to 'lib')
-rw-r--r--  lib/klist.c | 96
1 file changed, 70 insertions(+), 26 deletions(-)
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa..bbdd3015c2c 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
 #include <linux/klist.h>
 #include <linux/module.h>
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD		1LU
+#define KNODE_KLIST_MASK	~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+        return (struct klist *)
+                ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+        return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+        knode->n_klist = klist;
+        /* no knode deserves to start its life dead */
+        WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+        /* and no knode should die twice ever either, see we're very humane */
+        WARN_ON(knode_dead(knode));
+        *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
 /**
  * klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
         INIT_LIST_HEAD(&n->n_node);
         init_completion(&n->n_removed);
         kref_init(&n->n_ref);
-        n->n_klist = k;
+        knode_set_klist(n, k);
         if (k->get)
                 k->get(n);
 }
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
  */
 void klist_add_after(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
  */
 void klist_add_before(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
 {
         struct klist_node *n = container_of(kref, struct klist_node, n_ref);
+        WARN_ON(!knode_dead(n));
         list_del(&n->n_node);
         complete(&n->n_removed);
-        n->n_klist = NULL;
+        knode_set_klist(n, NULL);
 }
 static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
         return kref_put(&n->n_ref, klist_release);
 }
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
 {
-        struct klist *k = n->n_klist;
+        struct klist *k = knode_klist(n);
         void (*put)(struct klist_node *) = k->put;
         spin_lock(&k->k_lock);
+        if (kill)
+                knode_kill(n);
         if (!klist_dec_and_del(n))
                 put = NULL;
         spin_unlock(&k->k_lock);
         if (put)
                 put(n);
 }
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+        klist_put(n, true);
+}
 EXPORT_SYMBOL_GPL(klist_del);
 /**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                           struct klist_node *n)
 {
         i->i_klist = k;
-        i->i_head = &k->k_list;
         i->i_cur = n;
         if (n)
                 kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
 void klist_iter_exit(struct klist_iter *i)
 {
         if (i->i_cur) {
-                klist_del(i->i_cur);
+                klist_put(i->i_cur, false);
                 i->i_cur = NULL;
         }
 }
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
  */
 struct klist_node *klist_next(struct klist_iter *i)
 {
-        struct list_head *next;
-        struct klist_node *lnode = i->i_cur;
-        struct klist_node *knode = NULL;
         void (*put)(struct klist_node *) = i->i_klist->put;
+        struct klist_node *last = i->i_cur;
+        struct klist_node *next;
         spin_lock(&i->i_klist->k_lock);
-        if (lnode) {
-                next = lnode->n_node.next;
-                if (!klist_dec_and_del(lnode))
+
+        if (last) {
+                next = to_klist_node(last->n_node.next);
+                if (!klist_dec_and_del(last))
                         put = NULL;
         } else
-                next = i->i_head->next;
+                next = to_klist_node(i->i_klist->k_list.next);
-        if (next != i->i_head) {
-                knode = to_klist_node(next);
-                kref_get(&knode->n_ref);
+        i->i_cur = NULL;
+        while (next != to_klist_node(&i->i_klist->k_list)) {
+                if (likely(!knode_dead(next))) {
+                        kref_get(&next->n_ref);
+                        i->i_cur = next;
+                        break;
+                }
+                next = to_klist_node(next->n_node.next);
         }
-        i->i_cur = knode;
+
         spin_unlock(&i->i_klist->k_lock);
-        if (put && lnode)
-                put(lnode);
-        return knode;
+
+        if (put && last)
+                put(last);
+        return i->i_cur;
 }
 EXPORT_SYMBOL_GPL(klist_next);
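As a usage illustration (a hypothetical caller, not part of this patch): a driver-core-style user embeds a struct klist_node in its own object and walks the list through a klist_iter. With this change, nodes that another thread has already passed to klist_del(), but which are still pinned by an older iterator's reference, are skipped by klist_next() instead of being handed back to the caller. The struct my_item container and walk_items() below are invented for the example; the klist API calls are the real ones.

#include <linux/klist.h>
#include <linux/kernel.h>

struct my_item {                        /* hypothetical container object */
        struct klist_node knode;        /* linked into a struct klist */
        int payload;
};

static void walk_items(struct klist *list)
{
        struct klist_iter iter;
        struct klist_node *kn;

        klist_iter_init(list, &iter);
        /* klist_next() now returns only nodes that are not marked dead */
        while ((kn = klist_next(&iter))) {
                struct my_item *item = container_of(kn, struct my_item, knode);

                pr_info("payload=%d\n", item->payload);
        }
        klist_iter_exit(&iter);
}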