path: root/runtime/task_finder_vma.c
author	Mark Wielaard <mjw@redhat.com>	2009-03-18 12:27:13 +0100
committer	Mark Wielaard <mjw@redhat.com>	2009-03-18 12:27:13 +0100
commit	cb1468beb7b34ad988280fea9fb7c4558b47341a (patch)
tree	c1f2b90dc13ab0f4bd3fe07403449778bce53bed /runtime/task_finder_vma.c
parent	7d6fe193398090a3fb1847fc1a286772db114893 (diff)
Replace mutex in task_finder_vma with rwlock to be interrupt context safe.
* runtime/task_finder_vma.c: Replace all mutex calls with appropriate read or write (un)lock calls.
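For illustration, the locking pattern this commit converts to can be summarized
in the following standalone sketch (example_lock, example_update and
example_lookup are hypothetical names, not part of the patch). The _irqsave
variants are the key to interrupt safety: a writer that left interrupts enabled
could be interrupted on its own CPU by a handler spinning on the read lock,
which would deadlock.

#include <linux/spinlock.h>

/* Hypothetical sketch of the rwlock pattern used in this commit. */
static DEFINE_RWLOCK(example_lock);

/* Writer (process context): takes the lock exclusively and disables
 * local interrupts, so no interrupt handler on this CPU can spin on
 * the read lock while we hold the write lock. */
static void example_update(void)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	/* ... modify the shared hash table ... */
	write_unlock_irqrestore(&example_lock, flags);
}

/* Reader (safe in interrupt context): multiple readers may hold the
 * lock concurrently; they exclude only writers. */
static void example_lookup(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_lock, flags);
	/* ... search the shared hash table, but never modify it ... */
	read_unlock_irqrestore(&example_lock, flags);
}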
Diffstat (limited to 'runtime/task_finder_vma.c')
-rw-r--r--	runtime/task_finder_vma.c	82
1 file changed, 51 insertions(+), 31 deletions(-)
diff --git a/runtime/task_finder_vma.c b/runtime/task_finder_vma.c
index 83b206e5..a210d92e 100644
--- a/runtime/task_finder_vma.c
+++ b/runtime/task_finder_vma.c
@@ -1,13 +1,19 @@
#include <linux/list.h>
#include <linux/jhash.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
// When handling mmap() syscall tracing to notice memory map
// changes, we need to cache mmap() entry parameter values for
// processing at mmap() exit.
-// __stp_tf_vma_mutex protects the hash table.
-static DEFINE_MUTEX(__stp_tf_vma_mutex);
+// __stp_tf_vma_lock protects the hash table.
+// Documentation/spinlocks.txt suggests we can be a bit more clever
+// if we guarantee that in interrupt context we only read, not write,
+// the data structures. We should never change the hash table or the
+// contents in interrupt context (which should only ever call
+// __stp_tf_get_vma_entry_addr for symbol lookup). So we might want
+// to look into that if this seems a bottleneck.
+static DEFINE_RWLOCK(__stp_tf_vma_lock);
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
@@ -40,23 +46,24 @@ static struct hlist_head __stp_tf_vma_table[__STP_TF_TABLE_SIZE];
static struct hlist_head __stp_tf_vma_map[__STP_TF_TABLE_SIZE];
// __stp_tf_vma_initialize(): Initialize the free list. Grabs the
-// mutex.
+// write lock.
static void
__stp_tf_vma_initialize(void)
{
int i;
struct hlist_head *head = &__stp_tf_vma_free_list[0];
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
for (i = 0; i < TASK_FINDER_VMA_ENTRY_ITEMS; i++) {
hlist_add_head(&__stp_tf_vma_free_list_items[i].hlist, head);
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
}
// __stp_tf_vma_get_free_entry(): Returns an entry from the free list
-// or NULL. The __stp_tf_vma_mutex must be locked before calling this
+// or NULL. The __stp_tf_vma_lock must be write locked before calling this
// function.
static struct __stp_tf_vma_entry *
__stp_tf_vma_get_free_entry(void)
@@ -77,7 +84,7 @@ __stp_tf_vma_get_free_entry(void)
// __stp_tf_vma_put_free_entry(): Puts an entry back on the free
-// list. The __stp_tf_vma_mutex must be locked before calling this
+// list. The __stp_tf_vma_lock must be write locked before calling this
// function.
static void
__stp_tf_vma_put_free_entry(struct __stp_tf_vma_entry *entry)
@@ -101,7 +108,7 @@ __stp_tf_vma_hash(struct task_struct *tsk, unsigned long addr)
// Get vma_entry if the vma is present in the vma hash table.
-// Returns NULL if not present.
+// Returns NULL if not present. Takes a read lock on __stp_tf_vma_lock.
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_entry(struct task_struct *tsk, unsigned long addr)
{
@@ -109,20 +116,22 @@ __stp_tf_get_vma_entry(struct task_struct *tsk, unsigned long addr)
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_table[__stp_tf_vma_hash(tsk, addr)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
&& addr == entry->addr) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return entry;
}
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return NULL;
}
// Add the vma info to the vma hash table.
+// Takes a write lock on __stp_tf_vma_lock.
static int
__stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
struct vm_area_struct *vma)
@@ -131,7 +140,8 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_table[__stp_tf_vma_hash(tsk, addr)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
@@ -141,7 +151,7 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
"vma (pid: %d, vm_start: 0x%lx) present?\n",
tsk->pid, vma->vm_start);
#endif
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -EBUSY; /* Already there */
}
}
@@ -149,7 +159,7 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
// Get an element from the free list.
entry = __stp_tf_vma_get_free_entry();
if (!entry) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -ENOMEM;
}
entry->pid = tsk->pid;
@@ -158,11 +168,12 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
entry->vm_end = vma->vm_end;
entry->vm_pgoff = vma->vm_pgoff;
hlist_add_head(&entry->hlist, head);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
// Remove the vma entry from the vma hash table.
+// Takes a write lock on __stp_tf_vma_lock.
static int
__stp_tf_remove_vma_entry(struct __stp_tf_vma_entry *entry)
{
@@ -171,10 +182,11 @@ __stp_tf_remove_vma_entry(struct __stp_tf_vma_entry *entry)
int found = 0;
if (entry != NULL) {
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
hlist_del(&entry->hlist);
__stp_tf_vma_put_free_entry(entry);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
}
return 0;
}
@@ -189,7 +201,7 @@ __stp_tf_vma_map_hash(struct task_struct *tsk)
}
// Get vma_entry if the vma is present in the vma map hash table.
-// Returns NULL if not present. The __stp_tf_vma_mutex must be locked
+// Returns NULL if not present. The __stp_tf_vma_lock must be read locked
// before calling this function.
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_map_entry_internal(struct task_struct *tsk,
@@ -220,7 +232,10 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ // Take a write lock, since we are most likely going to write
+ // after reading.
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
if (entry != NULL) {
#if 0
@@ -228,14 +243,14 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
"vma (pid: %d, vm_start: 0x%lx) present?\n",
tsk->pid, entry->vm_start);
#endif
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -EBUSY; /* Already there */
}
// Get an element from the free list.
entry = __stp_tf_vma_get_free_entry();
if (!entry) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -ENOMEM;
}
@@ -249,7 +264,7 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_add_head(&entry->hlist, head);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
@@ -263,18 +278,21 @@ stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ // Take a write lock since we are most likely going to delete
+ // after reading.
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
if (entry != NULL) {
hlist_del(&entry->hlist);
__stp_tf_vma_put_free_entry(entry);
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
// Finds vma info if the vma is present in the vma map hash table.
-// Returns ESRCH if not present. The __stp_tf_vma_mutex must *not* be
+// Returns ESRCH if not present. The __stp_tf_vma_lock must *not* be
// locked before calling this function.
static int
stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
@@ -287,7 +305,8 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
struct __stp_tf_vma_entry *found_entry = NULL;
int rc = ESRCH;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
@@ -306,7 +325,7 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
*vm_pgoff = found_entry->vm_pgoff;
rc = 0;
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return rc;
}
@@ -320,15 +339,16 @@ __stp_tf_get_vma_entry_addr(struct task_struct *tsk, unsigned long addr)
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
&& addr >= entry->vm_start && addr < entry->vm_end) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return entry;
}
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return NULL;
}
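Usage note: per the comment added at the top of the file, the only
interrupt-context caller anticipated is a read-only symbol lookup via
__stp_tf_get_vma_entry_addr(). A hypothetical consumer might look like this
(handle_sample() is illustrative only, not part of the patch):

static void handle_sample(struct task_struct *tsk, unsigned long pc)
{
	struct __stp_tf_vma_entry *entry;

	/* Takes only the read lock, so this is safe in interrupt
	 * context under the locking scheme above. */
	entry = __stp_tf_get_vma_entry_addr(tsk, pc);
	if (entry != NULL) {
		/* pc lies within [entry->vm_start, entry->vm_end);
		 * entry->vm_pgoff etc. can drive the symbol lookup. */
	}
}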