author    David Smith <dsmith@redhat.com>  2009-05-21 16:57:04 -0500
committer David Smith <dsmith@redhat.com>  2009-05-21 16:57:04 -0500
commit    c8e9eb18d8d13d099a4a177fe53de507c1d9ce8b (patch)
tree      ab2388afb795ed1a7ead2fbbf8b9d1b368a8231f /runtime/task_finder_map.c
parent    dd9a3bcbef65bde65491d959e9458bc641924811 (diff)
parent    3863e7999255deeaa7f8f4bba7df893773812537 (diff)
Merge commit 'origin/master' into pr7043
Conflicts:
	runtime/print.c
	runtime/transport/transport.c
	runtime/transport/transport_msgs.h
Diffstat (limited to 'runtime/task_finder_map.c')
-rw-r--r--  runtime/task_finder_map.c | 191
1 file changed, 191 insertions(+), 0 deletions(-)
diff --git a/runtime/task_finder_map.c b/runtime/task_finder_map.c
new file mode 100644
index 00000000..b770dd0e
--- /dev/null
+++ b/runtime/task_finder_map.c
@@ -0,0 +1,191 @@
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/spinlock.h>
+
+// When tracing mmap()/munmap()/mprotect() syscalls to notice memory
+// map changes, we need to cache syscall entry parameter values for
+// processing at syscall exit.
+
+// __stp_tf_map_lock protects the hash table.
+// Documentation/spinlocks.txt suggests we could be a bit more clever
+// if we guaranteed that interrupt context only reads, never writes,
+// these data structures. We should never change the hash table or
+// its contents in interrupt context (which should only ever call
+// stap_find_map_map_info to retrieve stored info), so that may be
+// worth revisiting if this lock becomes a bottleneck.
+static DEFINE_RWLOCK(__stp_tf_map_lock);
+
+#define __STP_TF_HASH_BITS 4
+#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
+
+#ifndef TASK_FINDER_MAP_ENTRY_ITEMS
+#define TASK_FINDER_MAP_ENTRY_ITEMS 100
+#endif
+
+struct __stp_tf_map_entry {
+/* private: */
+ struct hlist_node hlist;
+ int usage;
+
+/* public: */
+ pid_t pid;
+ long syscall_no;
+ unsigned long arg0;
+ unsigned long arg1;
+ unsigned long arg2;
+};
+
+static struct __stp_tf_map_entry
+__stp_tf_map_free_list_items[TASK_FINDER_MAP_ENTRY_ITEMS];
+
+static struct hlist_head __stp_tf_map_free_list[1];
+
+static struct hlist_head __stp_tf_map_table[__STP_TF_TABLE_SIZE];
+
+// __stp_tf_map_initialize(): Initialize the free list. Grabs the
+// lock.
+static void
+__stp_tf_map_initialize(void)
+{
+ int i;
+ struct hlist_head *head = &__stp_tf_map_free_list[0];
+
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_map_lock, flags);
+ for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
+ hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
+ }
+ write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+}
+
+
+// __stp_tf_map_get_free_entry(): Returns an entry from the free list
+// or NULL. The __stp_tf_map_lock must be write locked before calling this
+// function.
+static struct __stp_tf_map_entry *
+__stp_tf_map_get_free_entry(void)
+{
+ struct hlist_head *head = &__stp_tf_map_free_list[0];
+ struct hlist_node *node;
+ struct __stp_tf_map_entry *entry = NULL;
+
+ if (hlist_empty(head))
+ return NULL;
+ hlist_for_each_entry(entry, node, head, hlist) {
+ break;
+ }
+ if (entry != NULL)
+ hlist_del(&entry->hlist);
+ return entry;
+}
+
+
+// __stp_tf_map_put_free_entry(): Puts an entry back on the free
+// list. The __stp_tf_map_lock must be write locked before calling this
+// function.
+static void
+__stp_tf_map_put_free_entry(struct __stp_tf_map_entry *entry)
+{
+ struct hlist_head *head = &__stp_tf_map_free_list[0];
+ hlist_add_head(&entry->hlist, head);
+}
+
+
+// __stp_tf_map_hash(): Compute the map hash.
+static inline u32
+__stp_tf_map_hash(struct task_struct *tsk)
+{
+ return (jhash_1word(tsk->pid, 0) & (__STP_TF_TABLE_SIZE - 1));
+}
+
+
+// Get map_entry if the map is present in the map hash table.
+// Returns NULL if not present. Takes a read lock on __stp_tf_map_lock.
+static struct __stp_tf_map_entry *
+__stp_tf_get_map_entry(struct task_struct *tsk)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_map_entry *entry;
+
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_map_lock, flags);
+ head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ if (tsk->pid == entry->pid) {
+ read_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ return entry;
+ }
+ }
+ read_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ return NULL;
+}
+
+
+// Add the map info to the map hash table. Takes a write lock on
+// __stp_tf_map_lock.
+static int
+__stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_map_entry *entry;
+ unsigned long flags;
+
+ write_lock_irqsave(&__stp_tf_map_lock, flags);
+ head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ // If we find an existing entry, just increment the
+ // usage count.
+ if (tsk->pid == entry->pid) {
+ entry->usage++;
+ write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ return 0;
+ }
+ }
+
+ // Get an element from the free list.
+ entry = __stp_tf_map_get_free_entry();
+ if (!entry) {
+ write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ return -ENOMEM;
+ }
+ entry->usage = 1;
+ entry->pid = tsk->pid;
+ entry->syscall_no = syscall_no;
+ entry->arg0 = arg0;
+ entry->arg1 = arg1;
+ entry->arg2 = arg2;
+ hlist_add_head(&entry->hlist, head);
+ write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ return 0;
+}
+
+
+// Remove the map entry from the map hash table. Takes a write lock on
+// __stp_tf_map_lock.
+static int
+__stp_tf_remove_map_entry(struct __stp_tf_map_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ int found = 0;
+
+ if (entry != NULL) {
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_map_lock, flags);
+
+ // Decrement the usage count.
+ entry->usage--;
+
+ // If the entry is unused, put it back on the free
+ // list.
+ if (entry->usage == 0) {
+ hlist_del(&entry->hlist);
+ __stp_tf_map_put_free_entry(entry);
+ }
+ write_unlock_irqrestore(&__stp_tf_map_lock, flags);
+ }
+ return 0;
+}
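
The patch only defines the caching helpers; the callers live elsewhere in the
task-finder code. As a rough illustration of the intended call pattern, here is
a minimal sketch of a syscall entry/exit handler pair built on these helpers.
The handler names and argument plumbing are hypothetical; only
__stp_tf_add_map(), __stp_tf_get_map_entry(), and __stp_tf_remove_map_entry()
come from task_finder_map.c, and the sketch assumes it is compiled into the
same translation unit.

	// Hypothetical syscall-entry handler: cache the mmap() arguments
	// so the exit handler can see them. __stp_tf_add_map() returns
	// -ENOMEM when the fixed-size free list
	// (TASK_FINDER_MAP_ENTRY_ITEMS entries) is exhausted.
	static void
	example_mmap_entry_handler(struct task_struct *tsk, long syscall_no,
				   unsigned long addr, unsigned long length,
				   unsigned long prot)
	{
		if (__stp_tf_add_map(tsk, syscall_no, addr, length, prot) != 0)
			/* free list exhausted; the exit handler sees NULL */;
	}

	// Hypothetical syscall-exit handler: look up the cached arguments
	// for this task, use them, then drop the reference. The entry
	// returns to the free list once its usage count reaches zero.
	static void
	example_mmap_exit_handler(struct task_struct *tsk)
	{
		struct __stp_tf_map_entry *entry = __stp_tf_get_map_entry(tsk);
		if (entry == NULL)
			return;		// no cached state for this task

		// ... examine entry->syscall_no, entry->arg0/arg1/arg2 ...

		__stp_tf_remove_map_entry(entry);
	}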