Diffstat (limited to 'runtime')
-rw-r--r--  runtime/runtime.h            3
-rw-r--r--  runtime/sym.c               71
-rw-r--r--  runtime/task_finder.c       34
-rw-r--r--  runtime/task_finder_vma.c  110
4 files changed, 122 insertions(+), 96 deletions(-)
diff --git a/runtime/runtime.h b/runtime/runtime.h
index fc5d454f..822562a2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -86,6 +86,9 @@ static struct
#include "io.c"
#include "arith.c"
#include "copy.c"
+
+#include "task_finder.c"
+
#include "sym.c"
#ifdef STP_PERFMON
#include "perf.c"
diff --git a/runtime/sym.c b/runtime/sym.c
index 1d88a862..d0c5d9fd 100644
--- a/runtime/sym.c
+++ b/runtime/sym.c
@@ -20,6 +20,40 @@
* @{
*/
+/* Callback that needs to be registered (in tapsets.cxx for
+ emit_module_init) for every user task path or pid for which we
+ might need symbols or unwind info. */
+static int _stp_tf_vm_cb(struct stap_task_finder_target *tgt,
+ struct task_struct *tsk,
+ int map_p, char *vm_path,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long vm_pgoff)
+{
+ int i;
+#ifdef DEBUG_TASK_FINDER_VMA
+ _stp_dbug(__FUNCTION__, __LINE__, "vm_cb: tsk %d:%d path %s, start 0x%08lx, end 0x%08lx, offset 0x%lx\n", tsk->pid, map_p, vm_path, vm_start, vm_end, vm_pgoff);
+#endif
+ if (map_p)
+ {
+ struct _stp_module *module = NULL;
+ if (vm_path != NULL)
+ for (i = 0; i < _stp_num_modules; i++)
+ if (strcmp(vm_path, _stp_modules[i]->path) == 0)
+ {
+#ifdef DEBUG_TASK_FINDER_VMA
+ _stp_dbug(__FUNCTION__, __LINE__, "vm_cb: matched path %s to module\n", vm_path);
+#endif
+ module = _stp_modules[i];
+ break;
+ }
+ stap_add_vma_map_info(tsk, vm_start, vm_end, vm_pgoff, module);
+ }
+ else
+ stap_remove_vma_map_info(tsk, vm_start, vm_end, vm_pgoff);
+
+ return 0;
+}
+
/* XXX: this needs to be address-space-specific. */
static unsigned long _stp_module_relocate(const char *module, const char *section, unsigned long offset)
{
@@ -76,11 +110,33 @@ static unsigned long _stp_module_relocate(const char *module, const char *sectio
if found, return NULL otherwise.
XXX: needs to be address-space-specific. */
static struct _stp_module *_stp_mod_sec_lookup(unsigned long addr,
+ struct task_struct *task,
struct _stp_section **sec)
{
+ void *user = NULL;
struct _stp_module *m = NULL;
unsigned midx = 0;
unsigned long closest_section_offset = ~0;
+
+ // Try vma matching first if task given.
+ if (task)
+ {
+ unsigned long vm_start = 0;
+ if (stap_find_vma_map_info(task, addr,
+ &vm_start, NULL,
+ NULL, &user) == 0)
+ if (user != NULL)
+ {
+ m = (struct _stp_module *)user;
+ *sec = &m->sections[0]; // XXX check actual section and relocate
+ dbug_sym(1, "found section %s in module %s at 0x%lx\n",
+ m->sections[0].name, m->name, vm_start);
+ if (strcmp(".dynamic", m->sections[0].name) == 0)
+ m->sections[0].addr = vm_start; // cheat...
+ return m;
+ }
+ }
+
for (midx = 0; midx < _stp_num_modules; midx++)
{
unsigned secidx;
@@ -108,14 +164,15 @@ static const char *_stp_kallsyms_lookup(unsigned long addr, unsigned long *symbo
unsigned long *offset,
const char **modname,
/* char ** secname? */
- char *namebuf)
+ char *namebuf,
+ struct task_struct *task)
{
struct _stp_module *m = NULL;
struct _stp_section *sec = NULL;
struct _stp_symbol *s = NULL;
unsigned end, begin = 0;
- m = _stp_mod_sec_lookup(addr, &sec);
+ m = _stp_mod_sec_lookup(addr, task, &sec);
if (unlikely (m == NULL || sec == NULL))
return NULL;
@@ -240,7 +297,7 @@ static void _stp_symbol_print(unsigned long address)
const char *name;
unsigned long offset, size;
- name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL);
+ name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL, NULL);
_stp_printf("%p", (int64_t) address);
@@ -265,7 +322,7 @@ static int _stp_func_print(unsigned long address, int verbose, int exact)
else
exstr = " (inexact)";
- name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL);
+ name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL, NULL);
if (name) {
if (verbose) {
@@ -281,13 +338,15 @@ static int _stp_func_print(unsigned long address, int verbose, int exact)
return 0;
}
-static void _stp_symbol_snprint(char *str, size_t len, unsigned long address)
+static void _stp_symbol_snprint(char *str, size_t len, unsigned long address,
+ struct task_struct *task)
{
const char *modname;
const char *name;
unsigned long offset, size;
- name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL);
+ name = _stp_kallsyms_lookup(address, &size, &offset, &modname, NULL,
+ task);
if (name)
strlcpy(str, name, len);
else
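
With the task argument threaded through _stp_mod_sec_lookup(), _stp_kallsyms_lookup() and _stp_symbol_snprint(), a lookup for a user-space address can consult the per-task vma map before falling back to the static module list. A rough usage sketch from probe context, where the traced task is current (the buffer size and the REG_IP() way of fetching the user pc are illustrative assumptions, not part of this patch):

        /* Sketch: turn a user-space pc into a symbol name, per task. */
        char sym[KSYM_NAME_LEN];
        unsigned long upc = REG_IP(regs);   /* assumed source of the user pc */

        /* Pass the task to enable the new vma-based lookup; passing NULL
           keeps the old kernel-only behaviour. */
        _stp_symbol_snprint(sym, sizeof(sym), upc, current);
        _stp_printf("user pc: %s\n", sym);
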
diff --git a/runtime/task_finder.c b/runtime/task_finder.c
index 3f4908cb..3a2d8c57 100644
--- a/runtime/task_finder.c
+++ b/runtime/task_finder.c
@@ -61,40 +61,6 @@ typedef int (*stap_task_finder_vm_callback)(struct stap_task_finder_target *tgt,
unsigned long vm_end,
unsigned long vm_pgoff);
-static int __stp_tf_vm_cb(struct stap_task_finder_target *tgt,
- struct task_struct *tsk,
- int map_p, char *vm_path,
- unsigned long vm_start,
- unsigned long vm_end,
- unsigned long vm_pgoff)
-{
- int i;
-#ifdef DEBUG_TASK_FINDER_VMA
- _stp_dbug(__FUNCTION__, __LINE__,
- "vm_cb: tsk %d:%d path %s, start 0x%08lx, end 0x%08lx, offset 0x%lx\n",
- tsk->pid, map_p, vm_path, vm_start, vm_end, vm_pgoff);
-#endif
- if (map_p) {
- struct _stp_module *module = NULL;
- if (vm_path != NULL)
- for (i = 0; i < _stp_num_modules; i++)
- if (strcmp(vm_path, _stp_modules[i]->path) == 0)
- {
-#ifdef DEBUG_TASK_FINDER_VMA
- _stp_dbug(__FUNCTION__, __LINE__,
- "vm_cb: matched path %s to module\n", vm_path);
-#endif
- module = _stp_modules[i];
- break;
- }
- stap_add_vma_map_info(tsk, vm_start, vm_end, vm_pgoff, module);
- }
- else {
- stap_remove_vma_map_info(tsk, vm_start, vm_end, vm_pgoff);
- }
- return 0;
-}
-
struct stap_task_finder_target {
/* private: */
struct list_head list; /* __stp_task_finder_list linkage */
diff --git a/runtime/task_finder_vma.c b/runtime/task_finder_vma.c
index 83b206e5..ed9c6f4f 100644
--- a/runtime/task_finder_vma.c
+++ b/runtime/task_finder_vma.c
@@ -1,13 +1,19 @@
#include <linux/list.h>
#include <linux/jhash.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
// When handling mmap() syscall tracing to notice memory map
// changes, we need to cache mmap() entry parameter values for
// processing at mmap() exit.
-// __stp_tf_vma_mutex protects the hash table.
-static DEFINE_MUTEX(__stp_tf_vma_mutex);
+// __stp_tf_vma_lock protects the hash table.
+// Documentation/spinlocks.txt suggests we can be a bit more clever
+// if we guarantee that in interrupt context we only read, not write,
+// the data structures. We should never change the hash table or its
+// contents in interrupt context (which should only ever call
+// stap_find_vma_map_info to get stored vma info). So we might
+// want to look into that if this turns out to be a bottleneck.
+static DEFINE_RWLOCK(__stp_tf_vma_lock);
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
@@ -26,8 +32,8 @@ struct __stp_tf_vma_entry {
unsigned long vm_pgoff;
// Is that enough? Should we store a dcookie for vm_file?
- // Module that this vma entry is mapped from, if any.
- struct _stp_module *module;
+ // User data (possibly stp_module)
+ void *user;
};
static struct __stp_tf_vma_entry
@@ -40,23 +46,24 @@ static struct hlist_head __stp_tf_vma_table[__STP_TF_TABLE_SIZE];
static struct hlist_head __stp_tf_vma_map[__STP_TF_TABLE_SIZE];
// __stp_tf_vma_initialize(): Initialize the free list. Grabs the
-// mutex.
+// spinlock.
static void
__stp_tf_vma_initialize(void)
{
int i;
struct hlist_head *head = &__stp_tf_vma_free_list[0];
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
for (i = 0; i < TASK_FINDER_VMA_ENTRY_ITEMS; i++) {
hlist_add_head(&__stp_tf_vma_free_list_items[i].hlist, head);
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
}
// __stp_tf_vma_get_free_entry(): Returns an entry from the free list
-// or NULL. The __stp_tf_vma_mutex must be locked before calling this
+// or NULL. The __stp_tf_vma_lock must be write locked before calling this
// function.
static struct __stp_tf_vma_entry *
__stp_tf_vma_get_free_entry(void)
@@ -77,7 +84,7 @@ __stp_tf_vma_get_free_entry(void)
// __stp_tf_vma_put_free_entry(): Puts an entry back on the free
-// list. The __stp_tf_vma_mutex must be locked before calling this
+// list. The __stp_tf_vma_lock must be write locked before calling this
// function.
static void
__stp_tf_vma_put_free_entry(struct __stp_tf_vma_entry *entry)
@@ -101,7 +108,7 @@ __stp_tf_vma_hash(struct task_struct *tsk, unsigned long addr)
// Get vma_entry if the vma is present in the vma hash table.
-// Returns NULL if not present.
+// Returns NULL if not present. Takes a read lock on __stp_tf_vma_lock.
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_entry(struct task_struct *tsk, unsigned long addr)
{
@@ -109,20 +116,22 @@ __stp_tf_get_vma_entry(struct task_struct *tsk, unsigned long addr)
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_table[__stp_tf_vma_hash(tsk, addr)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
&& addr == entry->addr) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return entry;
}
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return NULL;
}
// Add the vma info to the vma hash table.
+// Takes a write lock on __stp_tf_vma_lock.
static int
__stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
struct vm_area_struct *vma)
@@ -131,7 +140,8 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_table[__stp_tf_vma_hash(tsk, addr)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
@@ -141,7 +151,7 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
"vma (pid: %d, vm_start: 0x%lx) present?\n",
tsk->pid, vma->vm_start);
#endif
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -EBUSY; /* Already there */
}
}
@@ -149,7 +159,7 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
// Get an element from the free list.
entry = __stp_tf_vma_get_free_entry();
if (!entry) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -ENOMEM;
}
entry->pid = tsk->pid;
@@ -158,11 +168,12 @@ __stp_tf_add_vma(struct task_struct *tsk, unsigned long addr,
entry->vm_end = vma->vm_end;
entry->vm_pgoff = vma->vm_pgoff;
hlist_add_head(&entry->hlist, head);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
// Remove the vma entry from the vma hash table.
+// Takes a write lock on __stp_tf_vma_lock.
static int
__stp_tf_remove_vma_entry(struct __stp_tf_vma_entry *entry)
{
@@ -171,10 +182,11 @@ __stp_tf_remove_vma_entry(struct __stp_tf_vma_entry *entry)
int found = 0;
if (entry != NULL) {
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
hlist_del(&entry->hlist);
__stp_tf_vma_put_free_entry(entry);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
}
return 0;
}
@@ -189,7 +201,7 @@ __stp_tf_vma_map_hash(struct task_struct *tsk)
}
// Get vma_entry if the vma is present in the vma map hash table.
-// Returns NULL if not present. The __stp_tf_vma_mutex must be locked
+// Returns NULL if not present. The __stp_tf_vma_lock must be read locked
// before calling this function.
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_map_entry_internal(struct task_struct *tsk,
@@ -214,13 +226,16 @@ __stp_tf_get_vma_map_entry_internal(struct task_struct *tsk,
static int
stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
unsigned long vm_end, unsigned long vm_pgoff,
- struct _stp_module *module)
+ void *user)
{
struct hlist_head *head;
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ // Take a write lock, since we are most likely going to write
+ // after reading.
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
if (entry != NULL) {
#if 0
@@ -228,14 +243,14 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
"vma (pid: %d, vm_start: 0x%lx) present?\n",
tsk->pid, entry->vm_start);
#endif
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -EBUSY; /* Already there */
}
// Get an element from the free list.
entry = __stp_tf_vma_get_free_entry();
if (!entry) {
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -ENOMEM;
}
@@ -245,11 +260,11 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
entry->vm_start = vm_start;
entry->vm_end = vm_end;
entry->vm_pgoff = vm_pgoff;
- entry->module = module;
+ entry->user = user;
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_add_head(&entry->hlist, head);
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
@@ -263,23 +278,26 @@ stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
- mutex_lock(&__stp_tf_vma_mutex);
+ // Take a write lock since we are most likely going to delete
+ // after reading.
+ unsigned long flags;
+ write_lock_irqsave(&__stp_tf_vma_lock, flags);
entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
if (entry != NULL) {
hlist_del(&entry->hlist);
__stp_tf_vma_put_free_entry(entry);
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
// Finds vma info if the vma is present in the vma map hash table.
-// Returns ESRCH if not present. The __stp_tf_vma_mutex must *not* be
+// Returns ESRCH if not present. The __stp_tf_vma_lock must *not* be
// locked before calling this function.
static int
stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
unsigned long *vm_start, unsigned long *vm_end,
- unsigned long *vm_pgoff)
+ unsigned long *vm_pgoff, void **user)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -287,7 +305,8 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
struct __stp_tf_vma_entry *found_entry = NULL;
int rc = ESRCH;
- mutex_lock(&__stp_tf_vma_mutex);
+ unsigned long flags;
+ read_lock_irqsave(&__stp_tf_vma_lock, flags);
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid
@@ -304,31 +323,10 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
*vm_end = found_entry->vm_end;
if (vm_pgoff != NULL)
*vm_pgoff = found_entry->vm_pgoff;
+ if (user != NULL)
+ *user = found_entry->user;
rc = 0;
}
- mutex_unlock(&__stp_tf_vma_mutex);
+ read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return rc;
}
-
-// Get vma_entry of the address (vm_start/vm_end) if the vma is
-// present in the vma hash table containing.
-// Returns NULL if not present.
-static struct __stp_tf_vma_entry *
-__stp_tf_get_vma_entry_addr(struct task_struct *tsk, unsigned long addr)
-{
- struct hlist_head *head;
- struct hlist_node *node;
- struct __stp_tf_vma_entry *entry;
-
- mutex_lock(&__stp_tf_vma_mutex);
- head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
- hlist_for_each_entry(entry, node, head, hlist) {
- if (tsk->pid == entry->pid
- && addr >= entry->vm_start && addr < entry->vm_end) {
- mutex_unlock(&__stp_tf_vma_mutex);
- return entry;
- }
- }
- mutex_unlock(&__stp_tf_vma_mutex);
- return NULL;
-}
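
Taken together, task_finder_vma.c now keeps a small fixed-size map of (pid, vm_start) entries guarded by the rwlock, each carrying an opaque user pointer instead of a hard-coded struct _stp_module *. A sketch of the lifecycle as the callback above drives it (the function name and the addresses are invented for illustration; all locking happens inside the helpers):

        /* Illustrative only: example_vma_lifecycle() and the addresses are made up. */
        static void example_vma_lifecycle(struct task_struct *tsk,
                                          struct _stp_module *mod)
        {
                unsigned long vm_start;
                void *user = NULL;

                /* map event: remember the region, stashing the module as opaque data */
                stap_add_vma_map_info(tsk, 0x400000UL, 0x450000UL, 0, mod);

                /* lookup path: which region covers this address, and whose is it? */
                if (stap_find_vma_map_info(tsk, 0x401234UL, &vm_start, NULL, NULL,
                                           &user) == 0 && user != NULL)
                        _stp_dbug(__FUNCTION__, __LINE__, "0x401234 in %s @ 0x%lx\n",
                                  ((struct _stp_module *)user)->name, vm_start);

                /* unmap event: forget the region again */
                stap_remove_vma_map_info(tsk, 0x400000UL, 0x450000UL, 0);
        }
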