summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/ChangeLog          |  14
-rw-r--r--  runtime/task_finder.c      | 108
-rw-r--r--  runtime/task_finder_vma.c  | 132
3 files changed, 223 insertions(+), 31 deletions(-)
diff --git a/runtime/ChangeLog b/runtime/ChangeLog
index a8d73ffd..7dfade1c 100644
--- a/runtime/ChangeLog
+++ b/runtime/ChangeLog
@@ -1,3 +1,17 @@
+2008-08-08 David Smith <dsmith@redhat.com>
+
+ * task_finder.c (stap_utrace_detach): New function.
+ (stap_utrace_detach_ops): Calls stap_utrace_detach().
+ (__stp_utrace_attach_match_filename): Ditto.
+
+ * task_finder.c (__stp_tf_vm_cb): Added calls to save/delete vma
+ information.
+ * task_finder_vma.c (__stp_tf_vma_map_hash): New function.
+ (__stp_tf_get_vma_map_entry_internal): Ditto.
+ (stap_add_vma_map_info): Ditto.
+ (stap_remove_vma_map_info): Ditto.
+ (stap_find_vma_map_info): Ditto.
+
2008-07-24 Josh Stone <joshua.i.stone@intel.com>
* runtime/autoconf-module-nsections.c: removed
diff --git a/runtime/task_finder.c b/runtime/task_finder.c
index b22a60a8..1832c795 100644
--- a/runtime/task_finder.c
+++ b/runtime/task_finder.c
@@ -47,6 +47,15 @@ int __stp_tf_vm_cb(struct task_struct *tsk,
_stp_dbug(__FUNCTION__, __LINE__,
"vm_cb: tsk %d:%d path %s, start 0x%08lx, end 0x%08lx, offset 0x%lx\n",
tsk->pid, map_p, vm_path, vm_start, vm_end, vm_pgoff);
+ if (map_p) {
+ // FIXME: What should we do with vm_path? We can't save
+ // the vm_path pointer itself, but we don't have any
+ // storage space allocated to save it in...
+ stap_add_vma_map_info(tsk, vm_start, vm_end, vm_pgoff);
+ }
+ else {
+ stap_remove_vma_map_info(tsk, vm_start, vm_end, vm_pgoff);
+ }
return 0;
}
#endif
@@ -141,12 +150,72 @@ stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
return 0;
}
+static int
+stap_utrace_detach(struct task_struct *tsk,
+ const struct utrace_engine_ops *ops)
+{
+ struct utrace_attached_engine *engine;
+ struct mm_struct *mm;
+ int rc = 0;
+
+ // Ignore init
+ if (tsk == NULL || tsk->pid <= 1)
+ return 0;
+
+ // Notice we're not calling get_task_mm() here. Normally we
+ // avoid tasks with no mm, because those are kernel threads.
+ // So, why is this function different? When a thread is in
+ // the process of dying, its mm gets freed. Then, later the
+ // thread gets in the dying state and the thread's DEATH event
+ // handler gets called (if any).
+ //
+ // If a thread is in this "mortally wounded" state - no mm
+ // but not dead - and at that moment this function is called,
+ // we'd miss detaching from it if we were checking to see if
+ // it had an mm.
+
+ engine = utrace_attach(tsk, UTRACE_ATTACH_MATCH_OPS, ops, 0);
+ if (IS_ERR(engine)) {
+ rc = -PTR_ERR(engine);
+ if (rc != ENOENT) {
+ _stp_error("utrace_attach returned error %d on pid %d",
+ rc, tsk->pid);
+ }
+ else {
+ rc = 0;
+ }
+ }
+ else if (unlikely(engine == NULL)) {
+ _stp_error("utrace_attach returned NULL on pid %d",
+ (int)tsk->pid);
+ rc = EFAULT;
+ }
+ else {
+ rc = utrace_detach(tsk, engine);
+ switch (rc) {
+ case 0: /* success */
+ debug_task_finder_detach();
+ break;
+ case -ESRCH: /* REAP callback already begun */
+ case -EALREADY: /* DEATH callback already begun */
+ rc = 0; /* ignore these errors */
+ break;
+ default:
+ rc = -rc;
+ _stp_error("utrace_detach returned error %d on pid %d",
+ rc, tsk->pid);
+ break;
+ }
+ }
+ return rc;
+}
+
static void
stap_utrace_detach_ops(struct utrace_engine_ops *ops)
{
struct task_struct *grp, *tsk;
struct utrace_attached_engine *engine;
- long error = 0;
+ int rc = 0;
pid_t pid = 0;
// Notice we're not calling get_task_mm() in this loop. In
@@ -164,31 +233,12 @@ stap_utrace_detach_ops(struct utrace_engine_ops *ops)
rcu_read_lock();
do_each_thread(grp, tsk) {
- if (tsk == NULL || tsk->pid <= 1)
- continue;
-
- engine = utrace_attach(tsk, UTRACE_ATTACH_MATCH_OPS,
- ops, 0);
- if (IS_ERR(engine)) {
- error = -PTR_ERR(engine);
- if (error != ENOENT) {
- pid = tsk->pid;
- goto udo_err;
- }
- error = 0;
- }
- else if (engine != NULL) {
- utrace_detach(tsk, engine);
- debug_task_finder_detach();
- }
+ rc = stap_utrace_detach(tsk, ops);
+ if (rc != 0)
+ goto udo_err;
} while_each_thread(grp, tsk);
udo_err:
rcu_read_unlock();
-
- if (error != 0) {
- _stp_error("utrace_attach returned error %d on pid %d",
- error, pid);
- }
debug_task_finder_report();
}
@@ -372,14 +422,10 @@ __stp_utrace_attach_match_filename(struct task_struct *tsk,
cb_tgt->engine_attached = 1;
}
else {
- struct utrace_attached_engine *engine;
- engine = utrace_attach(tsk,
- UTRACE_ATTACH_MATCH_OPS,
- &cb_tgt->ops, 0);
- if (! IS_ERR(engine) && engine != NULL) {
- utrace_detach(tsk, engine);
- debug_task_finder_detach();
- }
+ rc = stap_utrace_detach(tsk, &cb_tgt->ops);
+ if (rc != 0)
+ break;
+ cb_tgt->engine_attached = 0;
}
}
}
diff --git a/runtime/task_finder_vma.c b/runtime/task_finder_vma.c
index c0a018ab..9d43e36c 100644
--- a/runtime/task_finder_vma.c
+++ b/runtime/task_finder_vma.c
@@ -34,6 +34,8 @@ static struct hlist_head __stp_tf_vma_free_list[1];
static struct hlist_head __stp_tf_vma_table[__STP_TF_TABLE_SIZE];
+static struct hlist_head __stp_tf_vma_map[__STP_TF_TABLE_SIZE];
+
// __stp_tf_vma_initialize(): Initialize the free list. Grabs the
// mutex.
static void
@@ -171,3 +173,133 @@ __stp_tf_remove_vma_entry(struct __stp_tf_vma_entry *entry)
}
return 0;
}
+
+
+
+// __stp_tf_vma_map_hash(): Compute the vma map hash.
+static inline u32
+__stp_tf_vma_map_hash(struct task_struct *tsk)
+{
+ return (jhash_1word(tsk->pid, 0) & (__STP_TF_TABLE_SIZE - 1));
+}
+
+// Get vma_entry if the vma is present in the vma map hash table.
+// Returns NULL if not present.  The __stp_tf_vma_mutex must be locked
+// before calling (NOTE(review): the unlock on the found path below violates this contract and double-unlocks in callers — confirm/remove).
+static struct __stp_tf_vma_entry *
+__stp_tf_get_vma_map_entry_internal(struct task_struct *tsk,
+ unsigned long vm_start)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_vma_entry *entry;
+
+ head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ if (tsk->pid == entry->pid
+ && vm_start == entry->addr) {
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+
+// Add the vma info to the vma map hash table.
+static int
+stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
+ unsigned long vm_end, unsigned long vm_pgoff)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_vma_entry *entry;
+
+ mutex_lock(&__stp_tf_vma_mutex);
+ entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
+ if (entry != NULL) {
+#if 0
+ printk(KERN_NOTICE
+ "vma (pid: %d, vm_start: 0x%lx) present?\n",
+ tsk->pid, entry->vm_start);
+#endif
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return -EBUSY; /* Already there */
+ }
+
+ // Get an element from the free list.
+ entry = __stp_tf_vma_get_free_entry();
+ if (!entry) {
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return -ENOMEM;
+ }
+
+ // Fill in the info
+ entry->pid = tsk->pid;
+ //entry->addr = addr; ???
+ entry->vm_start = vm_start;
+ entry->vm_end = vm_end;
+ entry->vm_pgoff = vm_pgoff;
+
+ head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
+ hlist_add_head(&entry->hlist, head);
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return 0;
+}
+
+
+// Remove the vma entry from the vma hash table.
+static int
+stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start,
+ unsigned long vm_end, unsigned long vm_pgoff)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_vma_entry *entry;
+
+ mutex_lock(&__stp_tf_vma_mutex);
+ entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
+ if (entry != NULL) {
+ hlist_del(&entry->hlist);
+ __stp_tf_vma_put_free_entry(entry);
+ }
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return 0;
+}
+
+// Finds vma info if the vma is present in the vma map hash table.
+// Returns ESRCH if not present. The __stp_tf_vma_mutex must *not* be
+// locked before calling this function.
+static int
+stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr,
+ unsigned long *vm_start, unsigned long *vm_end,
+ unsigned long *vm_pgoff)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct __stp_tf_vma_entry *entry;
+ struct __stp_tf_vma_entry *found_entry = NULL;
+ int rc = ESRCH;
+
+ mutex_lock(&__stp_tf_vma_mutex);
+ head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ if (tsk->pid == entry->pid
+ && vm_addr >= entry->vm_start
+ && vm_addr < entry->vm_end) {
+ found_entry = entry;
+ break;
+ }
+ }
+ if (found_entry != NULL) {
+ if (vm_start != NULL)
+ *vm_start = found_entry->vm_start;
+ if (vm_end != NULL)
+ *vm_end = found_entry->vm_end;
+ if (vm_pgoff != NULL)
+ *vm_pgoff = found_entry->vm_pgoff;
+ rc = 0;
+ }
+ mutex_unlock(&__stp_tf_vma_mutex);
+ return rc;
+}