summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--runtime/ChangeLog50
-rw-r--r--runtime/autoconf-task-uid.c6
-rw-r--r--runtime/sdt.h163
-rw-r--r--runtime/sduprobes.h141
-rw-r--r--runtime/staprun/ChangeLog4
-rw-r--r--runtime/staprun/staprun.h3
-rw-r--r--runtime/task_finder.c303
-rw-r--r--runtime/transport/ChangeLog5
-rw-r--r--runtime/transport/symbols.c24
-rw-r--r--runtime/transport/transport.c7
-rw-r--r--runtime/uprobes/uprobes.c78
-rw-r--r--runtime/uprobes2/uprobes.c78
-rw-r--r--runtime/vsprintf.c19
13 files changed, 545 insertions, 336 deletions
diff --git a/runtime/ChangeLog b/runtime/ChangeLog
index b9a2a058..ca299a8c 100644
--- a/runtime/ChangeLog
+++ b/runtime/ChangeLog
@@ -16,6 +16,56 @@
task_finder_vma.c (stap_add_vma_map_info): Take an optional module.
(__stp_tf_get_vma_entry_addr): New lookup function.
+2009-01-20 David Smith <dsmith@redhat.com>
+
+ PR 9673.
+ * task_finder.c (struct stap_task_finder_target): Added
+ 'vm_events' field.
+ (stap_register_task_finder_target): Sets vm_events if a
+ vm_callback is present.
+ (__stp_task_finder_cleanup): Only detaches engines on the main
+ __stp_task_finder_list linked list.
+ (__stp_call_callbacks): New function.
+ (__stp_call_vm_callbacks): New function.
+ (__stp_utrace_attach_match_filename): Calls __stp_call_callbacks()
+ to call callbacks.
+ (__stp_utrace_task_finder_target_death): Ditto.
+ (__stp_utrace_task_finder_target_quiesce): Calls
+ __stp_call_callbacks() and __stp_call_vm_callbacks() to call
+ callbacks.
+ (__stp_call_vm_callbacks_with_vma): Renamed from
+ __stp_target_call_vm_callback.
+ (__stp_utrace_task_finder_target_syscall_exit): Calls
+ __stp_call_vm_callbacks() to call vm callbacks.
+ (stap_start_task_finder): Instead of a utrace engine for every
+ task_finder_target, there is now one utrace engine for all targets
+ with the same path or pid.
+
+2009-01-13 Jim Keniston <jkenisto@us.ibm.com>
+
+ PR 7082.
+ * uprobes2/uprobes.c: On exec, free up outstanding
+ uretprobe_instances and tick down the uproc's ref-count
+ accordingly, so the (old image's) uproc goes away as
+ desired.
+
+2009-01-12 Wenji Huang <wenji.huang@oracle.com>
+
+ * transport/symbols.c (_stp_sort): Adapt it to 2.6.29.
+
+2009-01-06 Frank Ch. Eigler <fche@elastic.org>
+
+ PR9699.
+ * autoconf-task-uid.c: New test.
+
+2008-12-21 Stan Cox <scox@redhat.com>
+
+ * sduprobes.h (STAP_PROBE): Put block around probe point.
+
+2008-12-16 Stan Cox <scox@redhat.com>
+
+ * sduprobes.h (STAP_PROBE): Add synthetic reference to probe label.
+
2008-12-09 Frank Ch. Eigler <fche@elastic.org>
* time.c (_stp_gettimeofday_ns): Protect some more against freq=0.
diff --git a/runtime/autoconf-task-uid.c b/runtime/autoconf-task-uid.c
new file mode 100644
index 00000000..8e40f831
--- /dev/null
+++ b/runtime/autoconf-task-uid.c
@@ -0,0 +1,6 @@
+#include <linux/sched.h>
+
+int bar (struct task_struct *foo) {
+ return (foo->uid = 0);
+}
+/* as opposed to linux/cred.h wrappers current_uid() etc. */
diff --git a/runtime/sdt.h b/runtime/sdt.h
new file mode 100644
index 00000000..9fe7b176
--- /dev/null
+++ b/runtime/sdt.h
@@ -0,0 +1,163 @@
+// Copyright (C) 2005-2009 Red Hat Inc.
+// Copyright (C) 2006 Intel Corporation.
+//
+// This file is part of systemtap, and is free software. You can
+// redistribute it and/or modify it under the terms of the GNU General
+// Public License (GPL); either version 2, or (at your option) any
+// later version.
+
+#include <string.h>
+
+#if _LP64
+#define STAP_PROBE_STRUCT_ARG(arg) \
+ __uint64_t arg;
+#else
+#define STAP_PROBE_STRUCT_ARG(arg) \
+ long arg __attribute__ ((aligned(8)));
+#endif
+
+#define STAP_SENTINEL 0x31425250
+
+#define STAP_PROBE_STRUCT(probe,type,argc) \
+struct _probe_ ## probe \
+{ \
+ int probe_type; \
+ char *probe_name; \
+ STAP_PROBE_STRUCT_ARG (probe_arg); \
+}; \
+static char probe_name [strlen(#probe)+1] \
+ __attribute__ ((section (".probes"))) \
+ = #probe; \
+static volatile struct _probe_ ## probe _probe_ ## probe __attribute__ ((section (".probes"))) = {STAP_SENTINEL,&probe_name[0],argc};
+
+#define STAP_CONCAT(a,b) a ## b
+#define STAP_LABEL(p,n) \
+ STAP_CONCAT(_probe_ ## p ## _, n)
+
+// The goto _probe_ prevents the label from "drifting"
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE(provider,probe) \
+ STAP_PROBE_STRUCT(probe,0,0) \
+ _stap_probe_0 (_probe_ ## probe.probe_name);
+#else
+#define STAP_PROBE(provider,probe) \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop"); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE1(provider,probe,arg1) \
+ STAP_PROBE_STRUCT(probe,0,1) \
+ _stap_probe_1 (_probe_ ## probe.probe_name,(size_t)arg1);
+#else
+#define STAP_PROBE1(provider,probe,parm1) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE2(provider,probe,arg1,arg2) \
+ STAP_PROBE_STRUCT(probe,0,2) \
+ _stap_probe_2 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2);
+#else
+#define STAP_PROBE2(provider,probe,parm1,parm2) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+ volatile typeof((parm2)) arg2 __attribute__ ((unused)) = parm2; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1), "r"(arg2)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE3(provider,probe,arg1,arg2,arg3) \
+ STAP_PROBE_STRUCT(probe,0,3) \
+ _stap_probe_3 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3);
+#else
+#define STAP_PROBE3(provider,probe,parm1,parm2,parm3) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+ volatile typeof((parm2)) arg2 __attribute__ ((unused)) = parm2; \
+ volatile typeof((parm3)) arg3 __attribute__ ((unused)) = parm3; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1), "r"(arg2), "r"(arg3)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE4(provider,probe,arg1,arg2,arg3,arg4) \
+ STAP_PROBE_STRUCT(probe,0,4) \
+ _stap_probe_4 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3,(size_t)arg4);
+#else
+#define STAP_PROBE4(provider,probe,parm1,parm2,parm3,parm4) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+ volatile typeof((parm2)) arg2 __attribute__ ((unused)) = parm2; \
+ volatile typeof((parm3)) arg3 __attribute__ ((unused)) = parm3; \
+ volatile typeof((parm4)) arg4 __attribute__ ((unused)) = parm4; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1), "r"(arg2), "r"(arg3), "r"(arg4)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE5(provider,probe,arg1,arg2,arg3,arg4,arg5) \
+ STAP_PROBE_STRUCT(probe,0,5) \
+ _stap_probe_5 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3,(size_t)arg4,(size_t)arg5);
+#else
+#define STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+ volatile typeof((parm2)) arg2 __attribute__ ((unused)) = parm2; \
+ volatile typeof((parm3)) arg3 __attribute__ ((unused)) = parm3; \
+ volatile typeof((parm4)) arg4 __attribute__ ((unused)) = parm4; \
+ volatile typeof((parm5)) arg5 __attribute__ ((unused)) = parm5; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1), "r"(arg2), "r"(arg3), "r"(arg4), "r"(arg5)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#ifdef USE_STAP_PROBE
+#define STAP_PROBE6(provider,probe,arg1,arg2,arg3,arg4,arg5,arg6) \
+ STAP_PROBE_STRUCT(probe,0,6) \
+ _stap_probe_6 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3,(size_t)arg4,(size_t)arg5,(size_t)arg6);
+#else
+#define STAP_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6) \
+ {volatile typeof((parm1)) arg1 __attribute__ ((unused)) = parm1; \
+ volatile typeof((parm2)) arg2 __attribute__ ((unused)) = parm2; \
+ volatile typeof((parm3)) arg3 __attribute__ ((unused)) = parm3; \
+ volatile typeof((parm4)) arg4 __attribute__ ((unused)) = parm4; \
+ volatile typeof((parm5)) arg5 __attribute__ ((unused)) = parm5; \
+ volatile typeof((parm6)) arg6 __attribute__ ((unused)) = parm6; \
+STAP_LABEL(probe,__LINE__): \
+ asm volatile ("nop" :: "r"(arg1), "r"(arg2), "r"(arg3), "r"(arg4), "r"(arg5), "r"(arg6)); \
+ STAP_PROBE_STRUCT(probe,1,(size_t)&& STAP_LABEL(probe,__LINE__)) \
+ if (__builtin_expect(_probe_ ## probe.probe_type < 0, 0)) \
+ goto STAP_LABEL(probe,__LINE__);}
+#endif
+
+#define DTRACE_PROBE(provider,probe) \
+STAP_PROBE(provider,probe)
+#define DTRACE_PROBE1(provider,probe,parm1) \
+STAP_PROBE1(provider,probe,parm1)
+#define DTRACE_PROBE2(provider,probe,parm1,parm2) \
+STAP_PROBE2(provider,probe,parm1,parm2)
+#define DTRACE_PROBE3(provider,probe,parm1,parm2,parm3) \
+STAP_PROBE3(provider,probe,parm1,parm2,parm3)
+#define DTRACE_PROBE4(provider,probe,parm1,parm2,parm3,parm4) \
+STAP_PROBE4(provider,probe,parm1,parm2,parm3,parm4)
+#define DTRACE_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
+STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5)
+#define DTRACE_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6) \
+STAP_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6)
diff --git a/runtime/sduprobes.h b/runtime/sduprobes.h
deleted file mode 100644
index b91dea93..00000000
--- a/runtime/sduprobes.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (C) 2005-2008 Red Hat Inc.
-// Copyright (C) 2006 Intel Corporation.
-//
-// This file is part of systemtap, and is free software. You can
-// redistribute it and/or modify it under the terms of the GNU General
-// Public License (GPL); either version 2, or (at your option) any
-// later version.
-
-#include <string.h>
-
-#if _LP64
-#define STAP_PROBE_STRUCT_ARG \
- __uint64_t probe_arg;
-#else
-#define STAP_PROBE_STRUCT_ARG \
- long probe_arg __attribute__ ((aligned(8)));
-#endif
-
-#define STAP_PROBE_STRUCT(probe,type,argc) \
-struct _probe_ ## probe \
-{ \
- char probe_name [strlen(#probe)+1]; \
- int probe_type; \
- STAP_PROBE_STRUCT_ARG \
-}; \
- static volatile struct _probe_ ## probe _probe_ ## probe __attribute__ ((section (".probes"))) = {#probe,type,argc};
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE(provider,probe) \
- STAP_PROBE_STRUCT(probe,0,0) \
- _stap_probe_0 (_probe_ ## probe.probe_name);
-#else
-#define STAP_PROBE(provider,probe) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe) \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE1(provider,probe,arg1) \
- STAP_PROBE_STRUCT(probe,0,1) \
- _stap_probe_1 (_probe_ ## probe.probe_name,(size_t)arg1);
-#else
-#define STAP_PROBE1(provider,probe,parm1) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- volatile typeof(parm1) arg1 = parm1; \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe) \
- asm volatile ("# %0" :: "r"(arg1)); \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE2(provider,probe,arg1,arg2) \
- STAP_PROBE_STRUCT(probe,0,2) \
- _stap_probe_2 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2);
-#else
-#define STAP_PROBE2(provider,probe,parm1,parm2) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- volatile typeof(parm1) arg1 = parm1; \
- volatile typeof(parm2) arg2 = parm2; \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe)\
- asm volatile ("# %0" :: "r"(arg1)); \
- asm volatile ("# %0" :: "r"(arg2)); \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE3(provider,probe,arg1,arg2,arg3) \
- STAP_PROBE_STRUCT(probe,0,3) \
- _stap_probe_3 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3);
-#else
-#define STAP_PROBE3(provider,probe,parm1,parm2,parm3) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- volatile typeof(parm1) arg1 = parm1; \
- volatile typeof(parm2) arg2 = parm2; \
- volatile typeof(parm3) arg3 = parm3; \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe) \
- asm volatile ("# %0" :: "r"(arg1)); \
- asm volatile ("# %0" :: "r"(arg2)); \
- asm volatile ("# %0" :: "r"(arg3)); \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE4(provider,probe,arg1,arg2,arg3,arg4) \
- STAP_PROBE_STRUCT(probe,0,4) \
- _stap_probe_4 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3,(size_t)arg4);
-#else
-#define STAP_PROBE4(provider,probe,parm1,parm2,parm3) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- volatile typeof(parm1) arg1 = parm1; \
- volatile typeof(parm2) arg2 = parm2; \
- volatile typeof(parm3) arg3 = parm3; \
- volatile typeof(parm4) arg4 = parm4; \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe) \
- asm volatile ("# %0" :: "r"(arg1)); \
- asm volatile ("# %0" :: "r"(arg2)); \
- asm volatile ("# %0" :: "r"(arg3)); \
- asm volatile ("# %0" :: "r"(arg4)); \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-#ifndef USE_STAP_DEBUGINFO_PROBE
-#define STAP_PROBE5(provider,probe,arg1,arg2,arg3,arg4,arg5) \
- STAP_PROBE_STRUCT(probe,0,5) \
- _stap_probe_5 (_probe_ ## probe.probe_name,(size_t)arg1,(size_t)arg2,(size_t)arg3,(size_t)arg4,(size_t)arg5);
-#else
-#define STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-_probe_ ## probe: \
- asm volatile ("nop"); \
- volatile typeof(parm1) arg1 = parm1; \
- volatile typeof(parm2) arg2 = parm2; \
- volatile typeof(parm3) arg3 = parm3; \
- volatile typeof(parm4) arg4 = parm4; \
- volatile typeof(parm5) arg5 = parm5; \
- STAP_PROBE_STRUCT(probe,1,(size_t)&& _probe_ ## probe) \
- asm volatile ("# %0" :: "r"(arg1)); \
- asm volatile ("# %0" :: "r"(arg2)); \
- asm volatile ("# %0" :: "r"(arg3)); \
- asm volatile ("# %0" :: "r"(arg4)); \
- asm volatile ("# %0" :: "r"(arg5)); \
- asm volatile ("# %0" :: "m" ((_probe_ ## probe.probe_type)));
-#endif
-
-#define DTRACE_PROBE1(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-STAP_PROBE1(provider,probe,parm1,parm2,parm3,parm4,parm5)
-#define DTRACE_PROBE2(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-STAP_PROBE2(provider,probe,parm1,parm2,parm3,parm4,parm5)
-#define DTRACE_PROBE3(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-STAP_PROBE3(provider,probe,parm1,parm2,parm3,parm4,parm5)
-#define DTRACE_PROBE4(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-STAP_PROBE4(provider,probe,parm1,parm2,parm3,parm4,parm5)
-#define DTRACE_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
-STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5)
diff --git a/runtime/staprun/ChangeLog b/runtime/staprun/ChangeLog
index 3611b55e..6c2304ce 100644
--- a/runtime/staprun/ChangeLog
+++ b/runtime/staprun/ChangeLog
@@ -1,3 +1,7 @@
+2009-01-11 Mark Wielaard <mjw@redhat.com>
+
+ * staprun.h: include config.h for dependency.
+
2008-12-08 Frank Ch. Eigler <fche@elastic.org>
PR7062
diff --git a/runtime/staprun/staprun.h b/runtime/staprun/staprun.h
index 2014ce5b..84cf63fc 100644
--- a/runtime/staprun/staprun.h
+++ b/runtime/staprun/staprun.h
@@ -34,6 +34,9 @@
#include <sys/statfs.h>
#include <linux/version.h>
+/* Include config.h to pick up dependency for --prefix usage. */
+#include "config.h"
+
#define dbug(level, args...) {if (verbose>=level) {fprintf(stderr,"%s:%s:%d ",__name__,__FUNCTION__, __LINE__); fprintf(stderr,args);}}
extern char *__name__;
diff --git a/runtime/task_finder.c b/runtime/task_finder.c
index 1e0a8474..31bccad8 100644
--- a/runtime/task_finder.c
+++ b/runtime/task_finder.c
@@ -95,7 +95,8 @@ struct stap_task_finder_target {
struct list_head callback_list_head;
struct list_head callback_list;
struct utrace_engine_ops ops;
- int engine_attached;
+ unsigned engine_attached:1;
+ unsigned vm_events:1;
size_t pathlen;
/* public: */
@@ -174,6 +175,7 @@ stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
// Make sure everything is initialized properly.
new_tgt->engine_attached = 0;
+ new_tgt->vm_events = 0;
memset(&new_tgt->ops, 0, sizeof(new_tgt->ops));
new_tgt->ops.report_death = &__stp_utrace_task_finder_target_death;
new_tgt->ops.report_quiesce = &__stp_utrace_task_finder_target_quiesce;
@@ -209,6 +211,10 @@ stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
// Add this target to the callback list for this task.
list_add_tail(&new_tgt->callback_list, &tgt->callback_list_head);
+
+ // If the new target has a vm_callback, remember this.
+ if (new_tgt->vm_callback != NULL)
+ tgt->vm_events = 1;
return 0;
}
@@ -309,7 +315,6 @@ static void
__stp_task_finder_cleanup(void)
{
struct list_head *tgt_node, *tgt_next;
- struct list_head *cb_node, *cb_next;
struct stap_task_finder_target *tgt;
// Walk the main list, cleaning up as we go.
@@ -319,22 +324,15 @@ __stp_task_finder_cleanup(void)
if (tgt == NULL)
continue;
- list_for_each_safe(cb_node, cb_next,
- &tgt->callback_list_head) {
- struct stap_task_finder_target *cb_tgt;
- cb_tgt = list_entry(cb_node,
- struct stap_task_finder_target,
- callback_list);
- if (cb_tgt == NULL)
- continue;
+ if (tgt->engine_attached) {
+ stap_utrace_detach_ops(&tgt->ops);
+ tgt->engine_attached = 0;
+ }
- if (cb_tgt->engine_attached) {
- stap_utrace_detach_ops(&cb_tgt->ops);
- cb_tgt->engine_attached = 0;
- }
+ // Notice we're not walking the callback_list here.
+ // There isn't anything to clean up and doing it would
+ // mess up callbacks in progress.
- list_del(&cb_tgt->callback_list);
- }
list_del(&tgt->list);
}
}
@@ -379,10 +377,10 @@ __stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
/*
* __STP_TASK_BASE_EVENTS: base events for stap_task_finder_target's
- * without a vm_callback
+ * without vm_callback's
*
* __STP_TASK_VM_BASE_EVENTS: base events for
- * stap_task_finder_target's with a vm_callback
+ * stap_task_finder_target's with vm_callback's
*/
#define __STP_TASK_BASE_EVENTS (UTRACE_EVENT(DEATH))
@@ -400,8 +398,7 @@ __stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
| UTRACE_EVENT(QUIESCE))
#define __STP_ATTACHED_TASK_BASE_EVENTS(tgt) \
- ((((tgt)->vm_callback) == NULL) ? __STP_TASK_BASE_EVENTS \
- : __STP_TASK_VM_BASE_EVENTS)
+ ((tgt)->vm_events ? __STP_TASK_VM_BASE_EVENTS : __STP_TASK_BASE_EVENTS)
static int
__stp_utrace_attach(struct task_struct *tsk,
@@ -485,6 +482,61 @@ stap_utrace_attach(struct task_struct *tsk,
}
static inline void
+__stp_call_callbacks(struct stap_task_finder_target *tgt,
+ struct task_struct *tsk, int register_p, int process_p)
+{
+ struct list_head *cb_node;
+ int rc;
+
+ if (tgt == NULL || tsk == NULL)
+ return;
+
+ list_for_each(cb_node, &tgt->callback_list_head) {
+ struct stap_task_finder_target *cb_tgt;
+
+ cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
+ callback_list);
+ if (cb_tgt == NULL || cb_tgt->callback == NULL)
+ continue;
+
+ rc = cb_tgt->callback(cb_tgt, tsk, register_p, process_p);
+ if (rc != 0) {
+ _stp_error("callback for %d failed: %d",
+ (int)tsk->pid, rc);
+ }
+ }
+}
+
+static inline void
+__stp_call_vm_callbacks(struct stap_task_finder_target *tgt,
+ struct task_struct *tsk, int map_p, char *vm_path,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long vm_pgoff)
+{
+ struct list_head *cb_node;
+ int rc;
+
+ if (tgt == NULL || tsk == NULL)
+ return;
+
+ list_for_each(cb_node, &tgt->callback_list_head) {
+ struct stap_task_finder_target *cb_tgt;
+
+ cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
+ callback_list);
+ if (cb_tgt == NULL || cb_tgt->vm_callback == NULL)
+ continue;
+
+ rc = cb_tgt->vm_callback(cb_tgt, tsk, map_p, vm_path,
+ vm_start, vm_end, vm_pgoff);
+ if (rc != 0) {
+ _stp_error("vm callback for %d failed: %d",
+ (int)tsk->pid, rc);
+ }
+ }
+}
+
+static inline void
__stp_utrace_attach_match_filename(struct task_struct *tsk,
const char * const filename,
int register_p, int process_p)
@@ -495,7 +547,7 @@ __stp_utrace_attach_match_filename(struct task_struct *tsk,
filelen = strlen(filename);
list_for_each(tgt_node, &__stp_task_finder_list) {
- struct list_head *cb_node;
+ int rc;
tgt = list_entry(tgt_node, struct stap_task_finder_target,
list);
@@ -515,52 +567,31 @@ __stp_utrace_attach_match_filename(struct task_struct *tsk,
/* Notice that "pid == 0" (which means to probe all
* threads) falls through. */
- list_for_each(cb_node, &tgt->callback_list_head) {
- struct stap_task_finder_target *cb_tgt;
- int rc;
-
- cb_tgt = list_entry(cb_node,
- struct stap_task_finder_target,
- callback_list);
- if (cb_tgt == NULL)
- continue;
-
- // Set up events we need for attached tasks.
- // When register_p is set, we won't actually
- // call the callback here - we'll call it when
- // the thread gets quiesced. When register_p
- // isn't set, we can go ahead and call the
- // callback.
- if (register_p) {
- rc = __stp_utrace_attach(tsk, &cb_tgt->ops,
- cb_tgt,
- __STP_ATTACHED_TASK_EVENTS,
- UTRACE_STOP);
- if (rc != 0 && rc != EPERM)
- break;
- cb_tgt->engine_attached = 1;
- }
- else {
- if (cb_tgt->callback != NULL) {
- rc = cb_tgt->callback(cb_tgt, tsk,
- register_p,
- process_p);
- if (rc != 0) {
- _stp_error("callback for %d failed: %d",
- (int)tsk->pid, rc);
- break;
- }
- }
-
- rc = stap_utrace_detach(tsk, &cb_tgt->ops);
- if (rc != 0)
- break;
+ // Set up events we need for attached tasks. When
+ // register_p is set, we won't actually call the
+ // callbacks here - we'll call it when the thread gets
+ // quiesced. When register_p isn't set, we can go
+ // ahead and call the callbacks.
+ if (register_p) {
+ rc = __stp_utrace_attach(tsk, &tgt->ops,
+ tgt,
+ __STP_ATTACHED_TASK_EVENTS,
+ UTRACE_STOP);
+ if (rc != 0 && rc != EPERM)
+ break;
+ tgt->engine_attached = 1;
+ }
+ else {
+ // Call the callbacks, then detach.
+ __stp_call_callbacks(tgt, tsk, register_p, process_p);
+ rc = stap_utrace_detach(tsk, &tgt->ops);
+ if (rc != 0)
+ break;
- // Note that we don't want to set
- // engine_attached to 0 here - only
- // when *all* threads using this
- // engine have been detached.
- }
+ // Note that we don't want to set
+ // engine_attached to 0 here - only
+ // when *all* threads using this
+ // engine have been detached.
}
}
}
@@ -756,20 +787,14 @@ __stp_utrace_task_finder_target_death(struct utrace_attached_engine *engine,
// don't know which callback(s) to call.
//
// So, now when an "interesting" thread is found, we add a
- // separate UTRACE_EVENT(DEATH) handler for every probe.
-
- if (tgt != NULL && tgt->callback != NULL) {
- int rc;
-
- // Call the callback
- rc = tgt->callback(tgt, tsk, 0,
- ((tsk->signal == NULL)
- || (atomic_read(&tsk->signal->live) == 0)));
- if (rc != 0) {
- _stp_error("death callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
+ // separate UTRACE_EVENT(DEATH) handler for each attached
+ // handler.
+ if (tgt != NULL && tsk != NULL) {
+ __stp_call_callbacks(tgt, tsk, 0,
+ ((tsk->signal == NULL)
+ || (atomic_read(&tsk->signal->live) == 0)));
}
+
__stp_tf_handler_end();
debug_task_finder_detach();
return UTRACE_DETACH;
@@ -795,7 +820,7 @@ __stp_utrace_task_finder_target_quiesce(enum utrace_resume_action action,
return UTRACE_DETACH;
}
- if (tgt == NULL) {
+ if (tgt == NULL || tsk == NULL) {
debug_task_finder_detach();
return UTRACE_DETACH;
}
@@ -825,20 +850,15 @@ __stp_utrace_task_finder_target_quiesce(enum utrace_resume_action action,
_stp_error("utrace_set_events returned error %d on pid %d",
rc, (int)tsk->pid);
- if (tgt->callback != NULL) {
- /* Call the callback. Assume that if the thread is a
- * thread group leader, it is a process. */
- rc = tgt->callback(tgt, tsk, 1, (tsk->pid == tsk->tgid));
- if (rc != 0) {
- _stp_error("callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
- }
- /* If this is just a thread other than the thread group leader,
+ /* Call the callbacks. Assume that if the thread is a
+ * thread group leader, it is a process. */
+ __stp_call_callbacks(tgt, tsk, 1, (tsk->pid == tsk->tgid));
+
+ /* If this is just a thread other than the thread group leader,
don't bother inform vm_callback clients about its memory map,
since they will simply duplicate each other. */
- if (tgt->vm_callback != NULL && (tsk->tgid == tsk->pid)) {
+ if (tgt->vm_events == 1 && tsk->tgid == tsk->pid) {
struct mm_struct *mm;
char *mmpath_buf;
char *mmpath;
@@ -872,18 +892,12 @@ __stp_utrace_task_finder_target_quiesce(enum utrace_resume_action action,
mmpath_buf, PATH_MAX);
#endif
if (mmpath) {
- // Call the callback
- rc = tgt->vm_callback(tgt, tsk, 1,
- mmpath,
- vma->vm_start,
- vma->vm_end,
- (vma->vm_pgoff
- << PAGE_SHIFT));
- if (rc != 0) {
- _stp_error("vm callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
-
+ __stp_call_vm_callbacks(tgt, tsk, 1,
+ mmpath,
+ vma->vm_start,
+ vma->vm_end,
+ (vma->vm_pgoff
+ << PAGE_SHIFT));
}
else {
_stp_dbug(__FUNCTION__, __LINE__,
@@ -944,7 +958,7 @@ __stp_utrace_task_finder_target_syscall_entry(enum utrace_resume_action action,
return UTRACE_DETACH;
}
- if (tgt == NULL || tgt->vm_callback == NULL)
+ if (tgt == NULL || tgt->vm_events == 0)
return UTRACE_RESUME;
// See if syscall is one we're interested in.
@@ -987,9 +1001,9 @@ __stp_utrace_task_finder_target_syscall_entry(enum utrace_resume_action action,
}
static void
-__stp_target_call_vm_callback(struct stap_task_finder_target *tgt,
- struct task_struct *tsk,
- struct vm_area_struct *vma)
+__stp_call_vm_callbacks_with_vma(struct stap_task_finder_target *tgt,
+ struct task_struct *tsk,
+ struct vm_area_struct *vma)
{
char *mmpath_buf;
char *mmpath;
@@ -1015,13 +1029,9 @@ __stp_target_call_vm_callback(struct stap_task_finder_target *tgt,
rc, (int)tsk->pid);
}
else {
- rc = tgt->vm_callback(tgt, tsk, 1, mmpath, vma->vm_start,
- vma->vm_end,
- (vma->vm_pgoff << PAGE_SHIFT));
- if (rc != 0) {
- _stp_error("vm callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
+ __stp_call_vm_callbacks(tgt, tsk, 1, mmpath,
+ vma->vm_start, vma->vm_end,
+ (vma->vm_pgoff << PAGE_SHIFT));
}
_stp_kfree(mmpath_buf);
}
@@ -1056,7 +1066,7 @@ __stp_utrace_task_finder_target_syscall_exit(enum utrace_resume_action action,
return UTRACE_DETACH;
}
- if (tgt == NULL || tgt->vm_callback == NULL)
+ if (tgt == NULL || tgt->vm_events == 0)
return UTRACE_RESUME;
// See if syscall is one we're interested in.
@@ -1112,8 +1122,7 @@ __stp_utrace_task_finder_target_syscall_exit(enum utrace_resume_action action,
down_read(&mm->mmap_sem);
vma = __stp_find_file_based_vma(mm, rv);
if (vma != NULL) {
- __stp_target_call_vm_callback(tgt, tsk,
- vma);
+ __stp_call_vm_callbacks_with_vma(tgt, tsk, vma);
}
up_read(&mm->mmap_sem);
mmput(mm);
@@ -1138,15 +1147,12 @@ __stp_utrace_task_finder_target_syscall_exit(enum utrace_resume_action action,
// FIXME: We'll need to figure out to
// retrieve the path of a deleted
// vma.
- rc = tgt->vm_callback(tgt, tsk, 0, NULL,
- entry->vm_start,
- entry->vm_end,
- (entry->vm_pgoff
- << PAGE_SHIFT));
- if (rc != 0) {
- _stp_error("vm callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
+
+ __stp_call_vm_callbacks(tgt, tsk, 0, NULL,
+ entry->vm_start,
+ entry->vm_end,
+ (entry->vm_pgoff
+ << PAGE_SHIFT));
}
// If nothing has changed, there is no
@@ -1172,15 +1178,11 @@ __stp_utrace_task_finder_target_syscall_exit(enum utrace_resume_action action,
// FIXME: We'll need to figure out to
// retrieve the path of a deleted
// vma.
- rc = tgt->vm_callback(tgt, tsk, 0, NULL,
- entry->vm_start,
- entry->vm_end,
- (entry->vm_pgoff
- << PAGE_SHIFT));
- if (rc != 0) {
- _stp_error("vm callback for %d failed: %d",
- (int)tsk->pid, rc);
- }
+ __stp_call_vm_callbacks(tgt, tsk, 0, NULL,
+ entry->vm_start,
+ entry->vm_end,
+ (entry->vm_pgoff
+ << PAGE_SHIFT));
// Now find all the new vma's that
// made up the original vma's address
@@ -1191,8 +1193,9 @@ __stp_utrace_task_finder_target_syscall_exit(enum utrace_resume_action action,
tmp))
!= NULL)
&& vma->vm_end <= entry->vm_end) {
- __stp_target_call_vm_callback(tgt, tsk,
- vma);
+ __stp_call_vm_callbacks_with_vma(tgt,
+ tsk,
+ vma);
if (vma->vm_end >= entry->vm_end)
break;
tmp = vma->vm_end;
@@ -1284,7 +1287,6 @@ stap_start_task_finder(void)
mmpathlen = strlen(mmpath);
list_for_each(tgt_node, &__stp_task_finder_list) {
struct stap_task_finder_target *tgt;
- struct list_head *cb_node;
tgt = list_entry(tgt_node,
struct stap_task_finder_target, list);
@@ -1301,23 +1303,13 @@ stap_start_task_finder(void)
/* Notice that "pid == 0" (which means to
* probe all threads) falls through. */
- list_for_each(cb_node, &tgt->callback_list_head) {
- struct stap_task_finder_target *cb_tgt;
- cb_tgt = list_entry(cb_node,
- struct stap_task_finder_target,
- callback_list);
- if (cb_tgt == NULL)
- continue;
-
- // Set up events we need for attached tasks.
- rc = __stp_utrace_attach(tsk, &cb_tgt->ops,
- cb_tgt,
- __STP_ATTACHED_TASK_EVENTS,
- UTRACE_STOP);
- if (rc != 0 && rc != EPERM)
- goto stf_err;
- cb_tgt->engine_attached = 1;
- }
+ // Set up events we need for attached tasks.
+ rc = __stp_utrace_attach(tsk, &tgt->ops, tgt,
+ __STP_ATTACHED_TASK_EVENTS,
+ UTRACE_STOP);
+ if (rc != 0 && rc != EPERM)
+ goto stf_err;
+ tgt->engine_attached = 1;
}
} while_each_thread(grp, tsk);
stf_err:
@@ -1354,6 +1346,7 @@ stap_stop_task_finder(void)
if (i > 0)
printk(KERN_ERR "it took %d polling loops to quit.\n", i);
#endif
+ debug_task_finder_report();
}
diff --git a/runtime/transport/ChangeLog b/runtime/transport/ChangeLog
index c6dfa005..e8e2a047 100644
--- a/runtime/transport/ChangeLog
+++ b/runtime/transport/ChangeLog
@@ -1,3 +1,8 @@
+2009-01-06 Frank Ch. Eigler <fche@elastic.org>
+
+ PR9699.
+ * transport.c (_stp_transport_init): Adapt to task_struct cred switch.
+
2008-11-28 Frank Ch. Eigler <fche@elastic.org>
PR5947: make code -Wpointer-arith clean
diff --git a/runtime/transport/symbols.c b/runtime/transport/symbols.c
index 6e3bef1b..72f9ad80 100644
--- a/runtime/transport/symbols.c
+++ b/runtime/transport/symbols.c
@@ -92,8 +92,8 @@ static void generic_swap(void *a, void *b, int size)
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
*
* This function does a heapsort on the given array. You may provide a
* swap function optimized to your element type.
@@ -104,37 +104,37 @@ static void generic_swap(void *a, void *b, int size)
* it less suitable for kernel use.
*/
void _stp_sort(void *_base, size_t num, size_t size,
- int (*cmp) (const void *, const void *), void (*swap) (void *, void *, int size))
+ int (*cmp_func) (const void *, const void *), void (*swap_func) (void *, void *, int size))
{
char *base = (char*) _base;
/* pre-scale counters for performance */
int i = (num / 2 - 1) * size, n = num * size, c, r;
- if (!swap)
- swap = (size == 4 ? u32_swap : generic_swap);
+ if (!swap_func)
+ swap_func = (size == 4 ? u32_swap : generic_swap);
/* heapify */
for (; i >= 0; i -= size) {
for (r = i; r * 2 + size < n; r = c) {
c = r * 2 + size;
- if (c < n - size && cmp(base + c, base + c + size) < 0)
+ if (c < n - size && cmp_func(base + c, base + c + size) < 0)
c += size;
- if (cmp(base + r, base + c) >= 0)
+ if (cmp_func(base + r, base + c) >= 0)
break;
- swap(base + r, base + c, size);
+ swap_func(base + r, base + c, size);
}
}
/* sort */
for (i = n - size; i >= 0; i -= size) {
- swap(base, base + i, size);
+ swap_func(base, base + i, size);
for (r = 0; r * 2 + size < i; r = c) {
c = r * 2 + size;
- if (c < i - size && cmp(base + c, base + c + size) < 0)
+ if (c < i - size && cmp_func(base + c, base + c + size) < 0)
c += size;
- if (cmp(base + r, base + c) >= 0)
+ if (cmp_func(base + r, base + c) >= 0)
break;
- swap(base + r, base + c, size);
+ swap_func(base + r, base + c, size);
}
}
}
diff --git a/runtime/transport/transport.c b/runtime/transport/transport.c
index 9f7a25f2..f5ee2c36 100644
--- a/runtime/transport/transport.c
+++ b/runtime/transport/transport.c
@@ -2,7 +2,7 @@
* transport.c - stp transport functions
*
* Copyright (C) IBM Corporation, 2005
- * Copyright (C) Red Hat Inc, 2005-2008
+ * Copyright (C) Red Hat Inc, 2005-2009
* Copyright (C) Intel Corporation, 2006
*
* This file is part of systemtap, and is free software. You can
@@ -211,8 +211,13 @@ int _stp_transport_init(void)
dbug_trans(1, "transport_init\n");
_stp_init_pid = current->pid;
+#ifdef STAPCONF_TASK_UID
_stp_uid = current->uid;
_stp_gid = current->gid;
+#else
+ _stp_uid = current_uid();
+ _stp_gid = current_gid();
+#endif
#ifdef RELAY_GUEST
/* Guest scripts use relay only for reporting warnings and errors */
diff --git a/runtime/uprobes/uprobes.c b/runtime/uprobes/uprobes.c
index 22d62ecc..9dfb82b9 100644
--- a/runtime/uprobes/uprobes.c
+++ b/runtime/uprobes/uprobes.c
@@ -453,13 +453,24 @@ static int quiesce_all_threads(struct uprobe_process *uproc,
return survivors;
}
+static void utask_free_uretprobe_instances(struct uprobe_task *utask)
+{
+ struct uretprobe_instance *ri;
+ struct hlist_node *r1, *r2;
+
+ hlist_for_each_entry_safe(ri, r1, r2, &utask->uretprobe_instances,
+ hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ uprobe_decref_process(utask->uproc);
+ }
+}
+
/* Called with utask->uproc write-locked. */
static void uprobe_free_task(struct uprobe_task *utask)
{
struct deferred_registration *dr, *d;
struct delayed_signal *ds, *ds2;
- struct uretprobe_instance *ri;
- struct hlist_node *r1, *r2;
uprobe_unhash_utask(utask);
list_del(&utask->list);
@@ -473,12 +484,8 @@ static void uprobe_free_task(struct uprobe_task *utask)
kfree(ds);
}
- hlist_for_each_entry_safe(ri, r1, r2, &utask->uretprobe_instances,
- hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- uprobe_decref_process(utask->uproc);
- }
+ utask_free_uretprobe_instances(utask);
+
kfree(utask);
}
@@ -809,6 +816,27 @@ static void purge_uprobe(struct uprobe_kimg *uk)
uprobe_free_probept(ppt);
}
+/* TODO: Avoid code duplication with uprobe_validate_vaddr(). */
+static int uprobe_validate_vma(struct task_struct *t, unsigned long vaddr)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ int ret = 0;
+
+ mm = get_task_mm(t);
+ if (!mm)
+ return -EINVAL;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, vaddr);
+ if (!vma || vaddr < vma->vm_start)
+ ret = -ENOENT;
+ else if (!(vma->vm_flags & VM_EXEC))
+ ret = -EFAULT;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return ret;
+}
+
/* Probed address must be in an executable VM area, outside the SSOL area. */
static int uprobe_validate_vaddr(struct task_struct *p, unsigned long vaddr,
struct uprobe_process *uproc)
@@ -1942,9 +1970,9 @@ done:
/*
* uproc's process is exiting or exec-ing, so zap all the (now irrelevant)
- * probepoints. Runs with uproc->rwsem write-locked. Caller must ref-count
- * uproc before calling this function, to ensure that uproc doesn't get
- * freed in the middle of this.
+ * probepoints and uretprobe_instances. Runs with uproc->rwsem write-locked.
+ * Caller must ref-count uproc before calling this function, to ensure that
+ * uproc doesn't get freed in the middle of this.
*/
static void uprobe_cleanup_process(struct uprobe_process *uproc)
{
@@ -1953,6 +1981,7 @@ static void uprobe_cleanup_process(struct uprobe_process *uproc)
struct hlist_node *pnode1, *pnode2;
struct hlist_head *head;
struct uprobe_kimg *uk, *unode;
+ struct uprobe_task *utask;
uproc->finished = 1;
@@ -1988,6 +2017,16 @@ static void uprobe_cleanup_process(struct uprobe_process *uproc)
}
}
}
+
+ /*
+ * Free uretprobe_instances. This is a nop on exit, since all
+ * the uprobe_tasks are already gone. We do this here on exec
+ * (as opposed to letting uprobe_free_process() take care of it)
+ * because uprobe_free_process() never gets called if we don't
+ * tick down the ref count here (PR #7082).
+ */
+ list_for_each_entry(utask, &uproc->thread_list, list)
+ utask_free_uretprobe_instances(utask);
}
/*
@@ -2134,6 +2173,23 @@ static int uprobe_fork_uproc(struct uprobe_process *parent_uproc,
BUG_ON(!parent_uproc->uretprobe_trampoline_addr ||
IS_ERR(parent_uproc->uretprobe_trampoline_addr));
+ ret = uprobe_validate_vma(child_tsk,
+ (unsigned long) parent_uproc->ssol_area.insn_area);
+ if (ret) {
+ int ret2;
+ printk(KERN_ERR "uprobes: Child %d failed to inherit"
+ " parent %d's SSOL vma at %p. Error = %d\n",
+ child_tsk->pid, parent_utask->tsk->pid,
+ parent_uproc->ssol_area.insn_area, ret);
+ ret2 = uprobe_validate_vma(parent_utask->tsk,
+ (unsigned long) parent_uproc->ssol_area.insn_area);
+ if (ret2 != 0)
+ printk(KERN_ERR "uprobes: Parent %d's SSOL vma"
+ " is no longer valid. Error = %d\n",
+ parent_utask->tsk->pid, ret2);
+ return ret;
+ }
+
if (!try_module_get(THIS_MODULE))
return -ENOSYS;
child_uproc = uprobe_mk_process(child_tsk);
diff --git a/runtime/uprobes2/uprobes.c b/runtime/uprobes2/uprobes.c
index 02496a4e..af187fc9 100644
--- a/runtime/uprobes2/uprobes.c
+++ b/runtime/uprobes2/uprobes.c
@@ -498,13 +498,24 @@ static bool quiesce_all_threads(struct uprobe_process *uproc,
return survivors;
}
+static void utask_free_uretprobe_instances(struct uprobe_task *utask)
+{
+ struct uretprobe_instance *ri;
+ struct hlist_node *r1, *r2;
+
+ hlist_for_each_entry_safe(ri, r1, r2, &utask->uretprobe_instances,
+ hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ uprobe_decref_process(utask->uproc);
+ }
+}
+
/* Called with utask->uproc write-locked. */
static void uprobe_free_task(struct uprobe_task *utask, bool in_callback)
{
struct deferred_registration *dr, *d;
struct delayed_signal *ds, *ds2;
- struct uretprobe_instance *ri;
- struct hlist_node *r1, *r2;
if (utask->engine && (utask->tsk != current || !in_callback)) {
/*
@@ -530,12 +541,8 @@ static void uprobe_free_task(struct uprobe_task *utask, bool in_callback)
kfree(ds);
}
- hlist_for_each_entry_safe(ri, r1, r2, &utask->uretprobe_instances,
- hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- uprobe_decref_process(utask->uproc);
- }
+ utask_free_uretprobe_instances(utask);
+
kfree(utask);
}
@@ -873,6 +880,27 @@ static void purge_uprobe(struct uprobe_kimg *uk)
uprobe_free_probept(ppt);
}
+/* TODO: Avoid code duplication with uprobe_validate_vaddr(). */
+static int uprobe_validate_vma(struct task_struct *t, unsigned long vaddr)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ int ret = 0;
+
+ mm = get_task_mm(t);
+ if (!mm)
+ return -EINVAL;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, vaddr);
+ if (!vma || vaddr < vma->vm_start)
+ ret = -ENOENT;
+ else if (!(vma->vm_flags & VM_EXEC))
+ ret = -EFAULT;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return ret;
+}
+
/* Probed address must be in an executable VM area, outside the SSOL area. */
static int uprobe_validate_vaddr(struct pid *p, unsigned long vaddr,
struct uprobe_process *uproc)
@@ -2085,9 +2113,9 @@ static u32 uprobe_report_quiesce(enum utrace_resume_action action,
/*
* uproc's process is exiting or exec-ing, so zap all the (now irrelevant)
- * probepoints. Runs with uproc->rwsem write-locked. Caller must ref-count
- * uproc before calling this function, to ensure that uproc doesn't get
- * freed in the middle of this.
+ * probepoints and uretprobe_instances. Runs with uproc->rwsem write-locked.
+ * Caller must ref-count uproc before calling this function, to ensure that
+ * uproc doesn't get freed in the middle of this.
*/
static void uprobe_cleanup_process(struct uprobe_process *uproc)
{
@@ -2096,6 +2124,7 @@ static void uprobe_cleanup_process(struct uprobe_process *uproc)
struct hlist_node *pnode1, *pnode2;
struct hlist_head *head;
struct uprobe_kimg *uk, *unode;
+ struct uprobe_task *utask;
uproc->finished = 1;
@@ -2131,6 +2160,16 @@ static void uprobe_cleanup_process(struct uprobe_process *uproc)
}
}
}
+
+ /*
+ * Free uretprobe_instances. This is a nop on exit, since all
+ * the uprobe_tasks are already gone. We do this here on exec
+ * (as opposed to letting uprobe_free_process() take care of it)
+ * because uprobe_free_process() never gets called if we don't
+ * tick down the ref count here (PR #7082).
+ */
+ list_for_each_entry(utask, &uproc->thread_list, list)
+ utask_free_uretprobe_instances(utask);
}
/*
@@ -2280,6 +2319,23 @@ static int uprobe_fork_uproc(struct uprobe_process *parent_uproc,
BUG_ON(!parent_uproc->uretprobe_trampoline_addr ||
IS_ERR(parent_uproc->uretprobe_trampoline_addr));
+ ret = uprobe_validate_vma(child_tsk,
+ (unsigned long) parent_uproc->ssol_area.insn_area);
+ if (ret) {
+ int ret2;
+ printk(KERN_ERR "uprobes: Child %d failed to inherit"
+ " parent %d's SSOL vma at %p. Error = %d\n",
+ child_tsk->pid, parent_utask->tsk->pid,
+ parent_uproc->ssol_area.insn_area, ret);
+ ret2 = uprobe_validate_vma(parent_utask->tsk,
+ (unsigned long) parent_uproc->ssol_area.insn_area);
+ if (ret2 != 0)
+ printk(KERN_ERR "uprobes: Parent %d's SSOL vma"
+ " is no longer valid. Error = %d\n",
+ parent_utask->tsk->pid, ret2);
+ return ret;
+ }
+
if (!try_module_get(THIS_MODULE))
return -ENOSYS;
child_pid = get_pid(find_vpid(child_tsk->pid));
diff --git a/runtime/vsprintf.c b/runtime/vsprintf.c
index 831b7a2b..2c3067cf 100644
--- a/runtime/vsprintf.c
+++ b/runtime/vsprintf.c
@@ -283,6 +283,7 @@ int _stp_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case 's':
+ case 'M':
case 'm':
s = va_arg(args, char *);
if ((unsigned long)s < PAGE_SIZE)
@@ -302,11 +303,20 @@ int _stp_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
}
- for (i = 0; i < len; ++i) {
- if (str <= end)
- *str = *s;
- ++str; ++s;
+ if (*fmt == 'M') {
+ str = number(str, str + len - 1 < end ? str + len - 1 : end,
+ (unsigned long) *(uint64_t *) s,
+ 16, field_width, len, flags);
+ }
+ else {
+ for (i = 0; i < len; ++i) {
+ if (str <= end) {
+ *str = *s;
+ }
+ ++str; ++s;
+ }
}
+
while (len < field_width--) {
if (str <= end)
*str = ' ';
@@ -318,7 +328,6 @@ int _stp_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
continue;
-
case 'X':
flags |= STP_LARGE;
case 'x':