author     Frank Ch. Eigler <fche@elastic.org>   2008-05-14 14:35:48 -0400
committer  Frank Ch. Eigler <fche@elastic.org>   2008-05-14 14:35:48 -0400
commit     0fe2b97c7b967d833b5588dbf1ef763bb4440ed3 (patch)
tree       fcce9345c9ebacb7d5bc1f510f155bfdcea08dc4 /runtime
parent     a007b4068d20af2d4488d54bf3ef2edbf47f2f06 (diff)
parent     c3799d720b60bd74a60de4addcd0d77a90f7842a (diff)
download   systemtap-steved-0fe2b97c7b967d833b5588dbf1ef763bb4440ed3.tar.gz
           systemtap-steved-0fe2b97c7b967d833b5588dbf1ef763bb4440ed3.tar.xz
           systemtap-steved-0fe2b97c7b967d833b5588dbf1ef763bb4440ed3.zip
Merge commit 'origin/master' into pr6429-comp-unwindsyms
* commit 'origin/master':
  PR 5955 - Accept ; terminated globals
  Factored returnval() out of returnstr(), for use in dwarfless probing.
  Converted more aliases to use arg numbers instead of names. In particular,
  Revert "PR6487: extend blacklist with relay/timer subsystem"
  Add syscalls_by_pid.meta, syscalls_by_proc.meta,
  PR6487: extend blacklist with relay/timer subsystem
  Adjust iotime.meta description.
  * iotime.meta: New.
  Fix for PR 6500.
  Update ChangeLog
  * sleeptime.meta, wait4time.meta: New.
  systemtap.examples futexes.meta change futex.stp to futexes.stp.
  In ioblock.stp ioblock.end set bytes_done depending on kernel version.
  PR6492: make listing mode warning-free
  PR5648: Fix unaligned access warning in stp_print_flush on ia64
  PR5648: Fix memcpy's endianess issue.
  futexes.meta, nettop.meta, pf2.meta: New.
  Clean up output.
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/ChangeLog      |  22
-rw-r--r--  runtime/print_new.c    |  12
-rw-r--r--  runtime/print_old.c    |  12
-rw-r--r--  runtime/regs.c         | 394
-rw-r--r--  runtime/task_finder.c  | 110
-rw-r--r--  runtime/vsprintf.c     |  12
6 files changed, 164 insertions(+), 398 deletions(-)
diff --git a/runtime/ChangeLog b/runtime/ChangeLog
index 8410b918..aab6a862 100644
--- a/runtime/ChangeLog
+++ b/runtime/ChangeLog
@@ -1,3 +1,25 @@
+2008-05-08 David Smith <dsmith@redhat.com>
+
+ PR 6500.
+ * task_finder.c (__stp_utrace_task_finder_report_exec): Moved
+ attach logic to __stp_utrace_attach_match_filename().
+ (__stp_utrace_attach_match_filename): New function.
+ (__stp_utrace_task_finder_report_clone): Calls
+ __stp_utrace_attach_match_filename() to attach to newly cloned
+ threads.
+
+2008-05-06 Masami Hiramatsu <mhiramat@redhat.com>
+
+ PR 5648
+ * print_old.c (stp_print_flush): Fix unaligned access warning on
+ ia64.
+ * print_new.c (stp_print_flush): Ditto.
+
+2008-05-06 Masami Hiramatsu <mhiramat@redhat.com>
+
+ PR 5648
+ * vsprintf.c (_stp_vsnprintf): Fix memcpy's endianess issue.
+
2008-05-05 Frank Ch. Eigler <fche@elastic.org>
PR 6481.
diff --git a/runtime/print_new.c b/runtime/print_new.c
index 75bbd82b..07af2e33 100644
--- a/runtime/print_new.c
+++ b/runtime/print_new.c
@@ -40,11 +40,13 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
else
atomic_inc (&_stp_transport_failures);
#else
- struct _stp_trace *t = relay_reserve(_stp_utt->rchan, sizeof(*t) + len);
- if (likely(t)) {
- t->sequence = _stp_seq_inc();
- t->pdu_len = len;
- memcpy((void *) t + sizeof(*t), pb->buf, len);
+ void *buf = relay_reserve(_stp_utt->rchan,
+ sizeof(struct _stp_trace) + len);
+ if (likely(buf)) {
+ struct _stp_trace t = { .sequence = _stp_seq_inc(),
+ .pdu_len = len};
+ memcpy(buf, &t, sizeof(t)); // prevent unaligned access
+ memcpy(buf + sizeof(t), pb->buf, len);
} else
atomic_inc (&_stp_transport_failures);
#endif
diff --git a/runtime/print_old.c b/runtime/print_old.c
index 5ee050b5..5c117e5f 100644
--- a/runtime/print_old.c
+++ b/runtime/print_old.c
@@ -35,11 +35,13 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
else
atomic_inc (&_stp_transport_failures);
#else
- struct _stp_trace *t = relay_reserve(_stp_utt->rchan, sizeof(*t) + len);
- if (likely(t)) {
- t->sequence = _stp_seq_inc();
- t->pdu_len = len;
- memcpy((void *) t + sizeof(*t), pb->buf, len);
+ void *buf = relay_reserve(_stp_utt->rchan,
+ sizeof(struct _stp_trace) + len);
+ if (likely(buf)) {
+ struct _stp_trace t = { .sequence = _stp_seq_inc(),
+ .pdu_len = len};
+ memcpy(buf, &t, sizeof(t)); // prevent unaligned access
+ memcpy(buf + sizeof(t), pb->buf, len);
} else
atomic_inc (&_stp_transport_failures);
#endif
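
The identical fix in print_new.c and print_old.c addresses PR 5648: on ia64, assigning to members of a struct _stp_trace that sits at an arbitrary offset inside the relay buffer can trap as an unaligned access, so the header is now assembled in an aligned local and copied in with memcpy. A minimal stand-alone sketch of the same technique (types and names here are illustrative, not the runtime's API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative stand-in for the runtime's record header. */
struct trace_hdr {
	uint32_t sequence;
	uint32_t pdu_len;
};

/*
 * Write header + payload into a destination that may not be aligned
 * for struct trace_hdr.  The header is built in an aligned local and
 * copied byte-wise, so no aligned store ever targets 'dst'.
 */
static void emit_record(void *dst, uint32_t seq, const void *payload, uint32_t len)
{
	struct trace_hdr h = { .sequence = seq, .pdu_len = len };
	memcpy(dst, &h, sizeof(h));                 /* safe even if dst is unaligned */
	memcpy((char *)dst + sizeof(h), payload, len);
}

int main(void)
{
	char buf[64];
	emit_record(buf + 1, 42, "hello", 5);       /* deliberately misaligned destination */
	printf("wrote %zu bytes\n", sizeof(struct trace_hdr) + 5);
	return 0;
}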
diff --git a/runtime/regs.c b/runtime/regs.c
index 2daeaa3c..5821f7e7 100644
--- a/runtime/regs.c
+++ b/runtime/regs.c
@@ -383,317 +383,60 @@ void _stp_print_regs(struct pt_regs * regs)
#endif
-/*
- * (Theoretically) arch-independent scheme for binary lookup of register
- * values (from pt_regs) by register name. A register may be called by
- * more than one name.
- */
-struct _stp_register_desc {
- const char *name;
- unsigned short size; // in bytes
- unsigned short offset; // in bytes, from start of pt_regs
-};
-
-struct _stp_register_table {
- struct _stp_register_desc *registers;
- unsigned nr_registers;
- unsigned nr_slots; // capacity
-};
-
-static DEFINE_SPINLOCK(_stp_register_table_lock);
-static void _stp_populate_register_table(void);
-
-/*
- * If the named register is in the list, return its slot number and *found=1.
- * Else *found=0 and return the slot number where the name should be inserted.
- */
-static int _stp_lookup_register(const char *name,
- struct _stp_register_table *table, int *found)
-{
- unsigned begin, mid, end;
-
- *found = 0;
- end = table->nr_registers;
- if (end == 0)
- return 0;
- begin = 0;
- mid = -1;
- for (;;) {
- int cmp;
- int prev_mid = mid;
- mid = (begin + end) / 2;
- if (mid == prev_mid)
- break;
- cmp = strcmp(name, table->registers[mid].name);
- if (cmp == 0) {
- *found = 1;
- return mid;
- } else if (cmp < 0)
- end = mid;
- else
- begin = mid;
- }
- if (begin == 0 && strcmp(name, table->registers[0].name) < 0)
- return 0;
- return begin + 1;
-}
-
-/*
- * If found, return 1 and the size and/or offset in the pt_regs array.
- * Else return 0.
- */
-static int _stp_find_register(const char *name,
- struct _stp_register_table *table, size_t *size, size_t *offset)
-{
- int slot, found;
- if (unlikely(table->nr_registers == 0)) {
- unsigned long flags;
- /*
- * Should we do this at the beginning of time to avoid
- * the possibility of spending too long in a handler?
- */
- spin_lock_irqsave(&_stp_register_table_lock, flags);
- if (table->nr_registers == 0)
- _stp_populate_register_table();
- spin_unlock_irqrestore(&_stp_register_table_lock, flags);
- }
- slot = _stp_lookup_register(name, table, &found);
- if (found) {
- if (size)
- *size = table->registers[slot].size;
- if (offset)
- *offset = table->registers[slot].offset;
- return 1;
- }
- return 0;
-}
-
-/*
- * Add name to the register-lookup table. Note that the name pointer
- * is merely copied, not strdup-ed.
- */
-void _stp_add_register(const char *name, struct _stp_register_table *table,
- size_t size, size_t offset)
-{
- int idx, found;
- struct _stp_register_desc *slot;
-
- idx = _stp_lookup_register(name, table, &found);
- if (found)
- _stp_error("stap runtime internal error: "
- "register name %s used twice\n", name);
- if (table->nr_registers >= table->nr_slots)
- _stp_error("stap runtime internal error: "
- "register table overflow\n");
- slot = &table->registers[idx];
-
- // Move the slots later in the array out of the way.
- if (idx < table->nr_registers)
- memmove(slot+1, slot,
- sizeof(*slot) * (table->nr_registers - idx));
- table->nr_registers++;
- slot->name = name;
- slot->size = size;
- slot->offset = offset;
-}
-
-#if defined(__i386__) || defined(__x86_64__)
-/*
- * This register set is used for i386 kernel and apps, and for 32-bit apps
- * running on x86_64. For the latter case, this allows the user to use
- * things like reg("eax") as well as the standard x86_64 pt_regs names.
- */
-
-/*
- * x86_64 and i386 are especially ugly because the pt_reg member names
- * changed as part of the x86 merge. We allow (and use, as needed)
- * either the pre-merge name or the post-merge name.
- */
-
-// I count 32 different names, but add a fudge factor.
-static struct _stp_register_desc i386_registers[32+8];
-static struct _stp_register_table i386_register_table = {
- .registers = i386_registers,
- .nr_slots = ARRAY_SIZE(i386_registers)
-};
-
-/*
- * sizeof(long) is indeed what we want here, for both i386 and x86_64.
- * Unlike function args, x86_64 pt_regs is the same even if the int3
- * was in an -m32 app.
- */
-#define ADD_PT_REG(name, member) \
- _stp_add_register(name, &i386_register_table, \
- sizeof(long), offsetof(struct pt_regs, member))
-#define ADD2NAMES(nm1, nm2, member) \
- do { \
- ADD_PT_REG(nm1, member); \
- ADD_PT_REG(nm2, member); \
- } while (0)
-
-#ifdef STAPCONF_X86_UNIREGS
-/* Map "ax" and "eax" to regs->ax, and "cs" and "xcs" to regs->cs */
-#define ADD_EREG(nm) ADD2NAMES(#nm, "e" #nm, nm)
-#define ADD_XREG(nm) ADD2NAMES(#nm, "x" #nm, nm)
-#define ADD_FLAGS_REG() ADD_EREG(flags)
-#define EREG(nm, regs) ((regs)->nm)
-#define RREG(nm, regs) ((regs)->nm)
+/* Function arguments */
-#else /* ! STAPCONF_X86_UNIREGS */
+#define _STP_REGPARM 0x8000
+#define _STP_REGPARM_MASK ((_STP_REGPARM) - 1)
-#ifdef __i386__
-#define ADD_EREG(nm) ADD2NAMES(#nm, "e" #nm, e##nm)
-#define ADD_XREG(nm) ADD2NAMES(#nm, "x" #nm, x##nm)
-#define ADD_FLAGS_REG() ADD_EREG(flags)
-#define EREG(nm, regs) ((regs)->e##nm)
-#else /* __x86_64__ */
/*
- * Map "eax" to regs->rax and "xcs" to regs->cs. Other mappings are
- * handled in x86_64_register_table.
+ * x86_64 and i386 are especially ugly because:
+ * 1) the pt_reg member names changed as part of the x86 merge. We use
+ * either the pre-merge name or the post-merge name, as needed.
+ * 2) -m32 apps on x86_64 look like i386 apps, so we need to support
+ * those semantics on both i386 and x86_64.
*/
-#define ADD_EREG(nm) ADD_PT_REG("e" #nm, r##nm)
-#define ADD_XREG(nm) ADD_PT_REG("x" #nm, nm)
-#define ADD_FLAGS_REG() ADD2NAMES("flags", "eflags", eflags)
-/* Note: After a store to %eax, %rax holds the ZERO-extended %eax. */
-#define EREG(nm, regs) ((regs)->r##nm)
-#define RREG(nm, regs) ((regs)->r##nm)
-#endif /* __x86_64__ */
-
-#endif /* ! STAPCONF_X86_UNIREGS */
-static void _stp_populate_i386_register_table(void)
-{
- /*
- * The order here is the same as in i386 struct pt_regs.
- * It's a different order from x86_64 pt_regs; but that doesn't
- * matter -- even when compiling for x86_64 -- because the
- * offsets are determined by offsetof(), not the calling order.
- */
- ADD_EREG(bx);
- ADD_EREG(cx);
- ADD_EREG(dx);
- ADD_EREG(si);
- ADD_EREG(di);
- ADD_EREG(bp);
- ADD_EREG(ax);
#ifdef __i386__
- ADD_XREG(ds);
- ADD_XREG(es);
- ADD_XREG(fs);
- /* gs not saved */
-#endif
#ifdef STAPCONF_X86_UNIREGS
- ADD2NAMES("orig_ax", "orig_eax", orig_ax);
+#define EREG(nm, regs) ((regs)->nm)
#else
-#ifdef __i386__
- ADD2NAMES("orig_ax", "orig_eax", orig_eax);
-#else /* __x86_64__ */
- ADD2NAMES("orig_ax", "orig_eax", orig_rax);
+#define EREG(nm, regs) ((regs)->e##nm)
#endif
-#endif /* STAPCONF_X86_UNIREGS */
- ADD_EREG(ip);
- ADD_XREG(cs);
- ADD_FLAGS_REG();
- ADD_EREG(sp);
- ADD_XREG(ss);
-}
-/*
- * For x86_64, this gets a copy of the saved 64-bit register (e.g., regs->rax).
- * After a store to %eax, %rax holds the ZERO-extended %eax.
- */
-static long
-_stp_get_reg32_by_name(const char *name, struct pt_regs *regs)
+static long _stp_get_sp(struct pt_regs *regs)
{
- size_t offset = 0;
- long value; // works for i386 or x86_64
- BUG_ON(!name);
- if (!regs)
- _stp_error("Register values not available in this context.\n");
-#ifdef __i386__
- if (!user_mode(regs)) {
- /* esp and ss aren't saved on trap from kernel mode. */
- if (!strcmp(name,"esp") || !strcmp(name, "sp"))
- return (long) &EREG(sp, regs);
- if (!strcmp(name,"xss") || !strcmp(name, "ss")) {
- /*
- * Assume ss register hasn't changed since we took
- * the trap.
- */
- unsigned short ss;
- asm volatile("movw %%ss, %0" : : "m" (ss));
- return ss;
- }
- }
-#endif
- if (!_stp_find_register(name, &i386_register_table, NULL, &offset))
- _stp_error("Unknown register name: %s\n", name);
- (void) memcpy(&value, ((char*)regs) + offset, sizeof(value));
- return value;
+ if (!user_mode(regs))
+ return (long) &EREG(sp, regs);
+ return EREG(sp, regs);
}
-#endif /* __i386__ || __x86_64__ */
-
-#ifdef __i386__
-static void _stp_populate_register_table(void)
+static int _stp_get_regparm(int regparm, struct pt_regs *regs)
{
- _stp_populate_i386_register_table();
+ if (regparm == 0) {
+ /* Default */
+ if (user_mode(regs))
+ return 0;
+ else
+ // Kernel is built with -mregparm=3.
+ return 3;
+ } else
+ return (regparm & _STP_REGPARM_MASK);
}
#endif /* __i386__ */
#ifdef __x86_64__
-// I count 32 different names (not the same 32 as i386), but add a fudge factor.
-static struct _stp_register_desc x86_64_registers[32+8];
-static struct _stp_register_table x86_64_register_table = {
- .registers = x86_64_registers,
- .nr_slots = ARRAY_SIZE(x86_64_registers)
-};
-
-/* NB: Redefining ADD_PT_REG here. ADD2NAMES and such change accordingly. */
-#undef ADD_PT_REG
-#define ADD_PT_REG(name, member) \
- _stp_add_register(name, &x86_64_register_table, \
- sizeof(unsigned long), offsetof(struct pt_regs, member))
-
-#define ADD_NREG(nm) ADD_PT_REG(#nm, nm)
-
#ifdef STAPCONF_X86_UNIREGS
-#define ADD_RREG(nm) ADD2NAMES(#nm, "r" #nm, nm)
+#define EREG(nm, regs) ((regs)->nm)
+#define RREG(nm, regs) ((regs)->nm)
#else
-#define ADD_RREG(nm) ADD2NAMES(#nm, "r" #nm, r##nm)
+#define EREG(nm, regs) ((regs)->r##nm)
+#define RREG(nm, regs) ((regs)->r##nm)
#endif
-static void _stp_populate_register_table(void)
+static long _stp_get_sp(struct pt_regs *regs)
{
- /* Same order as in struct pt_regs */
- ADD_NREG(r15);
- ADD_NREG(r14);
- ADD_NREG(r13);
- ADD_NREG(r12);
- ADD_RREG(bp);
- ADD_RREG(bx);
- ADD_NREG(r11);
- ADD_NREG(r10);
- ADD_NREG(r9);
- ADD_NREG(r8);
- ADD_RREG(ax);
- ADD_RREG(cx);
- ADD_RREG(dx);
- ADD_RREG(si);
- ADD_RREG(di);
-#ifdef STAPCONF_X86_UNIREGS
- ADD2NAMES("orig_ax", "orig_rax", orig_ax);
-#else
- ADD2NAMES("orig_ax", "orig_rax", orig_rax);
-#endif
- ADD_RREG(ip);
- ADD_NREG(cs);
- ADD_FLAGS_REG();
- ADD_RREG(sp);
- ADD_NREG(ss);
-
- _stp_populate_i386_register_table();
+ return RREG(sp, regs);
}
static int _stp_probing_32bit_app(struct pt_regs *regs)
@@ -704,54 +447,26 @@ static int _stp_probing_32bit_app(struct pt_regs *regs)
}
/* Ensure that the upper 32 bits of val are a sign-extension of the lower 32. */
-static long _stp_sign_extend32(long val)
+static int64_t __stp_sign_extend32(int64_t val)
{
int32_t *val_ptr32 = (int32_t*) &val;
return *val_ptr32;
}
-/*
- * Get the value of the 64-bit register with the specified name. "rax",
- * "ax", and "eax" all get you regs->[r]ax. Sets *reg32=1 if the name
- * designates a 32-bit register (e.g., "eax"), 0 otherwise.
- */
-static unsigned long
-_stp_get_reg64_by_name(const char *name, struct pt_regs *regs, int *reg32)
+static int _stp_get_regparm(int regparm, struct pt_regs *regs)
{
- size_t offset = 0;
- unsigned long value;
- BUG_ON(!name);
- if (!regs) {
- _stp_error("Register values not available in this context.\n");
- return 0;
- }
- if (_stp_find_register(name, &x86_64_register_table, NULL, &offset)) {
- if (reg32)
- *reg32 = 0;
- (void) memcpy(&value, ((char*)regs) + offset, sizeof(value));
- return value;
- }
- if (reg32)
- *reg32 = 1;
- return _stp_get_reg32_by_name(name, regs);
+ if (regparm == 0) {
+ /* Default */
+ if (_stp_probing_32bit_app(regs))
+ return 0;
+ else
+ return 6;
+ } else
+ return (regparm & _STP_REGPARM_MASK);
}
-#endif /* __x86_64__ */
-
-/* Function arguments */
-
-#define _STP_REGPARM 0x8000
-#define _STP_REGPARM_MASK ((_STP_REGPARM) - 1)
+#endif /* __x86_64__ */
#if defined(__i386__) || defined(__x86_64__)
-static long _stp_get_sp(struct pt_regs *regs)
-{
-#ifdef __i386__
- if (!user_mode(regs))
- return (long) &EREG(sp, regs);
-#endif
- return EREG(sp, regs);
-}
-
/*
* Use this for i386 kernel and apps, and for 32-bit apps running on x86_64.
* Does arch-specific work for fetching function arg #argnum (1 = first arg).
@@ -792,21 +507,6 @@ static int _stp_get_arg32_by_number(int n, int nr_regargs,
}
#endif /* __i386__ || __x86_64__ */
-#ifdef __i386__
-static int _stp_get_regparm(int regparm, struct pt_regs *regs)
-{
- if (regparm == 0) {
- /* Default */
- if (user_mode(regs))
- return 0;
- else
- // Kernel is built with -mregparm=3.
- return 3;
- } else
- return (regparm & _STP_REGPARM_MASK);
-}
-#endif
-
#ifdef __x86_64__
/* See _stp_get_arg32_by_number(). */
static int _stp_get_arg64_by_number(int n, int nr_regargs,
@@ -835,18 +535,6 @@ static int _stp_get_arg64_by_number(int n, int nr_regargs,
return 0;
}
}
-
-static int _stp_get_regparm(int regparm, struct pt_regs *regs)
-{
- if (regparm == 0) {
- /* Default */
- if (_stp_probing_32bit_app(regs))
- return 0;
- else
- return 6;
- } else
- return (regparm & _STP_REGPARM_MASK);
-}
#endif /* __x86_64__ */
/** @} */
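
The regs.c hunk above removes the name-based register lookup tables and keeps only the helpers used for fetching numbered function arguments: _stp_get_sp() and _stp_get_regparm(). The regparm value packs an explicit register-argument count into the low bits with the _STP_REGPARM flag (0x8000) set; with no explicit value, the defaults are 0 register arguments for user space (and for 32-bit apps on x86_64), 3 for an i386 kernel built with -mregparm=3, and 6 for native x86_64 code. A small stand-alone illustration of that encoding (names are illustrative, not the runtime's):

#include <stdio.h>

#define REGPARM      0x8000             /* flag: an explicit count is encoded */
#define REGPARM_MASK ((REGPARM) - 1)    /* low bits carry the count itself */

/*
 * Return the number of register-passed arguments: the explicit count if
 * one was encoded, otherwise an architecture-dependent default.
 */
static int nr_regargs(int regparm, int arch_default)
{
	if (regparm == 0)
		return arch_default;
	return regparm & REGPARM_MASK;
}

int main(void)
{
	printf("%d\n", nr_regargs(0, 6));              /* no encoding: default (e.g. 6 on x86_64) */
	printf("%d\n", nr_regargs(REGPARM | 2, 6));    /* explicit: 2 register args */
	return 0;
}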
diff --git a/runtime/task_finder.c b/runtime/task_finder.c
index 6d79c98a..2c27e4a3 100644
--- a/runtime/task_finder.c
+++ b/runtime/task_finder.c
@@ -231,43 +231,16 @@ __stp_utrace_attach(struct task_struct *tsk,
return rc;
}
-static u32
-__stp_utrace_task_finder_report_clone(struct utrace_attached_engine *engine,
- struct task_struct *parent,
- unsigned long clone_flags,
- struct task_struct *child)
-{
- struct utrace_attached_engine *child_engine;
- struct mm_struct *mm;
-
- if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
- return UTRACE_ACTION_RESUME;
-
- // On clone, attach to the child.
- (void) __stp_utrace_attach(child, engine->ops, 0,
- __STP_UTRACE_TASK_FINDER_EVENTS);
- return UTRACE_ACTION_RESUME;
-}
-
-static u32
-__stp_utrace_task_finder_report_exec(struct utrace_attached_engine *engine,
- struct task_struct *tsk,
- const struct linux_binprm *bprm,
- struct pt_regs *regs)
+static inline void
+__stp_utrace_attach_match_filename(struct task_struct *tsk,
+ const char * const filename)
{
size_t filelen;
struct list_head *tgt_node;
struct stap_task_finder_target *tgt;
int found_node = 0;
- if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
- return UTRACE_ACTION_RESUME;
-
- // On exec, check bprm
- if (bprm->filename == NULL)
- return UTRACE_ACTION_RESUME;
-
- filelen = strlen(bprm->filename);
+ filelen = strlen(filename);
list_for_each(tgt_node, &__stp_task_finder_list) {
tgt = list_entry(tgt_node, struct stap_task_finder_target,
list);
@@ -275,7 +248,7 @@ __stp_utrace_task_finder_report_exec(struct utrace_attached_engine *engine,
// here, since they are handled at startup.
if (tgt != NULL && tgt->pathlen > 0
&& tgt->pathlen == filelen
- && strcmp(tgt->pathname, bprm->filename) == 0) {
+ && strcmp(tgt->pathname, filename) == 0) {
found_node = 1;
break;
}
@@ -309,6 +282,79 @@ __stp_utrace_task_finder_report_exec(struct utrace_attached_engine *engine,
cb_tgt->engine_attached = 1;
}
}
+}
+
+static u32
+__stp_utrace_task_finder_report_clone(struct utrace_attached_engine *engine,
+ struct task_struct *parent,
+ unsigned long clone_flags,
+ struct task_struct *child)
+{
+ int rc;
+ struct mm_struct *mm;
+ char *mmpath_buf;
+ char *mmpath;
+
+ if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
+ return UTRACE_ACTION_RESUME;
+
+ // On clone, attach to the child.
+ rc = __stp_utrace_attach(child, engine->ops, 0,
+ __STP_UTRACE_TASK_FINDER_EVENTS);
+ if (rc != 0 && rc != EPERM)
+ return UTRACE_ACTION_RESUME;
+
+ /* Grab the path associated with this task. */
+ mm = get_task_mm(child);
+ if (! mm) {
+ /* If the thread doesn't have a mm_struct, it is
+ * a kernel thread which we need to skip. */
+ return UTRACE_ACTION_RESUME;
+ }
+
+ // Allocate space for a path
+ mmpath_buf = _stp_kmalloc(PATH_MAX);
+ if (mmpath_buf == NULL) {
+ _stp_error("Unable to allocate space for path");
+ return UTRACE_ACTION_RESUME;
+ }
+
+ // Grab the path associated with the new task
+ mmpath = __stp_get_mm_path(mm, mmpath_buf, PATH_MAX);
+ mmput(mm); /* We're done with mm */
+ if (IS_ERR(mmpath)) {
+ rc = -PTR_ERR(mmpath);
+ _stp_error("Unable to get path (error %d) for pid %d",
+ rc, (int)child->pid);
+ }
+ else {
+ __stp_utrace_attach_match_filename(child, mmpath);
+ }
+
+ _stp_kfree(mmpath_buf);
+ return UTRACE_ACTION_RESUME;
+}
+
+static u32
+__stp_utrace_task_finder_report_exec(struct utrace_attached_engine *engine,
+ struct task_struct *tsk,
+ const struct linux_binprm *bprm,
+ struct pt_regs *regs)
+{
+ size_t filelen;
+ struct list_head *tgt_node;
+ struct stap_task_finder_target *tgt;
+ int found_node = 0;
+
+ if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING)
+ return UTRACE_ACTION_RESUME;
+
+ // On exec, check bprm
+ if (bprm->filename == NULL)
+ return UTRACE_ACTION_RESUME;
+
+ __stp_utrace_attach_match_filename(tsk, bprm->filename);
+
return UTRACE_ACTION_RESUME;
}
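
The task_finder.c change (PR 6500) factors the target-matching loop out of the exec callback into __stp_utrace_attach_match_filename() so the clone callback can reuse it: after attaching to the new child it resolves the child's executable path via get_task_mm()/__stp_get_mm_path(), releases the mm with mmput(), and matches the path against the registered targets. A rough user-space analogue of "resolve a task's binary path, then compare it against watched pathnames" (purely illustrative; the real code uses the in-kernel utrace and mm APIs shown above):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>

/*
 * Resolve a process's executable path, then compare it against a list of
 * watched pathnames -- roughly what the clone callback now does in-kernel
 * via get_task_mm()/__stp_get_mm_path() and the shared matching helper.
 */
static int matches_target(pid_t pid, const char *const targets[], int ntargets)
{
	char link[64], path[PATH_MAX];
	ssize_t n;
	int i;

	snprintf(link, sizeof(link), "/proc/%d/exe", (int)pid);
	n = readlink(link, path, sizeof(path) - 1);
	if (n < 0)
		return 0;                       /* no path (e.g. kernel thread): skip */
	path[n] = '\0';

	for (i = 0; i < ntargets; i++)
		if (strcmp(path, targets[i]) == 0)
			return 1;
	return 0;
}

int main(void)
{
	const char *targets[] = { "/bin/ls", "/usr/bin/ls" };
	printf("matches: %d\n", matches_target(getpid(), targets, 2));
	return 0;
}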
diff --git a/runtime/vsprintf.c b/runtime/vsprintf.c
index dcaa1bc3..4ffcf72e 100644
--- a/runtime/vsprintf.c
+++ b/runtime/vsprintf.c
@@ -248,6 +248,11 @@ int _stp_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
}
+#ifdef __ia64__
+ if ((str + precision - 1) <= end)
+ memcpy(str, &num, precision); //to prevent unaligned access
+ str += precision;
+#else
switch(precision) {
case 1:
if(str <= end)
@@ -256,21 +261,22 @@ int _stp_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
break;
case 2:
if((str + 1) <= end)
- memcpy(str, &num, 2);
+ *(int16_t *)str = (int16_t)num;
str+=2;
break;
case 4:
if((str + 3) <= end)
- memcpy(str, &num, 4);
+ *(int32_t *)str = num;
str+=4;
break;
default: // "%.8b" by default
case 8:
if((str + 7) <= end)
- memcpy(str, &num, 8);
+ *(int64_t *)str = num;
str+=8;
break;
}
+#endif
while (len < field_width--) {
if (str <= end)
*str = '\0';
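
The vsprintf.c hunk covers both halves of PR 5648 for the binary (%b) output path. Copying the first `precision` bytes of the 64-bit num with memcpy yields the low-order bytes only on little-endian hosts; a truncating typed store such as *(int16_t *)str = (int16_t)num writes the low-order bits on any byte order, which is the endianness fix. On ia64, however, str may be unaligned and such typed stores can fault, so that architecture keeps a memcpy of the full precision instead. A small stand-alone illustration of the endianness point (illustrative only):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	int64_t num = 0x1122334455667788LL;
	unsigned char a[2], b[2];
	int16_t low;

	/* First two bytes in memory order: 88 77 on little-endian hosts,
	 * but 11 22 on big-endian ones -- the copied value depends on byte order. */
	memcpy(a, &num, 2);

	/* Truncate first, then copy: always the low-order 16 bits (0x7788),
	 * whatever the host byte order -- the behaviour the typed stores give. */
	low = (int16_t)num;
	memcpy(b, &low, 2);

	printf("raw copy: %02x %02x   truncated: %02x %02x\n", a[0], a[1], b[0], b[1]);
	return 0;
}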