From b5a0dd413a3e4f521df7febca6b88f0ec6b9ff36 Mon Sep 17 00:00:00 2001 From: "Frank Ch. Eigler" Date: Fri, 18 Dec 2009 12:20:52 -0500 Subject: PR10601 part 1: i386 and x86-64 regset for dwarf fetch/store_register()s * runtime/loc2c-runtime.h (fetch_register, store_register): forked into k_ (kernel) and u_ (user) varieties. Implement i386 and x86_64 in terms of regset.h; fall back to k_* for other architectures. * tapsets.cxx: (*::visit_target_symbol): Emit macros to map loc2c's fetch/store_register to loc2c-runtime's k_ or u_ as appopriate. --- runtime/loc2c-runtime.h | 385 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 300 insertions(+), 85 deletions(-) (limited to 'runtime') diff --git a/runtime/loc2c-runtime.h b/runtime/loc2c-runtime.h index e9e5a071..d23df7f2 100644 --- a/runtime/loc2c-runtime.h +++ b/runtime/loc2c-runtime.h @@ -1,5 +1,5 @@ /* target operations - * Copyright (C) 2005 Red Hat Inc. + * Copyright (C) 2005-2009 Red Hat Inc. * Copyright (C) 2005, 2006, 2007 Intel Corporation. * Copyright (C) 2007 Quentin Barnes. * @@ -44,10 +44,10 @@ can be pasted into an identifier name. These definitions turn it into a per-register macro, defined below for machines with individually-named registers. */ -#define fetch_register(regno) \ - ((intptr_t) dwarf_register_##regno (c->regs)) -#define store_register(regno, value) \ - (dwarf_register_##regno (c->regs) = (value)) +#define k_fetch_register(regno) \ + ((intptr_t) k_dwarf_register_##regno (c->regs)) +#define k_store_register(regno, value) \ + (k_dwarf_register_##regno (c->regs) = (value)) /* The deref and store_deref macros are called to safely access addresses @@ -82,35 +82,242 @@ }) #endif + +/* PR 10601: user-space (user_regset) register access. Needs porting to each architecture. */ +#include + +struct usr_regset_lut { + char *name; + unsigned rsn; + unsigned pos; +}; + +/* DWARF register number -to- user_regset offset/bank mapping table. 
*/ +static const struct usr_regset_lut url_i386[] = { + { "ax", NT_PRSTATUS, 6*4 }, + { "cx", NT_PRSTATUS, 1*4 }, + { "dx", NT_PRSTATUS, 2*4 }, + { "bx", NT_PRSTATUS, 0*4 }, + { "sp", NT_PRSTATUS, 15*4 }, + { "bp", NT_PRSTATUS, 5*4 }, + { "di", NT_PRSTATUS, 4*4 }, + { "si", NT_PRSTATUS, 3*4 }, +}; + +static const struct usr_regset_lut url_x86_64[] = { + { "rax", NT_PRSTATUS, 10*8 }, + { "rdx", NT_PRSTATUS, 12*8 }, + { "rcx", NT_PRSTATUS, 11*8 }, + { "rbx", NT_PRSTATUS, 5*8 }, + { "rsi", NT_PRSTATUS, 13*8 }, + { "rdi", NT_PRSTATUS, 14*8 }, + { "rbp", NT_PRSTATUS, 4*8 }, + { "rsp", NT_PRSTATUS, 19*8 }, + { "r8", NT_PRSTATUS, 9*8 }, + { "r9", NT_PRSTATUS, 8*8 }, + { "r10", NT_PRSTATUS, 7*8 }, + { "r11", NT_PRSTATUS, 6*8 }, + { "r12", NT_PRSTATUS, 3*8 }, + { "r13", NT_PRSTATUS, 2*8 }, + { "r14", NT_PRSTATUS, 1*8 }, + { "r15", NT_PRSTATUS, 0*8 }, +}; + + + +static u32 ursl_fetch32 (const struct usr_regset_lut* lut, unsigned lutsize, int e_machine, unsigned regno) +{ + u32 value = ~0; + const struct user_regset_view *rsv = task_user_regset_view(current); + unsigned rsi; + int rc; + unsigned rsn; + unsigned pos; + unsigned count; + + WARN_ON (!rsv); + if (!rsv) goto out; + WARN_ON (regno >= lutsize); + if (regno >= lutsize) goto out; + if (rsv->e_machine != e_machine) goto out; + + rsn = lut[regno].rsn; + pos = lut[regno].pos; + count = sizeof(value); + + for (rsi=0; rsin; rsi++) + if (rsv->regsets[rsi].core_note_type == rsn) + { + const struct user_regset *rs = & rsv->regsets[rsi]; + rc = (rs->get)(current, rs, pos, count, & value, NULL); + WARN_ON (rc); + /* success */ + goto out; + } + WARN_ON (1); /* did not find appropriate regset! */ + + out: + return value; +} + + +static void ursl_store32 (const struct usr_regset_lut* lut,unsigned lutsize, int e_machine, unsigned regno, u32 value) +{ + const struct user_regset_view *rsv = task_user_regset_view(current); + unsigned rsi; + int rc; + unsigned rsn; + unsigned pos; + unsigned count; + + WARN_ON (!rsv); + if (!rsv) goto out; + WARN_ON (regno >= lutsize); + if (regno >= lutsize) goto out; + if (rsv->e_machine != e_machine) goto out; + + rsn = lut[regno].rsn; + pos = lut[regno].pos; + count = sizeof(value); + + for (rsi=0; rsin; rsi++) + if (rsv->regsets[rsi].core_note_type == rsn) + { + const struct user_regset *rs = & rsv->regsets[rsi]; + rc = (rs->set)(current, rs, pos, count, & value, NULL); + WARN_ON (rc); + /* success */ + goto out; + } + WARN_ON (1); /* did not find appropriate regset! 
*/ + + out: + return; +} + + +static u64 ursl_fetch64 (const struct usr_regset_lut* lut, unsigned lutsize, int e_machine, unsigned regno) +{ + u64 value = ~0; + const struct user_regset_view *rsv = task_user_regset_view(current); + unsigned rsi; + int rc; + unsigned rsn; + unsigned pos; + unsigned count; + + if (!rsv) goto out; + if (regno >= lutsize) goto out; + if (rsv->e_machine != e_machine) goto out; + + rsn = lut[regno].rsn; + pos = lut[regno].pos; + count = sizeof(value); + + for (rsi=0; rsin; rsi++) + if (rsv->regsets[rsi].core_note_type == rsn) + { + const struct user_regset *rs = & rsv->regsets[rsi]; + rc = (rs->get)(current, rs, pos, count, & value, NULL); + if (rc) + goto out; + /* success */ + return value; + } + out: + printk (KERN_WARNING "process %d mach %d regno %d not available for fetch.\n", current->tgid, e_machine, regno); + return value; +} + + +static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, int e_machine, unsigned regno, u64 value) +{ + const struct user_regset_view *rsv = task_user_regset_view(current); + unsigned rsi; + int rc; + unsigned rsn; + unsigned pos; + unsigned count; + + WARN_ON (!rsv); + if (!rsv) goto out; + WARN_ON (regno >= lutsize); + if (regno >= lutsize) goto out; + if (rsv->e_machine != e_machine) goto out; + + rsn = lut[regno].rsn; + pos = lut[regno].pos; + count = sizeof(value); + + for (rsi=0; rsin; rsi++) + if (rsv->regsets[rsi].core_note_type == rsn) + { + const struct user_regset *rs = & rsv->regsets[rsi]; + rc = (rs->set)(current, rs, pos, count, & value, NULL); + if (rc) + goto out; + /* success */ + return; + } + + out: + printk (KERN_WARNING "process %d mach %d regno %d not available for store.\n", current->tgid, e_machine, regno); + return; +} + + +#define S(array) sizeof(array)/sizeof(array[0]) +#if defined (__i386__) + +#define u_fetch_register(regno) ursl_fetch32(url_i386, S(url_i386), EM_386, regno) +#define u_store_register(regno,value) ursl_store32(url_i386, S(url_i386), EM_386, regno, value) + +#elif defined (__x86_64__) + +#define u_fetch_register(regno) (_stp_probing_32bit_app(c->regs) ? ursl_fetch32(url_i386, S(url_i386), EM_386, regno) : ursl_fetch64(url_x86_64, S(url_x86_64), EM_X86_64, regno)) +#define u_store_register(regno,value) (_stp_probing_32bit_app(c->regs) ? ursl_store2(url_i386, S(url_i386), EM_386, regno, value) : ursl_store64(url_x86_64, S(url_x86_64), EM_X86_64, regno, value)) + +#else + +#error "no can do" + +/* Some other architecture; downgrade to kernel register access. 
*/ +#define u_fetch_register(regno) k_fetch_register(regno) +#define u_store_register(regno,value) k_store_register(regno,value) + +#endif + + + #if defined (STAPCONF_X86_UNIREGS) && defined (__i386__) -#define dwarf_register_0(regs) regs->ax -#define dwarf_register_1(regs) regs->cx -#define dwarf_register_2(regs) regs->dx -#define dwarf_register_3(regs) regs->bx -#define dwarf_register_4(regs) ((long) ®s->sp) -#define dwarf_register_5(regs) regs->bp -#define dwarf_register_6(regs) regs->si -#define dwarf_register_7(regs) regs->di +#define k_dwarf_register_0(regs) regs->ax +#define k_dwarf_register_1(regs) regs->cx +#define k_dwarf_register_2(regs) regs->dx +#define k_dwarf_register_3(regs) regs->bx +#define k_dwarf_register_4(regs) ((long) ®s->sp) +#define k_dwarf_register_5(regs) regs->bp +#define k_dwarf_register_6(regs) regs->si +#define k_dwarf_register_7(regs) regs->di #elif defined (STAPCONF_X86_UNIREGS) && defined (__x86_64__) -#define dwarf_register_0(regs) regs->ax -#define dwarf_register_1(regs) regs->dx -#define dwarf_register_2(regs) regs->cx -#define dwarf_register_3(regs) regs->bx -#define dwarf_register_4(regs) regs->si -#define dwarf_register_5(regs) regs->di -#define dwarf_register_6(regs) regs->bp -#define dwarf_register_7(regs) regs->sp -#define dwarf_register_8(regs) regs->r8 -#define dwarf_register_9(regs) regs->r9 -#define dwarf_register_10(regs) regs->r10 -#define dwarf_register_11(regs) regs->r11 -#define dwarf_register_12(regs) regs->r12 -#define dwarf_register_13(regs) regs->r13 -#define dwarf_register_14(regs) regs->r14 -#define dwarf_register_15(regs) regs->r15 +#define k_dwarf_register_0(regs) regs->ax +#define k_dwarf_register_1(regs) regs->dx +#define k_dwarf_register_2(regs) regs->cx +#define k_dwarf_register_3(regs) regs->bx +#define k_dwarf_register_4(regs) regs->si +#define k_dwarf_register_5(regs) regs->di +#define k_dwarf_register_6(regs) regs->bp +#define k_dwarf_register_7(regs) regs->sp +#define k_dwarf_register_8(regs) regs->r8 +#define k_dwarf_register_9(regs) regs->r9 +#define k_dwarf_register_10(regs) regs->r10 +#define k_dwarf_register_11(regs) regs->r11 +#define k_dwarf_register_12(regs) regs->r12 +#define k_dwarf_register_13(regs) regs->r13 +#define k_dwarf_register_14(regs) regs->r14 +#define k_dwarf_register_15(regs) regs->r15 #elif defined __i386__ @@ -120,60 +327,60 @@ For a kernel mode trap, the interrupted state's esp is actually an address inside where the `struct pt_regs' on the kernel trap stack points. */ -#define dwarf_register_0(regs) regs->eax -#define dwarf_register_1(regs) regs->ecx -#define dwarf_register_2(regs) regs->edx -#define dwarf_register_3(regs) regs->ebx -#define dwarf_register_4(regs) (user_mode(regs) ? regs->esp : (long)®s->esp) -#define dwarf_register_5(regs) regs->ebp -#define dwarf_register_6(regs) regs->esi -#define dwarf_register_7(regs) regs->edi +#define k_dwarf_register_0(regs) regs->eax +#define k_dwarf_register_1(regs) regs->ecx +#define k_dwarf_register_2(regs) regs->edx +#define k_dwarf_register_3(regs) regs->ebx +#define k_dwarf_register_4(regs) (user_mode(regs) ? 
regs->esp : (long)®s->esp) +#define k_dwarf_register_5(regs) regs->ebp +#define k_dwarf_register_6(regs) regs->esi +#define k_dwarf_register_7(regs) regs->edi #elif defined __ia64__ -#undef fetch_register -#undef store_register +#undef k_fetch_register +#undef k_store_register -#define fetch_register(regno) ia64_fetch_register(regno, c->regs, &c->unwaddr) -#define store_register(regno,value) ia64_store_register(regno, c->regs, value) +#define k_fetch_register(regno) ia64_fetch_register(regno, c->regs, &c->unwaddr) +#define k_store_register(regno,value) ia64_store_register(regno, c->regs, value) #elif defined __x86_64__ -#define dwarf_register_0(regs) regs->rax -#define dwarf_register_1(regs) regs->rdx -#define dwarf_register_2(regs) regs->rcx -#define dwarf_register_3(regs) regs->rbx -#define dwarf_register_4(regs) regs->rsi -#define dwarf_register_5(regs) regs->rdi -#define dwarf_register_6(regs) regs->rbp -#define dwarf_register_7(regs) regs->rsp -#define dwarf_register_8(regs) regs->r8 -#define dwarf_register_9(regs) regs->r9 -#define dwarf_register_10(regs) regs->r10 -#define dwarf_register_11(regs) regs->r11 -#define dwarf_register_12(regs) regs->r12 -#define dwarf_register_13(regs) regs->r13 -#define dwarf_register_14(regs) regs->r14 -#define dwarf_register_15(regs) regs->r15 +#define k_dwarf_register_0(regs) regs->rax +#define k_dwarf_register_1(regs) regs->rdx +#define k_dwarf_register_2(regs) regs->rcx +#define k_dwarf_register_3(regs) regs->rbx +#define k_dwarf_register_4(regs) regs->rsi +#define k_dwarf_register_5(regs) regs->rdi +#define k_dwarf_register_6(regs) regs->rbp +#define k_dwarf_register_7(regs) regs->rsp +#define k_dwarf_register_8(regs) regs->r8 +#define k_dwarf_register_9(regs) regs->r9 +#define k_dwarf_register_10(regs) regs->r10 +#define k_dwarf_register_11(regs) regs->r11 +#define k_dwarf_register_12(regs) regs->r12 +#define k_dwarf_register_13(regs) regs->r13 +#define k_dwarf_register_14(regs) regs->r14 +#define k_dwarf_register_15(regs) regs->r15 #elif defined __powerpc__ -#undef fetch_register -#undef store_register -#define fetch_register(regno) ((intptr_t) c->regs->gpr[regno]) -#define store_register(regno,value) (c->regs->gpr[regno] = (value)) +#undef k_fetch_register +#undef k_store_register +#define k_fetch_register(regno) ((intptr_t) c->regs->gpr[regno]) +#define k_store_register(regno,value) (c->regs->gpr[regno] = (value)) #elif defined (__arm__) -#undef fetch_register -#undef store_register -#define fetch_register(regno) ((long) c->regs->uregs[regno]) -#define store_register(regno,value) (c->regs->uregs[regno] = (value)) +#undef k_fetch_register +#undef k_store_register +#define k_fetch_register(regno) ((long) c->regs->uregs[regno]) +#define k_store_register(regno,value) (c->regs->uregs[regno] = (value)) #elif defined (__s390__) || defined (__s390x__) -#undef fetch_register -#undef store_register -#define fetch_register(regno) ((intptr_t) c->regs->gprs[regno]) -#define store_register(regno,value) (c->regs->gprs[regno] = (value)) +#undef k_fetch_register +#undef k_store_register +#define k_fetch_register(regno) ((intptr_t) c->regs->gprs[regno]) +#define k_store_register(regno,value) (c->regs->gprs[regno] = (value)) #endif @@ -206,7 +413,7 @@ STORE_DEREF_FAULT(ptr); \ }) -#define deref(size, addr) ({ \ +#define k_deref(size, addr) ({ \ intptr_t _i = 0; \ switch (size) { \ case 1: _i = kread((u8 *)(addr)); break; \ @@ -218,7 +425,7 @@ _i; \ }) -#define store_deref(size, addr, value) ({ \ +#define k_store_deref(size, addr, value) ({ \ switch (size) { \ 
case 1: kwrite((u8 *)(addr), (value)); break; \ case 2: kwrite((u16 *)(addr), (value)); break; \ @@ -228,6 +435,7 @@ } \ }) + extern void __deref_bad(void); extern void __store_deref_bad(void); @@ -235,7 +443,7 @@ extern void __store_deref_bad(void); #if defined __i386__ -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ int _bad = 0; \ u8 _b; u16 _w; u32 _l; \ @@ -255,7 +463,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -275,7 +483,7 @@ extern void __store_deref_bad(void); #elif defined __x86_64__ -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ int _bad = 0; \ u8 _b; u16 _w; u32 _l; u64 _q; \ @@ -296,7 +504,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -315,7 +523,7 @@ extern void __store_deref_bad(void); }) #elif defined __ia64__ -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v=0; \ @@ -334,7 +542,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad=0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -393,7 +601,7 @@ extern void __store_deref_bad(void); "i"(sizeof(unsigned long))) -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v = 0; \ @@ -413,7 +621,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -567,7 +775,7 @@ extern void __store_deref_bad(void); : "r" (x), "i" (-EFAULT) \ : "cc") -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v=0; \ @@ -585,7 +793,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad=0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -655,7 +863,7 @@ extern void __store_deref_bad(void); : "cc"); \ }) -#define deref(size, addr) \ +#define k_deref(size, addr) \ ({ \ u8 _b; u16 _w; u32 _l; u64 _q; \ int _bad = 0; \ @@ -692,7 +900,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define store_deref(size, addr, value) \ +#define k_store_deref(size, addr, value) \ ({ \ int _bad = 0; \ int i; \ @@ -738,14 +946,21 @@ extern void __store_deref_bad(void); #else #define kread(ptr) \ - ( (typeof(*(ptr))) deref(sizeof(*(ptr)), (ptr)) ) + ( (typeof(*(ptr))) k_deref(sizeof(*(ptr)), (ptr)) ) #define kwrite(ptr, value) \ - ( store_deref(sizeof(*(ptr)), (ptr), (long)(typeof(*(ptr)))(value)) ) + ( k_store_deref(sizeof(*(ptr)), (ptr), (long)(typeof(*(ptr)))(value)) ) #endif #endif /* STAPCONF_PROBE_KERNEL */ +/* XXX: PR10601 */ +/* Perhaps this should use something like set_fs(USER_DS); k_deref() ; set_fs(KERNEL_DS) + * But then again, the addr_map protections do that already. */ +#define u_deref(a,b) k_deref(a,b) +#define u_store_deref(a,b,c) k_store_deref(a,b,c) + + #define deref_string(dst, addr, maxbytes) \ ({ \ uintptr_t _addr; \ -- cgit From be00414012df13c2eddf98909b3cdfe21c2a94d8 Mon Sep 17 00:00:00 2001 From: "Frank Ch. 
Eigler" Date: Sun, 20 Dec 2009 12:16:33 -0500 Subject: PR10601 cont'd, RHEL5 backward compatibility with more runtime/autoconf* --- runtime/autoconf-regset.c | 7 +++++++ runtime/autoconf-utrace-regset.c | 8 ++++++++ runtime/loc2c-runtime.h | 25 ++++++++++++++++++++----- 3 files changed, 35 insertions(+), 5 deletions(-) create mode 100644 runtime/autoconf-regset.c create mode 100644 runtime/autoconf-utrace-regset.c (limited to 'runtime') diff --git a/runtime/autoconf-regset.c b/runtime/autoconf-regset.c new file mode 100644 index 00000000..9d994b03 --- /dev/null +++ b/runtime/autoconf-regset.c @@ -0,0 +1,7 @@ +#include + +int foobar(int n) { + const struct user_regset_view *rsv = task_user_regset_view(current); + const struct user_regset *rs = & rsv->regsets[0]; + return rsv->n + n + (rs->get)(current, rs, 0, 0, NULL, NULL); +} diff --git a/runtime/autoconf-utrace-regset.c b/runtime/autoconf-utrace-regset.c new file mode 100644 index 00000000..1728f239 --- /dev/null +++ b/runtime/autoconf-utrace-regset.c @@ -0,0 +1,8 @@ +#include + +/* old rhel5 utrace regset */ +int foobar(int n) { + const struct utrace_regset_view *rsv = utrace_native_view(current); + const struct utrace_regset *rs = & rsv->regsets[0]; + return rsv->n + n + (rs->get)(current, rs, 0, 0, NULL, NULL); +} diff --git a/runtime/loc2c-runtime.h b/runtime/loc2c-runtime.h index d23df7f2..c89d5b2c 100644 --- a/runtime/loc2c-runtime.h +++ b/runtime/loc2c-runtime.h @@ -82,9 +82,20 @@ }) #endif - -/* PR 10601: user-space (user_regset) register access. Needs porting to each architecture. */ +/* PR 10601: user-space (user_regset) register access. */ +#if defined(STAPCONF_REGSET) #include +#endif + +#if defined(STAPCONF_UTRACE_REGSET) +#include +/* adapt new names to old decls */ +#define user_regset_view utrace_regset_view +#define user_regset utrace_regset +#define task_user_regset_view utrace_native_view +#endif + +#if defined(STAPCONF_REGSET) || defined(STAPCONF_UTRACE_REGSET) struct usr_regset_lut { char *name; @@ -122,7 +133,7 @@ static const struct usr_regset_lut url_x86_64[] = { { "r14", NT_PRSTATUS, 1*8 }, { "r15", NT_PRSTATUS, 0*8 }, }; - +/* XXX: insert other architectures here. */ static u32 ursl_fetch32 (const struct usr_regset_lut* lut, unsigned lutsize, int e_machine, unsigned regno) @@ -279,8 +290,6 @@ static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, in #else -#error "no can do" - /* Some other architecture; downgrade to kernel register access. */ #define u_fetch_register(regno) k_fetch_register(regno) #define u_store_register(regno,value) k_store_register(regno,value) @@ -288,6 +297,12 @@ static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, in #endif +#else /* ! STAPCONF_REGSET */ +/* Downgrade to kernel register access. */ +#define u_fetch_register(regno) k_fetch_register(regno) +#define u_store_register(regno,value) k_store_register(regno,value) +#endif + #if defined (STAPCONF_X86_UNIREGS) && defined (__i386__) -- cgit From 028e1bde11b9644d1ac62f0a1a2188cf8def234a Mon Sep 17 00:00:00 2001 From: "Frank Ch. 
Eigler" Date: Sun, 20 Dec 2009 13:34:16 -0500 Subject: runtime backtracing: port commit #2e7f8442 to uprobes1 * runtime/uprobes/uprobes.c (uprobe_get_pc): new function --- runtime/stack-x86_64.c | 2 -- runtime/uprobes/uprobes.c | 38 ++++++++++++++++++++++++++++++++++++++ runtime/uprobes/uprobes.h | 8 ++++++++ 3 files changed, 46 insertions(+), 2 deletions(-) (limited to 'runtime') diff --git a/runtime/stack-x86_64.c b/runtime/stack-x86_64.c index d6c63412..3fc203f7 100644 --- a/runtime/stack-x86_64.c +++ b/runtime/stack-x86_64.c @@ -38,7 +38,6 @@ static void __stp_stack_print(struct pt_regs *regs, int verbose, int levels, while (levels && (tsk || !arch_unw_user_mode(&info))) { int ret = unwind(&info, tsk); -#if defined(UPROBES_API_VERSION) && UPROBES_API_VERSION > 1 unsigned long maybe_pc = 0; if (ri) { maybe_pc = uprobe_get_pc(ri, UNW_PC(&info), @@ -48,7 +47,6 @@ static void __stp_stack_print(struct pt_regs *regs, int verbose, int levels, else UNW_PC(&info) = maybe_pc; } -#endif dbug_unwind(1, "ret=%d PC=%lx SP=%lx\n", ret, UNW_PC(&info), UNW_SP(&info)); if (ret == 0) { _stp_func_print(UNW_PC(&info), verbose, 1, tsk); diff --git a/runtime/uprobes/uprobes.c b/runtime/uprobes/uprobes.c index 60c84509..cdb98707 100644 --- a/runtime/uprobes/uprobes.c +++ b/runtime/uprobes/uprobes.c @@ -2596,6 +2596,44 @@ static void uretprobe_set_trampoline(struct uprobe_process *uproc, } } +unsigned long uprobe_get_pc(struct uretprobe_instance *ri, unsigned long pc, + unsigned long sp) +{ + struct uretprobe *rp; + struct uprobe_kimg *uk; + struct uprobe_process *uproc; + unsigned long trampoline_addr; + struct hlist_node *r; + struct uretprobe_instance *ret_inst; + + if (!ri) + return 0; + rp = ri->rp; + uk = (struct uprobe_kimg *)rp->u.kdata; + if (!uk) + return 0; + uproc = uk->ppt->uproc; + if (IS_ERR(uproc->uretprobe_trampoline_addr)) + return pc; + trampoline_addr = (unsigned long)uproc->uretprobe_trampoline_addr; + if (pc != trampoline_addr) + return pc; + r = &ri->hlist; + hlist_for_each_entry_from(ret_inst, r, hlist) { + if (ret_inst->ret_addr == trampoline_addr) + continue; + /* First handler with a stack pointer lower than the + address (or equal) must be the one. */ + if (ret_inst->sp == sp || compare_stack_ptrs(ret_inst->sp, sp)) + return ret_inst->ret_addr; + } + printk(KERN_ERR "Original return address for trampoline not found at " + "0x%lx pid/tgid=%d/%d\n", sp, current->pid, current->tgid); + return 0; +} + +EXPORT_SYMBOL_GPL(uprobe_get_pc); + #else /* ! CONFIG_URETPROBES */ static void uretprobe_handle_entry(struct uprobe *u, struct pt_regs *regs, diff --git a/runtime/uprobes/uprobes.h b/runtime/uprobes/uprobes.h index d542420d..e888f9e8 100644 --- a/runtime/uprobes/uprobes.h +++ b/runtime/uprobes/uprobes.h @@ -95,6 +95,14 @@ extern void unregister_uretprobe(struct uretprobe *rp); /* For PRs 9940, 6852... */ extern void unmap_uprobe(struct uprobe *u); extern void unmap_uretprobe(struct uretprobe *rp); +/* + * Given a program counter, translate it back to the original address + * if it is the address of the trampoline. sp is the stack pointer for + * the frame that corresponds to the address. + */ +extern unsigned long uprobe_get_pc(struct uretprobe_instance *ri, + unsigned long pc, + unsigned long sp); #ifdef UPROBES_IMPLEMENTATION -- cgit From d38b64a1e5b10514b9360b4a59545ad6033ac100 Mon Sep 17 00:00:00 2001 From: "Frank Ch. 
Eigler" Date: Sun, 20 Dec 2009 15:43:53 -0500 Subject: runtime: rhel4 build fix for uretprobe-wannabe stack tracebacks * runtime/stack.c (_stp_stack_print): Comment out *retprobe logic if !CONFIG_UTRACE. --- runtime/stack.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'runtime') diff --git a/runtime/stack.c b/runtime/stack.c index 4dd1dca3..50dde6e1 100644 --- a/runtime/stack.c +++ b/runtime/stack.c @@ -133,13 +133,15 @@ static void _stp_stack_print(struct pt_regs *regs, int verbose, struct kretprobe } _stp_symbol_print((unsigned long)_stp_ret_addr_r(pi)); } else if (ri) { +#ifdef CONFIG_UTRACE /* as a proxy for presence of uprobes */ if (verbose == SYM_VERBOSE_FULL) { _stp_print("Returning from: "); - _stp_usymbol_print(ri->rp->u.vaddr, tsk); + _stp_usymbol_print(ri->rp->u.vaddr, tsk); /* otherwise this dereference fails */ _stp_print("\nReturning to : "); _stp_usymbol_print(ri->ret_addr, tsk); } else _stp_func_print(ri->ret_addr, verbose, 0, tsk); +#endif } else { _stp_print_char(' '); if (tsk) -- cgit From a87e40ba65ede2a884e4bce31ba7a1d87e71f981 Mon Sep 17 00:00:00 2001 From: "Frank Ch. Eigler" Date: Sun, 20 Dec 2009 16:54:27 -0500 Subject: PR10601: unfork deref() * runtime/loc2c-runtime.h: Remove k_ vs u_[store_]deref; share instead. * tapsets.cxx: Remove k_ vs u_ redirection for *deref(). --- runtime/loc2c-runtime.h | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'runtime') diff --git a/runtime/loc2c-runtime.h b/runtime/loc2c-runtime.h index c89d5b2c..de59f0e5 100644 --- a/runtime/loc2c-runtime.h +++ b/runtime/loc2c-runtime.h @@ -428,7 +428,7 @@ static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, in STORE_DEREF_FAULT(ptr); \ }) -#define k_deref(size, addr) ({ \ +#define deref(size, addr) ({ \ intptr_t _i = 0; \ switch (size) { \ case 1: _i = kread((u8 *)(addr)); break; \ @@ -440,7 +440,7 @@ static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, in _i; \ }) -#define k_store_deref(size, addr, value) ({ \ +#define store_deref(size, addr, value) ({ \ switch (size) { \ case 1: kwrite((u8 *)(addr), (value)); break; \ case 2: kwrite((u16 *)(addr), (value)); break; \ @@ -458,7 +458,7 @@ extern void __store_deref_bad(void); #if defined __i386__ -#define k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ int _bad = 0; \ u8 _b; u16 _w; u32 _l; \ @@ -478,7 +478,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -498,7 +498,7 @@ extern void __store_deref_bad(void); #elif defined __x86_64__ -#define k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ int _bad = 0; \ u8 _b; u16 _w; u32 _l; u64 _q; \ @@ -519,7 +519,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -538,7 +538,7 @@ extern void __store_deref_bad(void); }) #elif defined __ia64__ -#define k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v=0; \ @@ -557,7 +557,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad=0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -616,7 +616,7 @@ extern void __store_deref_bad(void); "i"(sizeof(unsigned long))) -#define 
k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v = 0; \ @@ -636,7 +636,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad = 0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -790,7 +790,7 @@ extern void __store_deref_bad(void); : "r" (x), "i" (-EFAULT) \ : "cc") -#define k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ int _bad = 0; \ intptr_t _v=0; \ @@ -808,7 +808,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad=0; \ if (lookup_bad_addr((unsigned long)addr, size)) \ @@ -878,7 +878,7 @@ extern void __store_deref_bad(void); : "cc"); \ }) -#define k_deref(size, addr) \ +#define deref(size, addr) \ ({ \ u8 _b; u16 _w; u32 _l; u64 _q; \ int _bad = 0; \ @@ -915,7 +915,7 @@ extern void __store_deref_bad(void); _v; \ }) -#define k_store_deref(size, addr, value) \ +#define store_deref(size, addr, value) \ ({ \ int _bad = 0; \ int i; \ @@ -961,20 +961,14 @@ extern void __store_deref_bad(void); #else #define kread(ptr) \ - ( (typeof(*(ptr))) k_deref(sizeof(*(ptr)), (ptr)) ) + ( (typeof(*(ptr))) deref(sizeof(*(ptr)), (ptr)) ) #define kwrite(ptr, value) \ - ( k_store_deref(sizeof(*(ptr)), (ptr), (long)(typeof(*(ptr)))(value)) ) + ( store_deref(sizeof(*(ptr)), (ptr), (long)(typeof(*(ptr)))(value)) ) #endif #endif /* STAPCONF_PROBE_KERNEL */ -/* XXX: PR10601 */ -/* Perhaps this should use something like set_fs(USER_DS); k_deref() ; set_fs(KERNEL_DS) - * But then again, the addr_map protections do that already. */ -#define u_deref(a,b) k_deref(a,b) -#define u_store_deref(a,b,c) k_store_deref(a,b,c) - #define deref_string(dst, addr, maxbytes) \ ({ \ -- cgit From f0da6b3c8ba5a26f983714ddfcfd559ea05af5e3 Mon Sep 17 00:00:00 2001 From: "Frank Ch. Eigler" Date: Sun, 20 Dec 2009 22:28:50 -0500 Subject: runtime: more build fix for non-utrace kernels * runtime/stack-i386.c (_stp_stack_print): Make call to uprobe_get_pc() conditional on CONFIG_UTRACE. * runtime/stack-x86_64.c: Ditto. 
--- runtime/stack-i386.c | 2 +- runtime/stack-x86_64.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'runtime') diff --git a/runtime/stack-i386.c b/runtime/stack-i386.c index 66d892d3..d10aeb2f 100644 --- a/runtime/stack-i386.c +++ b/runtime/stack-i386.c @@ -63,7 +63,7 @@ static void __stp_stack_print (struct pt_regs *regs, int verbose, int levels, while (levels && (tsk || !arch_unw_user_mode(&info))) { int ret = unwind(&info, tsk); -#if defined(UPROBES_API_VERSION) && UPROBES_API_VERSION > 1 +#ifdef CONFIG_UTRACE unsigned long maybe_pc = 0; if (ri) { maybe_pc = uprobe_get_pc(ri, UNW_PC(&info), diff --git a/runtime/stack-x86_64.c b/runtime/stack-x86_64.c index 3fc203f7..6aebcb58 100644 --- a/runtime/stack-x86_64.c +++ b/runtime/stack-x86_64.c @@ -38,6 +38,7 @@ static void __stp_stack_print(struct pt_regs *regs, int verbose, int levels, while (levels && (tsk || !arch_unw_user_mode(&info))) { int ret = unwind(&info, tsk); +#ifdef CONFIG_UTRACE unsigned long maybe_pc = 0; if (ri) { maybe_pc = uprobe_get_pc(ri, UNW_PC(&info), @@ -47,6 +48,7 @@ static void __stp_stack_print(struct pt_regs *regs, int verbose, int levels, else UNW_PC(&info) = maybe_pc; } +#undef dbug_unwind(1, "ret=%d PC=%lx SP=%lx\n", ret, UNW_PC(&info), UNW_SP(&info)); if (ret == 0) { _stp_func_print(UNW_PC(&info), verbose, 1, tsk); -- cgit From 261135850dbd4a2efd7d4cbd3d0e3dcde46688ee Mon Sep 17 00:00:00 2001 From: "Frank Ch. Eigler" Date: Sun, 20 Dec 2009 22:49:00 -0500 Subject: brown paper bag typo fixy --- runtime/stack-x86_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'runtime') diff --git a/runtime/stack-x86_64.c b/runtime/stack-x86_64.c index 6aebcb58..030e5ef1 100644 --- a/runtime/stack-x86_64.c +++ b/runtime/stack-x86_64.c @@ -48,7 +48,7 @@ static void __stp_stack_print(struct pt_regs *regs, int verbose, int levels, else UNW_PC(&info) = maybe_pc; } -#undef +#endif dbug_unwind(1, "ret=%d PC=%lx SP=%lx\n", ret, UNW_PC(&info), UNW_SP(&info)); if (ret == 0) { _stp_func_print(UNW_PC(&info), verbose, 1, tsk); -- cgit From ea549ffc2915aa58861637472b12196222673fa2 Mon Sep 17 00:00:00 2001 From: Mark Wielaard Date: Mon, 21 Dec 2009 13:02:19 +0100 Subject: PR11015 Support shared library reloading (in different processes) * runtime/task_finder_vma.c (stap_remove_vma_map_info): Return negative status on failure. (stap_find_vma_map_info): Likewise. (stap_find_vma_map_info_user): New function. (stap_drop_vma_maps): New function. * runtime/sym.h (addr): Renamed to static_addr, to store addresses for sections which are always mapped at the same address. (_stp_module_relocate): Add extra struct task_struct *tsk argument. * runtime/sym.c (_stp_tf_exec_cb): New callback, calls stap_drop_vma_maps. (_stp_tf_mmap_cb): Don't store address in module.section, but call stap_add_vma_map_info() per tsk->group_leader for matched module. Don't register empty/null modules. (_stp_module_relocate): Take extra struct task_struct *tsk argument, cache last tsk used. Only use section->static_addr for none dynamic modules. Use stap_find_vma_map_info_user() to locate dynamic modules. (_stp_mod_sec_lookup): Add extra argument unsigned long *rel_addr to optionally store relative address when module/section found. (_stp_kallsyms_lookup): Use _stp_mod_sec_lookup to find relative address. (_stp_sym_init): Register _stp_tf_exec_cb in stap_task_finder_target. Add error check to see if task finder could be initialized. 
* dwflpp.cxx (emit_address): Pass NULL for kernel/modules and current for user tasks to _stp_module_relocate. * runtime/transport/symbols.c (_stp_do_relocation): Set new static_addr _stp_section field. * runtime/unwind.c (adjustStartLoc): Take new struct task_struct *tsk argument and pass to stap_find_vma_map_info_user and _stp_module_relocate to find adjusted addr. (_stp_search_unwind_hdr): Pass through struct task_struct *tsk. (unwind_frame): Likewise. * tapset/context-symbols.stp (probemod): Add NULL to _stp_mod_sec_lookup call to indicate we aren't interested in relative address. * tapsets.cxx (dwarf_derived_probe_group::emit_module_init): Pass NULL to _stp_module_relocate to indicate kernel/module address. --- runtime/sym.c | 167 +++++++++++++++++++++++++++++--------------- runtime/sym.h | 7 +- runtime/task_finder_vma.c | 71 +++++++++++++++++-- runtime/transport/symbols.c | 2 +- runtime/unwind.c | 31 ++++---- 5 files changed, 201 insertions(+), 77 deletions(-) (limited to 'runtime') diff --git a/runtime/sym.c b/runtime/sym.c index cd0c8a71..06691dc9 100644 --- a/runtime/sym.c +++ b/runtime/sym.c @@ -19,6 +19,22 @@ /* Callback that needs to be registered (in session.unwindsyms_modules) for every user task path for which we might need symbols or unwind info. */ +static int _stp_tf_exec_cb(struct stap_task_finder_target *tgt, + struct task_struct *tsk, + int register_p, + int process_p) +{ +#ifdef DEBUG_TASK_FINDER_VMA + _stp_dbug(__FUNCTION__, __LINE__, + "tsk %d:%d , register_p: %d, process_p: %d\n", + tsk->pid, tsk->tgid, register_p, process_p); +#endif + if (process_p && ! register_p) + stap_drop_vma_maps(tsk); + + return 0; +} + static int _stp_tf_mmap_cb(struct stap_task_finder_target *tgt, struct task_struct *tsk, char *path, @@ -42,40 +58,22 @@ static int _stp_tf_mmap_cb(struct stap_task_finder_target *tgt, if (strcmp(path, _stp_modules[i]->path) == 0) { #ifdef DEBUG_TASK_FINDER_VMA - _stp_dbug(__FUNCTION__, __LINE__, - "vm_cb: matched path %s to module\n", - path); + _stp_dbug(__FUNCTION__, __LINE__, + "vm_cb: matched path %s to module (for sec: %s)\n", + path, _stp_modules[i]->sections[0].name); #endif - module = _stp_modules[i]; - // cheat... - // We are abusing the "first" section address - // here to indicate where the module (actually - // first segment) is loaded (which is why we - // are ignoring the offset). It would be good - // to redesign the stp_module/stp_section - // data structures to better align with the - // actual memory mappings we are interested - // in (especially the "section" naming is - // slightly confusing since what we really - // seem to mean are elf segments (which can - // contain multiple elf sections). PR11015. - if (strcmp(".dynamic", - module->sections[0].name) == 0) - { - if (module->sections[0].addr == 0) - module->sections[0].addr = addr; - else if (module->sections[0].addr != addr) - _stp_error ("Reloaded module '%s'" - " at 0x%lx, was 0x%lx\n", - path, addr, - module->sections[0].addr); - } - break; + module = _stp_modules[i]; + /* XXX We really only need to register .dynamic + sections, but .absolute exes are also necessary + atm. */ + return stap_add_vma_map_info(tsk->group_leader, + addr, + addr + length, + offset, + module); } } } - stap_add_vma_map_info(tsk->group_leader, addr, addr + length, offset, - module); return 0; } @@ -84,15 +82,25 @@ static int _stp_tf_munmap_cb(struct stap_task_finder_target *tgt, unsigned long addr, unsigned long length) { + /* Unconditionally remove vm map info, ignore if not present. 
*/ stap_remove_vma_map_info(tsk->group_leader, addr, addr + length, 0); return 0; } -/* XXX: this needs to be address-space-specific. */ -static unsigned long _stp_module_relocate(const char *module, const char *section, unsigned long offset) +/* Returns absolute address of offset into module/section for given task. + If tsk == NULL module/section is assumed to be absolute/static already + (e.g. kernel, kernel-modules and static executables). Returns zero when + module and section couldn't be found (aren't in memory yet). */ +static unsigned long _stp_module_relocate(const char *module, + const char *section, + unsigned long offset, + struct task_struct *tsk) { + /* XXX This doesn't look thread safe XXX */ static struct _stp_module *last = NULL; static struct _stp_section *last_sec; + static struct task_struct *last_tsk; + static unsigned long last_offset; unsigned i, j; /* if module is -1, we invalidate last. _stp_del_module calls this when modules are deleted. */ @@ -110,8 +118,10 @@ static unsigned long _stp_module_relocate(const char *module, const char *sectio /* Most likely our relocation is in the same section of the same module as the last. */ if (last) { - if (!strcmp(module, last->name) && !strcmp(section, last_sec->name)) { - offset += last_sec->addr; + if (!strcmp(module, last->name) + && !strcmp(section, last_sec->name) + && tsk == last_tsk) { + offset += last_offset; dbug_sym(1, "cached address=%lx\n", offset); return offset; } @@ -124,11 +134,33 @@ static unsigned long _stp_module_relocate(const char *module, const char *sectio for (j = 0; j < last->num_sections; j++) { last_sec = &last->sections[j]; if (!strcmp(section, last_sec->name)) { - - if (last_sec->addr == 0) /* module/section not in memory */ - continue; - - offset += last_sec->addr; + /* mod and sec name match. tsk should match dynamic/static. */ + if (last_sec->static_addr != 0) { + last_offset = last_sec->static_addr; + } else { + if (!tsk) { /* static section, not in memory yet? */ + if (strcmp(".dynamic", section) == 0) + _stp_error("internal error, _stp_module_relocate '%s' " + "section '%s', should not be tsk dynamic\n", + module, section); + last = NULL; + return 0; + } else { /* dynamic section, look up through tsk vma. */ + if (strcmp(".dynamic", last_sec->name) != 0) { + _stp_error("internal error, _stp_module_relocate '%s' " + "section '%s', should not be tsk dynamic\n", + module, section); + return 0; + } + if (stap_find_vma_map_info_user(tsk->group_leader, last, + &last_offset, NULL, + NULL) != 0) { + last = NULL; + return 0; + } + } + } + offset += last_offset; dbug_sym(1, "address=%lx\n", offset); return offset; } @@ -140,11 +172,12 @@ static unsigned long _stp_module_relocate(const char *module, const char *sectio } /* Return module owner and, if sec != NULL, fills in closest section - of the address if found, return NULL otherwise. - XXX: needs to be address-space-specific. */ + of the address if found, return NULL otherwise. Fills in rel_addr + (addr relative to closest section) when given. */ static struct _stp_module *_stp_mod_sec_lookup(unsigned long addr, struct task_struct *task, - struct _stp_section **sec) + struct _stp_section **sec, + unsigned long *rel_addr) { void *user = NULL; unsigned midx = 0; @@ -160,11 +193,21 @@ static struct _stp_module *_stp_mod_sec_lookup(unsigned long addr, { struct _stp_module *m = (struct _stp_module *)user; if (sec) - *sec = &m->sections[0]; // XXX check actual section and relocate + *sec = &m->sections[0]; // dynamic user modules have one section. 
+ if (rel_addr) + { + /* XXX .absolute sections really shouldn't be here... */ + if (strcmp(".dynamic", m->sections[0].name) == 0) + *rel_addr = addr - vm_start; + else + *rel_addr = addr; + } dbug_sym(1, "found section %s in module %s at 0x%lx\n", m->sections[0].name, m->name, vm_start); return m; } + /* XXX should really not fallthrough, but sometimes current is passed + when it shouldn't - see probefunc() for example. */ } for (midx = 0; midx < _stp_num_modules; midx++) @@ -174,12 +217,14 @@ static struct _stp_module *_stp_mod_sec_lookup(unsigned long addr, { unsigned long sec_addr; unsigned long sec_size; - sec_addr = _stp_modules[midx]->sections[secidx].addr; + sec_addr = _stp_modules[midx]->sections[secidx].static_addr; sec_size = _stp_modules[midx]->sections[secidx].size; if (addr >= sec_addr && addr < sec_addr + sec_size) { if (sec) *sec = & _stp_modules[midx]->sections[secidx]; + if (rel_addr) + *rel_addr = addr - sec_addr; return _stp_modules[midx]; } } @@ -188,7 +233,6 @@ static struct _stp_module *_stp_mod_sec_lookup(unsigned long addr, } -/* XXX: needs to be address-space-specific. */ static const char *_stp_kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, const char **modname, @@ -200,13 +244,14 @@ static const char *_stp_kallsyms_lookup(unsigned long addr, unsigned long *symbo struct _stp_section *sec = NULL; struct _stp_symbol *s = NULL; unsigned end, begin = 0; + unsigned long rel_addr = 0; - m = _stp_mod_sec_lookup(addr, task, &sec); + m = _stp_mod_sec_lookup(addr, task, &sec, &rel_addr); if (unlikely (m == NULL || sec == NULL)) return NULL; /* NB: relativize the address to the section. */ - addr -= sec->addr; + addr = rel_addr; end = sec->num_symbols; /* binary search for symbols within the module */ @@ -268,9 +313,9 @@ static int _stp_module_check(void) /* notes end address */ if (!strcmp(m->name, "kernel")) { notes_addr = _stp_module_relocate("kernel", - "_stext", m->build_id_offset); + "_stext", m->build_id_offset, NULL); base_addr = _stp_module_relocate("kernel", - "_stext", 0); + "_stext", 0, NULL); } else { notes_addr = m->notes_sect + m->build_id_offset; base_addr = m->notes_sect; @@ -446,14 +491,20 @@ static void _stp_sym_init(void) static struct stap_task_finder_target vmcb = { // NB: no .pid, no .procname filters here. // This means that we get a system-wide mmap monitoring - // widget while the script is running. (The system-wideness may - // be restricted by stap -c or -x.) But this seems to - // be necessary if we want to to stack tracebacks through arbitrary - // shared libraries. XXX: There may be an optimization opportunity - // for executables (for which the main task-finder callback should be - // sufficient). + // widget while the script is running. (The + // system-wideness may be restricted by stap -c or + // -x.) But this seems to be necessary if we want to + // to stack tracebacks through arbitrary shared libraries. + // + // XXX: There may be an optimization opportunity + // for executables (for which the main task-finder + // callback should be sufficient). + .pid = 0, + .procname = NULL, + .callback = &_stp_tf_exec_cb, .mmap_callback = &_stp_tf_mmap_cb, .munmap_callback = &_stp_tf_munmap_cb, + .mprotect_callback = NULL }; if (! 
initialized) { int rc; @@ -462,8 +513,10 @@ static void _stp_sym_init(void) #ifdef DEBUG_TASK_FINDER_VMA _stp_dbug(__FUNCTION__, __LINE__, "registered vmcb"); #endif - (void) rc; // XXX - initialized = 1; + if (rc != 0) + _stp_error("Couldn't register task finder target: %d\n", rc); + else + initialized = 1; } #endif } diff --git a/runtime/sym.h b/runtime/sym.h index 9f2bdfd0..ce6ab736 100644 --- a/runtime/sym.h +++ b/runtime/sym.h @@ -17,7 +17,7 @@ struct _stp_symbol { struct _stp_section { const char *name; - unsigned long addr; /* XXX: belongs in per-address-space tables */ + unsigned long static_addr; /* XXX non-null if everywhere the same. */ unsigned long size; /* length of the address space module covers. */ struct _stp_symbol *symbols; /* ordered by address */ unsigned num_symbols; @@ -70,7 +70,10 @@ static unsigned long _stp_kretprobe_trampoline; _stp_sym_init () should track vma maps. */ static char _stp_need_vma_tracker; -static unsigned long _stp_module_relocate (const char *module, const char *section, unsigned long offset); +static unsigned long _stp_module_relocate (const char *module, + const char *section, + unsigned long offset, + struct task_struct *tsk); static struct _stp_module *_stp_get_unwind_info (unsigned long addr); #endif /* _STP_SYM_H_ */ diff --git a/runtime/task_finder_vma.c b/runtime/task_finder_vma.c index ed9c6f4f..9a32323f 100644 --- a/runtime/task_finder_vma.c +++ b/runtime/task_finder_vma.c @@ -270,6 +270,7 @@ stap_add_vma_map_info(struct task_struct *tsk, unsigned long vm_start, // Remove the vma entry from the vma hash table. +// Returns -ESRCH if the entry isn't present. static int stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start, unsigned long vm_end, unsigned long vm_pgoff) @@ -277,6 +278,7 @@ stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start, struct hlist_head *head; struct hlist_node *node; struct __stp_tf_vma_entry *entry; + int rc = -ESRCH; // Take a write lock since we are most likely going to delete // after reading. @@ -286,13 +288,15 @@ stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start, if (entry != NULL) { hlist_del(&entry->hlist); __stp_tf_vma_put_free_entry(entry); + rc = 0; } write_unlock_irqrestore(&__stp_tf_vma_lock, flags); - return 0; + return rc; } -// Finds vma info if the vma is present in the vma map hash table. -// Returns ESRCH if not present. The __stp_tf_vma_lock must *not* be +// Finds vma info if the vma is present in the vma map hash table for +// a given task and address (between vm_start and vm_end). +// Returns -ESRCH if not present. The __stp_tf_vma_lock must *not* be // locked before calling this function. static int stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr, @@ -303,7 +307,7 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr, struct hlist_node *node; struct __stp_tf_vma_entry *entry; struct __stp_tf_vma_entry *found_entry = NULL; - int rc = ESRCH; + int rc = -ESRCH; unsigned long flags; read_lock_irqsave(&__stp_tf_vma_lock, flags); @@ -330,3 +334,62 @@ stap_find_vma_map_info(struct task_struct *tsk, unsigned long vm_addr, read_unlock_irqrestore(&__stp_tf_vma_lock, flags); return rc; } + +// Finds vma info if the vma is present in the vma map hash table for +// a given task with the given user handle. +// Returns -ESRCH if not present. The __stp_tf_vma_lock must *not* be +// locked before calling this function. 
+static int +stap_find_vma_map_info_user(struct task_struct *tsk, void *user, + unsigned long *vm_start, unsigned long *vm_end, + unsigned long *vm_pgoff) +{ + struct hlist_head *head; + struct hlist_node *node; + struct __stp_tf_vma_entry *entry; + struct __stp_tf_vma_entry *found_entry = NULL; + int rc = -ESRCH; + + unsigned long flags; + read_lock_irqsave(&__stp_tf_vma_lock, flags); + head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)]; + hlist_for_each_entry(entry, node, head, hlist) { + if (tsk->pid == entry->pid + && user == entry->user) { + found_entry = entry; + break; + } + } + if (found_entry != NULL) { + if (vm_start != NULL) + *vm_start = found_entry->vm_start; + if (vm_end != NULL) + *vm_end = found_entry->vm_end; + if (vm_pgoff != NULL) + *vm_pgoff = found_entry->vm_pgoff; + rc = 0; + } + read_unlock_irqrestore(&__stp_tf_vma_lock, flags); + return rc; +} + +static int +stap_drop_vma_maps(struct task_struct *tsk) +{ + struct hlist_head *head; + struct hlist_node *node; + struct hlist_node *n; + struct __stp_tf_vma_entry *entry; + + unsigned long flags; + write_lock_irqsave(&__stp_tf_vma_lock, flags); + head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)]; + hlist_for_each_entry_safe(entry, node, n, head, hlist) { + if (tsk->pid == entry->pid) { + hlist_del(&entry->hlist); + __stp_tf_vma_put_free_entry(entry); + } + } + write_unlock_irqrestore(&__stp_tf_vma_lock, flags); + return 0; +} diff --git a/runtime/transport/symbols.c b/runtime/transport/symbols.c index a214d1f2..e2f9fd65 100644 --- a/runtime/transport/symbols.c +++ b/runtime/transport/symbols.c @@ -61,7 +61,7 @@ static void _stp_do_relocation(const char __user *buf, size_t count) continue; else { - _stp_modules[mi]->sections[si].addr = msg.address; + _stp_modules[mi]->sections[si].static_addr = msg.address; break; } } /* loop over sections */ diff --git a/runtime/unwind.c b/runtime/unwind.c index 7607770e..c8e3580d 100644 --- a/runtime/unwind.c +++ b/runtime/unwind.c @@ -496,7 +496,7 @@ static char *_stp_eh_enc_name(signed type) // and the elfutils base relocation done during loading of the .dwarf_frame // in translate.cxx. static unsigned long -adjustStartLoc (unsigned long startLoc, +adjustStartLoc (unsigned long startLoc, struct task_struct *tsk, struct _stp_module *m, struct _stp_section *s, unsigned ptrType, int is_ehframe) @@ -521,10 +521,14 @@ adjustStartLoc (unsigned long startLoc, return startLoc; } - if (strcmp (s->name, ".dynamic") == 0) - return startLoc + s->addr; + if (strcmp (s->name, ".dynamic") == 0) { + unsigned long vm_addr; + if (stap_find_vma_map_info_user(tsk->group_leader, m, + &vm_addr, NULL, NULL) == 0) + return startLoc + vm_addr; + } - startLoc = _stp_module_relocate (m->name, s->name, startLoc); + startLoc = _stp_module_relocate (m->name, s->name, startLoc, tsk); startLoc -= m->dwarf_module_base; return startLoc; } @@ -532,7 +536,7 @@ adjustStartLoc (unsigned long startLoc, /* If we previously created an unwind header, then use it now to binary search */ /* for the FDE corresponding to pc. XXX FIXME not currently supported. 
*/ -static u32 *_stp_search_unwind_hdr(unsigned long pc, +static u32 *_stp_search_unwind_hdr(unsigned long pc, struct task_struct *tsk, struct _stp_module *m, struct _stp_section *s) { @@ -581,7 +585,7 @@ static u32 *_stp_search_unwind_hdr(unsigned long pc, do { const u8 *cur = ptr + (num / 2) * (2 * tableSize); startLoc = read_pointer(&cur, cur + tableSize, hdr[3]); - startLoc = adjustStartLoc(startLoc, m, s, hdr[3], 1); + startLoc = adjustStartLoc(startLoc, tsk, m, s, hdr[3], 1); if (pc < startLoc) num /= 2; else { @@ -590,7 +594,7 @@ static u32 *_stp_search_unwind_hdr(unsigned long pc, } } while (startLoc && num > 1); - if (num == 1 && (startLoc = adjustStartLoc(read_pointer(&ptr, ptr + tableSize, hdr[3]), m, s, hdr[3], 1)) != 0 && pc >= startLoc) + if (num == 1 && (startLoc = adjustStartLoc(read_pointer(&ptr, ptr + tableSize, hdr[3]), tsk, m, s, hdr[3], 1)) != 0 && pc >= startLoc) fde = (void *)read_pointer(&ptr, ptr + tableSize, hdr[3]); dbug_unwind(1, "returning fde=%lx startLoc=%lx", (unsigned long) fde, startLoc); @@ -601,6 +605,7 @@ static u32 *_stp_search_unwind_hdr(unsigned long pc, * number in case of an error. A positive return means unwinding is finished; * don't try to fallback to dumping addresses on the stack. */ static int unwind_frame(struct unwind_frame_info *frame, + struct task_struct *tsk, struct _stp_module *m, struct _stp_section *s, void *table, uint32_t table_len, int is_ehframe) { @@ -619,7 +624,7 @@ static int unwind_frame(struct unwind_frame_info *frame, goto err; } - fde = _stp_search_unwind_hdr(pc, m, s); + fde = _stp_search_unwind_hdr(pc, tsk, m, s); dbug_unwind(1, "%s: fde=%lx\n", m->name, (unsigned long) fde); /* found the fde, now set startLoc and endLoc */ @@ -629,7 +634,7 @@ static int unwind_frame(struct unwind_frame_info *frame, ptr = (const u8 *)(fde + 2); ptrType = fde_pointer_type(cie, table, table_len); startLoc = read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType); - startLoc = adjustStartLoc(startLoc, m, s, ptrType, is_ehframe); + startLoc = adjustStartLoc(startLoc, tsk, m, s, ptrType, is_ehframe); dbug_unwind(2, "startLoc=%lx, ptrType=%s\n", startLoc, _stp_eh_enc_name(ptrType)); if (!(ptrType & DW_EH_PE_indirect)) @@ -660,7 +665,7 @@ static int unwind_frame(struct unwind_frame_info *frame, ptr = (const u8 *)(fde + 2); startLoc = read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType); - startLoc = adjustStartLoc(startLoc, m, s, ptrType, is_ehframe); + startLoc = adjustStartLoc(startLoc, tsk, m, s, ptrType, is_ehframe); dbug_unwind(2, "startLoc=%lx, ptrType=%s\n", startLoc, _stp_eh_enc_name(ptrType)); if (!startLoc) continue; @@ -902,18 +907,18 @@ static int unwind(struct unwind_frame_info *frame, struct task_struct *tsk) if (UNW_PC(frame) == 0) return -EINVAL; - m = _stp_mod_sec_lookup (pc, tsk, &s); + m = _stp_mod_sec_lookup (pc, tsk, &s, NULL); if (unlikely(m == NULL)) { dbug_unwind(1, "No module found for pc=%lx", pc); return -EINVAL; } dbug_unwind(1, "trying debug_frame\n"); - res = unwind_frame (frame, m, s, m->debug_frame, + res = unwind_frame (frame, tsk, m, s, m->debug_frame, m->debug_frame_len, 0); if (res != 0) { dbug_unwind(1, "debug_frame failed: %d, trying eh_frame\n", res); - res = unwind_frame (frame, m, s, m->eh_frame, + res = unwind_frame (frame, tsk, m, s, m->eh_frame, m->eh_frame_len, 1); } -- cgit From c6fcc4c1ca5f222cf90bf3968e34a10f09b30be4 Mon Sep 17 00:00:00 2001 From: "Frank Ch. 
Eigler" Date: Mon, 21 Dec 2009 12:03:15 -0500 Subject: PR10601: cleanup for i386, x86-64 * runtime/loc2c-runtime.h (usr_i386): Fix si/di ordering. Add ip. (usr_x86_64): Add rip. (u_*_register): Use kernel standard ARRAY_SIZE() instead of S(). --- runtime/loc2c-runtime.h | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'runtime') diff --git a/runtime/loc2c-runtime.h b/runtime/loc2c-runtime.h index de59f0e5..c75639ee 100644 --- a/runtime/loc2c-runtime.h +++ b/runtime/loc2c-runtime.h @@ -103,7 +103,11 @@ struct usr_regset_lut { unsigned pos; }; -/* DWARF register number -to- user_regset offset/bank mapping table. */ + +/* DWARF register number -to- user_regset bank/offset mapping table. + The register numbers come from the processor-specific ELF documents. + The user-regset bank/offset values come from kernel $ARCH/include/asm/user*.h + or $ARCH/kernel/ptrace.c. */ static const struct usr_regset_lut url_i386[] = { { "ax", NT_PRSTATUS, 6*4 }, { "cx", NT_PRSTATUS, 1*4 }, @@ -111,8 +115,9 @@ static const struct usr_regset_lut url_i386[] = { { "bx", NT_PRSTATUS, 0*4 }, { "sp", NT_PRSTATUS, 15*4 }, { "bp", NT_PRSTATUS, 5*4 }, - { "di", NT_PRSTATUS, 4*4 }, { "si", NT_PRSTATUS, 3*4 }, + { "di", NT_PRSTATUS, 4*4 }, + { "ip", NT_PRSTATUS, 12*4 }, }; static const struct usr_regset_lut url_x86_64[] = { @@ -132,6 +137,11 @@ static const struct usr_regset_lut url_x86_64[] = { { "r13", NT_PRSTATUS, 2*8 }, { "r14", NT_PRSTATUS, 1*8 }, { "r15", NT_PRSTATUS, 0*8 }, + { "rip", NT_PRSTATUS, 16*8 }, + /* XXX: SSE registers %xmm0-%xmm7 */ + /* XXX: SSE2 registers %xmm8-%xmm15 */ + /* XXX: FP registers %st0-%st7 */ + /* XXX: MMX registers %mm0-%mm7 */ }; /* XXX: insert other architectures here. */ @@ -277,16 +287,15 @@ static void ursl_store64 (const struct usr_regset_lut* lut,unsigned lutsize, in } -#define S(array) sizeof(array)/sizeof(array[0]) #if defined (__i386__) -#define u_fetch_register(regno) ursl_fetch32(url_i386, S(url_i386), EM_386, regno) -#define u_store_register(regno,value) ursl_store32(url_i386, S(url_i386), EM_386, regno, value) +#define u_fetch_register(regno) ursl_fetch32(url_i386, ARRAY_SIZE(url_i386), EM_386, regno) +#define u_store_register(regno,value) ursl_store32(url_i386, ARRAY_SIZE(url_i386), EM_386, regno, value) #elif defined (__x86_64__) -#define u_fetch_register(regno) (_stp_probing_32bit_app(c->regs) ? ursl_fetch32(url_i386, S(url_i386), EM_386, regno) : ursl_fetch64(url_x86_64, S(url_x86_64), EM_X86_64, regno)) -#define u_store_register(regno,value) (_stp_probing_32bit_app(c->regs) ? ursl_store2(url_i386, S(url_i386), EM_386, regno, value) : ursl_store64(url_x86_64, S(url_x86_64), EM_X86_64, regno, value)) +#define u_fetch_register(regno) (_stp_probing_32bit_app(c->regs) ? ursl_fetch32(url_i386, ARRAY_SIZE(url_i386), EM_386, regno) : ursl_fetch64(url_x86_64, ARRAY_SIZE(url_x86_64), EM_X86_64, regno)) +#define u_store_register(regno,value) (_stp_probing_32bit_app(c->regs) ? ursl_store2(url_i386, ARRAY_SIZE(url_i386), EM_386, regno, value) : ursl_store64(url_x86_64, ARRAY_SIZE(url_x86_64), EM_X86_64, regno, value)) #else -- cgit