summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--runtime/ChangeLog25
-rw-r--r--runtime/print_new.c6
-rw-r--r--runtime/staprun/ChangeLog6
-rw-r--r--runtime/staprun/staprun_funcs.c2
-rw-r--r--runtime/task_finder.c104
-rw-r--r--runtime/transport/ChangeLog6
-rw-r--r--runtime/transport/utt.c94
-rw-r--r--runtime/transport/utt.h36
-rw-r--r--runtime/utrace_compatibility.h33
9 files changed, 278 insertions, 34 deletions
diff --git a/runtime/ChangeLog b/runtime/ChangeLog
index 6ad7e51f..6672dbb5 100644
--- a/runtime/ChangeLog
+++ b/runtime/ChangeLog
@@ -1,3 +1,28 @@
+2008-09-26 David Smith <dsmith@redhat.com>
+
+ * task_finder.c (__STP_ATTACHED_TASK_EVENTS): Removed UTRACE_STOP,
+ which isn't needed anymore.
+
+2008-09-25 David Smith <dsmith@redhat.com>
+
+ * task_finder.c (__stp_utrace_attach): Added action flag to know
+ to request the thread to be stopped or not.
+ (stap_utrace_attach): Now just calls __stp_utrace_attach().
+ (__stp_utrace_task_finder_target_quiesce): Handles
+ utrace_set_events() errors properly.
+
+ * utrace_compatibility.h (enum utrace_resume_action): Added
+ utrace_resume_action enum.
+ (utrace_control): Added UTRACE_STOP support.
+ (utrace_engine_put): New.
+ (utrace_barrier): New.
+
+2008-09-17 Frank Ch. Eigler <fche@elastic.org>
+
+ PR 6487, 6504.
+ From Masami Hiramatsu <mhiramat@redhat.com>
+ * print_new.c (stp_print_flush): Use new utt_reserve().
+
2008-09-12 Masami Hiramatsu <mhiramat@redhat.com>
BZ 6028
diff --git a/runtime/print_new.c b/runtime/print_new.c
index 07af2e33..4136ecbe 100644
--- a/runtime/print_new.c
+++ b/runtime/print_new.c
@@ -34,13 +34,13 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
#ifdef STP_BULKMODE
{
#ifdef NO_PERCPU_HEADERS
- void *buf = relay_reserve(_stp_utt->rchan, len);
+ void *buf = utt_reserve(_stp_utt, len);
if (likely(buf))
memcpy(buf, pb->buf, len);
else
atomic_inc (&_stp_transport_failures);
#else
- void *buf = relay_reserve(_stp_utt->rchan,
+ void *buf = utt_reserve(_stp_utt,
sizeof(struct _stp_trace) + len);
if (likely(buf)) {
struct _stp_trace t = { .sequence = _stp_seq_inc(),
@@ -56,7 +56,7 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
void *buf;
unsigned long flags;
spin_lock_irqsave(&_stp_print_lock, flags);
- buf = relay_reserve(_stp_utt->rchan, len);
+ buf = utt_reserve(_stp_utt, len);
if (likely(buf))
memcpy(buf, pb->buf, len);
else
diff --git a/runtime/staprun/ChangeLog b/runtime/staprun/ChangeLog
index 21e02e47..c6d75106 100644
--- a/runtime/staprun/ChangeLog
+++ b/runtime/staprun/ChangeLog
@@ -1,3 +1,9 @@
+2008-09-18 David Smith <dsmith@redhat.com>
+
+ PR 6903.
+ * staprun_funcs.c (check_permissions): Instead of checking the
+ effective uid, check the real uid for root permissions.
+
2008-09-06 Frank Ch. Eigler <fche@elastic.org>
* mainloop.c (start_cmd): Rewrite to use wordexp/execvp/ptrace.
diff --git a/runtime/staprun/staprun_funcs.c b/runtime/staprun/staprun_funcs.c
index 8fa95e45..5e7fa102 100644
--- a/runtime/staprun/staprun_funcs.c
+++ b/runtime/staprun/staprun_funcs.c
@@ -307,7 +307,7 @@ int check_permissions(void)
int path_check = 0;
/* If we're root, we can do anything. */
- if (geteuid() == 0)
+ if (getuid() == 0)
return 1;
/* Lookup the gid for group "stapdev" */
diff --git a/runtime/task_finder.c b/runtime/task_finder.c
index 493ca6f7..db7a8f38 100644
--- a/runtime/task_finder.c
+++ b/runtime/task_finder.c
@@ -89,7 +89,7 @@ struct stap_task_finder_target {
size_t pathlen;
/* public: */
- const char *pathname;
+ const char *pathname;
pid_t pid;
stap_task_finder_callback callback;
stap_task_finder_vm_callback vm_callback;
@@ -148,8 +148,8 @@ static int
stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
{
// Since this __stp_task_finder_list is (currently) only
- // written to in one big setup operation before the task
- // finder process is started, we don't need to lock it.
+ // written to in one big setup operation before the task
+ // finder process is started, we don't need to lock it.
struct list_head *node;
struct stap_task_finder_target *tgt = NULL;
int found_node = 0;
@@ -258,6 +258,7 @@ stap_utrace_detach(struct task_struct *tsk,
rc, tsk->pid);
break;
}
+ utrace_engine_put(engine);
}
return rc;
}
@@ -386,7 +387,6 @@ __stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
* events.
*/
#define __STP_ATTACHED_TASK_EVENTS (__STP_TASK_BASE_EVENTS \
- | UTRACE_STOP \
| UTRACE_EVENT(QUIESCE))
#define __STP_ATTACHED_TASK_BASE_EVENTS(tgt) \
@@ -394,9 +394,10 @@ __stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
: __STP_TASK_VM_BASE_EVENTS)
static int
-stap_utrace_attach(struct task_struct *tsk,
- const struct utrace_engine_ops *ops, void *data,
- unsigned long event_flags)
+__stp_utrace_attach(struct task_struct *tsk,
+ const struct utrace_engine_ops *ops, void *data,
+ unsigned long event_flags,
+ enum utrace_resume_action action)
{
struct utrace_attached_engine *engine;
struct mm_struct *mm;
@@ -428,15 +429,51 @@ stap_utrace_attach(struct task_struct *tsk,
}
else {
rc = utrace_set_events(tsk, engine, event_flags);
- if (rc == 0)
+ if (rc == -EINPROGRESS) {
+ /*
+ * It's running our callback, so we have to
+ * synchronize. We can't keep rcu_read_lock,
+ * so the task pointer might die. But it's
+ * safe to call utrace_barrier() even with a
+ * stale task pointer, if we have an engine
+ * ref.
+ */
+ rc = utrace_barrier(tsk, engine);
+ if (rc != 0)
+ _stp_error("utrace_barrier returned error %d on pid %d",
+ rc, (int)tsk->pid);
+ }
+ if (rc == 0) {
debug_task_finder_attach();
+
+ if (action != UTRACE_RESUME) {
+ rc = utrace_control(tsk, engine, UTRACE_STOP);
+ /* EINPROGRESS means we must wait for
+ * a callback, which is what we want. */
+ if (rc != 0 && rc != -EINPROGRESS)
+ _stp_error("utrace_control returned error %d on pid %d",
+ rc, (int)tsk->pid);
+ else
+ rc = 0;
+ }
+
+ }
else
- _stp_error("utrace_set_events returned error %d on pid %d",
+ _stp_error("utrace_set_events2 returned error %d on pid %d",
rc, (int)tsk->pid);
+ utrace_engine_put(engine);
}
return rc;
}
+static int
+stap_utrace_attach(struct task_struct *tsk,
+ const struct utrace_engine_ops *ops, void *data,
+ unsigned long event_flags)
+{
+ return __stp_utrace_attach(tsk, ops, data, event_flags, UTRACE_RESUME);
+}
+
static inline void
__stp_utrace_attach_match_filename(struct task_struct *tsk,
const char * const filename,
@@ -485,9 +522,10 @@ __stp_utrace_attach_match_filename(struct task_struct *tsk,
// isn't set, we can go ahead and call the
// callback.
if (register_p) {
- rc = stap_utrace_attach(tsk, &cb_tgt->ops,
- cb_tgt,
- __STP_ATTACHED_TASK_EVENTS);
+ rc = __stp_utrace_attach(tsk, &cb_tgt->ops,
+ cb_tgt,
+ __STP_ATTACHED_TASK_EVENTS,
+ UTRACE_STOP);
if (rc != 0 && rc != EPERM)
break;
cb_tgt->engine_attached = 1;
@@ -601,8 +639,8 @@ __stp_utrace_task_finder_report_clone(enum utrace_resume_action action,
__stp_tf_handler_start();
// On clone, attach to the child.
- rc = stap_utrace_attach(child, engine->ops, 0,
- __STP_TASK_FINDER_EVENTS);
+ rc = __stp_utrace_attach(child, engine->ops, 0,
+ __STP_TASK_FINDER_EVENTS, UTRACE_RESUME);
if (rc != 0 && rc != EPERM) {
__stp_tf_handler_end();
return UTRACE_RESUME;
@@ -757,6 +795,22 @@ __stp_utrace_task_finder_target_quiesce(enum utrace_resume_action action,
// Turn off quiesce handling
rc = utrace_set_events(tsk, engine,
__STP_ATTACHED_TASK_BASE_EVENTS(tgt));
+
+ if (rc == -EINPROGRESS) {
+ /*
+ * It's running our callback, so we have to
+ * synchronize. We can't keep rcu_read_lock,
+ * so the task pointer might die. But it's
+ * safe to call utrace_barrier() even with
+ * a stale task pointer, if we have an engine ref.
+ */
+ rc = utrace_barrier(tsk, engine);
+ if (rc != 0)
+ _stp_error("utrace_barrier returned error %d on pid %d",
+ rc, (int)tsk->pid);
+ rc = utrace_set_events(tsk, engine,
+ __STP_ATTACHED_TASK_BASE_EVENTS(tgt));
+ }
if (rc != 0)
_stp_error("utrace_set_events returned error %d on pid %d",
rc, (int)tsk->pid);
@@ -1173,13 +1227,14 @@ stap_start_task_finder(void)
size_t mmpathlen;
struct list_head *tgt_node;
- /* Skip over processes other than that specified with
- stap -c or -x. */
- if (_stp_target && tsk->tgid != _stp_target)
- continue;
+ /* Skip over processes other than that specified with
+ * stap -c or -x. */
+ if (_stp_target && tsk->tgid != _stp_target)
+ continue;
- rc = stap_utrace_attach(tsk, &__stp_utrace_task_finder_ops, 0,
- __STP_TASK_FINDER_EVENTS);
+ rc = __stp_utrace_attach(tsk, &__stp_utrace_task_finder_ops, 0,
+ __STP_TASK_FINDER_EVENTS,
+ UTRACE_RESUME);
if (rc == EPERM) {
/* Ignore EPERM errors, which mean this wasn't
* a thread we can attach to. */
@@ -1242,16 +1297,17 @@ stap_start_task_finder(void)
continue;
// Set up events we need for attached tasks.
- rc = stap_utrace_attach(tsk, &cb_tgt->ops,
- cb_tgt,
- __STP_ATTACHED_TASK_EVENTS);
+ rc = __stp_utrace_attach(tsk, &cb_tgt->ops,
+ cb_tgt,
+ __STP_ATTACHED_TASK_EVENTS,
+ UTRACE_STOP);
if (rc != 0 && rc != EPERM)
goto stf_err;
cb_tgt->engine_attached = 1;
}
}
} while_each_thread(grp, tsk);
- stf_err:
+stf_err:
rcu_read_unlock();
_stp_kfree(mmpath_buf);
diff --git a/runtime/transport/ChangeLog b/runtime/transport/ChangeLog
index 693f06d1..42c6fc2a 100644
--- a/runtime/transport/ChangeLog
+++ b/runtime/transport/ChangeLog
@@ -1,3 +1,9 @@
+2008-09-17 Frank Ch. Eigler <fche@elastic.org>
+
+ PR 6487, 6504.
+ From Masami Hiramatsu <mhiramat@redhat.com>
+ * utt.c (utt_switch_subbuf, __utt_wakeup*, utt_reserve): New.
+
2008-07-17 Frank Ch. Eigler <fche@elastic.org>
* symbols.c (_stp_do_relocation): Adapt to stp_module decl changes.
diff --git a/runtime/transport/utt.c b/runtime/transport/utt.c
index 182c1178..b8281bb4 100644
--- a/runtime/transport/utt.c
+++ b/runtime/transport/utt.c
@@ -31,6 +31,96 @@
static int utt_overwrite_flag = 0;
+/*
+ * utt_switch_subbuf - switch to a new sub-buffer
+ *
+ * Most of this function is a verbatim copy of relay_switch_subbuf.
+ */
+size_t utt_switch_subbuf(struct utt_trace *utt, struct rchan_buf *buf,
+ size_t length)
+{
+ void *old, *new;
+ size_t old_subbuf, new_subbuf;
+
+ if (unlikely(buf == NULL))
+ return 0;
+
+ if (unlikely(length > buf->chan->subbuf_size))
+ goto toobig;
+
+ if (buf->offset != buf->chan->subbuf_size + 1) {
+ buf->prev_padding = buf->chan->subbuf_size - buf->offset;
+ old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
+ buf->padding[old_subbuf] = buf->prev_padding;
+ buf->subbufs_produced++;
+ buf->dentry->d_inode->i_size += buf->chan->subbuf_size -
+ buf->padding[old_subbuf];
+ smp_mb();
+ if (waitqueue_active(&buf->read_wait))
+ /*
+ * Calling wake_up_interruptible() and __mod_timer()
+ * from here will deadlock if we happen to be logging
+ * from the scheduler and timer (trying to re-grab
+ * rq->lock/timer->base->lock), so just set a flag.
+ */
+ atomic_set(&utt->wakeup, 1);
+ }
+
+ old = buf->data;
+ new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
+ new = buf->start + new_subbuf * buf->chan->subbuf_size;
+ buf->offset = 0;
+ if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
+ buf->offset = buf->chan->subbuf_size + 1;
+ return 0;
+ }
+ buf->data = new;
+ buf->padding[new_subbuf] = 0;
+
+ if (unlikely(length + buf->offset > buf->chan->subbuf_size))
+ goto toobig;
+
+ return length;
+
+toobig:
+ buf->chan->last_toobig = length;
+ return 0;
+}
+
+static void __utt_wakeup_readers(struct rchan_buf *buf)
+{
+ if (buf && waitqueue_active(&buf->read_wait) &&
+ buf->subbufs_produced != buf->subbufs_consumed)
+ wake_up_interruptible(&buf->read_wait);
+}
+
+static void __utt_wakeup_timer(unsigned long val)
+{
+ struct utt_trace *utt = (struct utt_trace *)val;
+ int i;
+
+ if (atomic_read(&utt->wakeup)) {
+ atomic_set(&utt->wakeup, 0);
+ if (utt->is_global)
+ __utt_wakeup_readers(utt->rchan->buf[0]);
+ else
+ for_each_possible_cpu(i)
+ __utt_wakeup_readers(utt->rchan->buf[i]);
+ }
+
+ mod_timer(&utt->timer, jiffies + UTT_TIMER_INTERVAL);
+}
+
+static void __utt_timer_init(struct utt_trace * utt)
+{
+ atomic_set(&utt->wakeup, 0);
+ init_timer(&utt->timer);
+ utt->timer.expires = jiffies + UTT_TIMER_INTERVAL;
+ utt->timer.function = __utt_wakeup_timer;
+ utt->timer.data = (unsigned long)utt;
+ add_timer(&utt->timer);
+}
+
void utt_set_overwrite(int overwrite)
{
utt_overwrite_flag = overwrite;
@@ -241,6 +331,8 @@ struct utt_trace *utt_trace_setup(struct utt_trace_setup *utts)
goto err;
utt->rchan->private_data = utt;
+ utt->is_global = utts->is_global;
+
utt->trace_state = Utt_trace_setup;
utts->err = 0;
@@ -274,6 +366,7 @@ int utt_trace_startstop(struct utt_trace *utt, int start,
utt->trace_state == Utt_trace_stopped) {
if (trace_seq)
(*trace_seq)++;
+ __utt_timer_init(utt);
smp_mb();
utt->trace_state = Utt_trace_running;
ret = 0;
@@ -281,6 +374,7 @@ int utt_trace_startstop(struct utt_trace *utt, int start,
} else {
if (utt->trace_state == Utt_trace_running) {
utt->trace_state = Utt_trace_stopped;
+ del_timer_sync(&utt->timer);
relay_flush(utt->rchan);
ret = 0;
}
diff --git a/runtime/transport/utt.h b/runtime/transport/utt.h
index b2c9fa9f..fd704009 100644
--- a/runtime/transport/utt.h
+++ b/runtime/transport/utt.h
@@ -15,11 +15,18 @@ struct utt_trace {
atomic_t dropped;
struct dentry *utt_tree_root; /* systemtap */
void *private_data;
+ atomic_t wakeup;
+ struct timer_list timer;
+ int is_global;
};
#define UTT_TRACE_ROOT_NAME_SIZE 64 /* Largest string for a root dir identifier */
#define UTT_TRACE_NAME_SIZE 64 /* Largest string for a trace identifier */
+#ifndef UTT_TIMER_INTERVAL
+#define UTT_TIMER_INTERVAL ((HZ + 99) / 100) /* Wakeup timer interval in jiffies (default 10 ms)*/
+#endif
+
/*
* User setup structure
*/
@@ -39,4 +46,33 @@ extern int utt_trace_startstop(struct utt_trace *utt, int start,
extern void utt_trace_cleanup(struct utt_trace *utt);
extern int utt_trace_remove(struct utt_trace *utt);
+#ifndef STP_OLD_TRANSPORT
+extern size_t utt_switch_subbuf(struct utt_trace *utt, struct rchan_buf *buf,
+ size_t length);
+/**
+ * utt_reserve - reserve slot in channel buffer
+ * @utt: utt channel
+ * @length: number of bytes to reserve
+ *
+ * Returns pointer to reserved slot, NULL if full.
+ *
+ * This function is utt_switch_subbuf version of relay_reserve.
+ */
+static inline void *utt_reserve(struct utt_trace *utt, size_t length)
+{
+ void *reserved;
+ struct rchan_buf *buf = utt->rchan->buf[smp_processor_id()];
+
+ if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
+ length = utt_switch_subbuf(utt, buf, length);
+ if (!length)
+ return NULL;
+ }
+ reserved = buf->data + buf->offset;
+ buf->offset += length;
+
+ return reserved;
+}
+#endif
+
#endif
diff --git a/runtime/utrace_compatibility.h b/runtime/utrace_compatibility.h
index 80037015..27fca250 100644
--- a/runtime/utrace_compatibility.h
+++ b/runtime/utrace_compatibility.h
@@ -24,9 +24,11 @@
#define UTRACE_ORIG_VERSION
-#define UTRACE_RESUME UTRACE_ACTION_RESUME
-#define UTRACE_DETACH UTRACE_ACTION_DETACH
-#define UTRACE_STOP UTRACE_ACTION_QUIESCE
+enum utrace_resume_action {
+ UTRACE_STOP = UTRACE_ACTION_QUIESCE,
+ UTRACE_RESUME = UTRACE_ACTION_RESUME,
+ UTRACE_DETACH = UTRACE_ACTION_DETACH,
+};
static inline struct utrace_attached_engine *
utrace_attach_task(struct task_struct *target, int flags,
@@ -38,11 +40,17 @@ utrace_attach_task(struct task_struct *target, int flags,
static inline int __must_check
utrace_control(struct task_struct *target,
struct utrace_attached_engine *engine,
- unsigned long action)
+ enum utrace_resume_action action)
{
- if (action == UTRACE_DETACH)
+ switch (action) {
+ case UTRACE_DETACH:
return utrace_detach(target, engine);
- return -EINVAL;
+ case UTRACE_STOP:
+ return utrace_set_flags(target, engine,
+ (engine->flags | UTRACE_ACTION_QUIESCE));
+ default:
+ return -EINVAL;
+ }
}
static inline int __must_check
@@ -52,6 +60,19 @@ utrace_set_events(struct task_struct *target,
{
return utrace_set_flags(target, engine, eventmask);
}
+
+static inline void
+utrace_engine_put(struct utrace_attached_engine *engine)
+{
+ return;
+}
+
+static inline int __must_check
+utrace_barrier(struct task_struct *target,
+ struct utrace_attached_engine *engine)
+{
+ return 0;
+}
#endif
#endif /* _UTRACE_COMPATIBILITY_H_ */