summaryrefslogtreecommitdiffstats
path: root/runtime/time.c
diff options
context:
space:
mode:
authorjistone <jistone>2006-06-22 00:48:33 +0000
committerjistone <jistone>2006-06-22 00:48:33 +0000
commitf0278305cd0e133902f9d9f64493063cde41e49d (patch)
tree5ec98c62cdee3fbd9a4267c05d9eab07a728bfa0 /runtime/time.c
parentedaa31752b40245cfc3e3fc966353ba2f120525a (diff)
downloadsystemtap-steved-f0278305cd0e133902f9d9f64493063cde41e49d.tar.gz
systemtap-steved-f0278305cd0e133902f9d9f64493063cde41e49d.tar.xz
systemtap-steved-f0278305cd0e133902f9d9f64493063cde41e49d.zip
2006-06-21 Josh Stone <joshua.i.stone@intel.com>
runtime/
* time.c: Time-estimation with minimal dependency on xtime.
transport/
* transport/transport.c (_stp_handle_start): Initialize timer functions.
* transport/transport.c (_stp_cleanup_and_exit): Teardown timer functions.
tapset/
* timestamp.stp (gettimeofday_us, gettimeofday_ms, gettimeofday_s): Convert to using the runtime-provided _stp_gettimeofday_us().
Diffstat (limited to 'runtime/time.c')
-rw-r--r--runtime/time.c184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/runtime/time.c b/runtime/time.c
new file mode 100644
index 00000000..a1a7accc
--- /dev/null
+++ b/runtime/time.c
@@ -0,0 +1,184 @@
+/* -*- linux-c -*-
+ * time-estimation with minimal dependency on xtime
+ * Copyright (C) 2006 Intel Corporation.
+ *
+ * This file is part of systemtap, and is free software. You can
+ * redistribute it and/or modify it under the terms of the GNU General
+ * Public License (GPL); either version 2, or (at your option) any
+ * later version.
+ */
+
+#include <linux/cpufreq.h>
+
+typedef struct __stp_time_t {
+ /*
+ * A write lock is taken by __stp_time_timer_callback() and
+ * __stp_time_cpufreq_callback(). The timer callback is called from a
+ * softIRQ, and cpufreq callback guarantees that it is not called within
+ * an interrupt context. Thus there should be no opportunity for a
+ * deadlock between writers.
+ *
+ * A read lock is taken by _stp_gettimeofday_us(). There is the potential
+ * for this to occur at any time, so there is a slim chance that this will
+ * happen while the write lock is held, and it will be impossible to get a
+ * read lock. However, we can limit how long we try to get the lock to
+ * avoid a deadlock.
+ *
+ * Note that seqlock is safer than rwlock because some kernels
+ * don't have read_trylock.
+ */
+ seqlock_t lock;
+
+ /* These provide a reference time to correlate cycles to real time */
+ struct timeval base_time;
+ cycles_t base_cycles;
+
+ /* The frequency in MHz of this CPU, for interpolating
+ * cycle counts from the base time. */
+ unsigned int cpufreq;
+
+ /* Callback used to schedule updates of the base_time */
+ struct timer_list timer;
+} stp_time_t;
+
+DEFINE_PER_CPU(stp_time_t, stp_time);
+
+/* Try to estimate the number of CPU cycles in a microsecond - i.e. MHz. This
+ * relies heavily on the accuracy of udelay. By calling udelay twice, we
+ * attempt to account for overhead in the call.
+ */
+static unsigned int
+__stp_estimate_cpufreq(void)
+{
+ cycles_t beg, mid, end;
+ beg = get_cycles(); barrier();
+ udelay(2); barrier();
+ mid = get_cycles(); barrier();
+ udelay(10); barrier();
+ end = get_cycles(); barrier();
+ return (beg - 2*mid + end)/8;
+}
+
+static void
+__stp_time_timer_callback(unsigned long val)
+{
+ unsigned long flags;
+ stp_time_t *time;
+ struct timeval tv;
+ cycles_t cycles;
+
+ do_gettimeofday(&tv);
+ cycles = get_cycles();
+
+ time = &__get_cpu_var(stp_time);
+ write_seqlock_irqsave(&time->lock, flags);
+ time->base_time = tv;
+ time->base_cycles = cycles;
+ write_sequnlock_irqrestore(&time->lock, flags);
+
+ mod_timer(&time->timer, jiffies + 1);
+}
+
+static void
+__stp_init_time(void *info)
+{
+ stp_time_t *time = &__get_cpu_var(stp_time);
+
+ seqlock_init(&time->lock);
+ do_gettimeofday(&time->base_time);
+ time->base_cycles = get_cycles();
+
+ time->cpufreq = cpufreq_get(smp_processor_id()) / 1000;
+ if (!time->cpufreq) {
+ time->cpufreq = __stp_estimate_cpufreq();
+ }
+
+ init_timer(&time->timer);
+ time->timer.expires = jiffies + 1;
+ time->timer.function = __stp_time_timer_callback;
+ add_timer(&time->timer);
+}
+
+static int
+__stp_time_cpufreq_callback(struct notifier_block *self,
+ unsigned long state, void *vfreqs)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct cpufreq_freqs *freqs;
+ unsigned int freq_mhz;
+ stp_time_t *time;
+
+ switch (state) {
+ case CPUFREQ_POSTCHANGE:
+ case CPUFREQ_RESUMECHANGE:
+ freqs = (struct cpufreq_freqs *)vfreqs;
+ freq_mhz = freqs->new / 1000;
+
+ time = &per_cpu(stp_time, freqs->cpu);
+ write_seqlock_irqsave(&time->lock, flags);
+ time->cpufreq = freq_mhz;
+ write_sequnlock_irqrestore(&time->lock, flags);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+struct notifier_block __stp_time_notifier = {
+ .notifier_call = __stp_time_cpufreq_callback,
+};
+
+void
+_stp_kill_time(void)
+{
+ int cpu;
+ for_each_online_cpu(cpu) {
+ stp_time_t *time = &per_cpu(stp_time, cpu);
+ del_timer_sync(&time->timer);
+ }
+ cpufreq_unregister_notifier(&__stp_time_notifier, CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+int
+_stp_init_time(void)
+{
+ int ret = 0;
+
+ if ((ret = on_each_cpu(__stp_init_time, NULL, 0, 1)))
+ return ret;
+
+ return cpufreq_register_notifier(&__stp_time_notifier, CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+int64_t
+_stp_gettimeofday_us(void)
+{
+ struct timeval base;
+ cycles_t last, delta;
+ unsigned int freq;
+ unsigned int seq;
+ int i = 0;
+
+ stp_time_t *time = &__get_cpu_var(stp_time);
+
+ seq = read_seqbegin(&time->lock);
+ base = time->base_time;
+ last = time->base_cycles;
+ freq = time->cpufreq;
+ while (unlikely(read_seqretry(&time->lock, seq))) {
+ if (unlikely(++i >= MAXTRYLOCK))
+ return 0;
+ ndelay(TRYLOCKDELAY);
+ seq = read_seqbegin(&time->lock);
+ base = time->base_time;
+ last = time->base_cycles;
+ freq = time->cpufreq;
+ }
+
+ delta = get_cycles() - last;
+ do_div(delta, freq);
+
+ return (USEC_PER_SEC * (int64_t)base.tv_sec) + base.tv_usec + delta;
+}
+