summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Smith <dsmith@redhat.com>2009-03-13 16:48:58 -0500
committerDavid Smith <dsmith@redhat.com>2009-03-13 16:48:58 -0500
commit7d573b8d318836ef75d1f2888a50da230b7d83cc (patch)
treecceceab094d24dd4d613e557df5453c7111e6c0a
parent325d52059ed170a698dd4d13e2b418e8e9ab0862 (diff)
downloadsystemtap-steved-7d573b8d318836ef75d1f2888a50da230b7d83cc.tar.gz
systemtap-steved-7d573b8d318836ef75d1f2888a50da230b7d83cc.tar.xz
systemtap-steved-7d573b8d318836ef75d1f2888a50da230b7d83cc.zip
Working bulkmode support.
2009-03-13 David Smith <dsmith@redhat.com> * print_new.c (stp_print_flush): Added bulkmode support for new transport. * transport/ring_buffer.c (__stp_find_next_entry): Fixed syntax error in bulkmode code. (_stp_transport_data_fs_init): Changed 'for_each_possible_cpu()' to 'for_each_online_cpu()' so that non-online CPUs won't have a trace file created.
-rw-r--r--runtime/print_new.c97
-rw-r--r--runtime/transport/ring_buffer.c8
2 files changed, 93 insertions, 12 deletions
diff --git a/runtime/print_new.c b/runtime/print_new.c
index 4857b301..f8749842 100644
--- a/runtime/print_new.c
+++ b/runtime/print_new.c
@@ -33,15 +33,95 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
// if (unlikely(!_stp_utt || _stp_utt->trace_state != Utt_trace_running))
// return;
+#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
#ifdef STP_BULKMODE
{
+ struct _stp_entry *entry;
#ifdef NO_PERCPU_HEADERS
- void *buf = utt_reserve(_stp_utt, len);
- if (likely(buf))
- memcpy(buf, pb->buf, len);
- else
- atomic_inc (&_stp_transport_failures);
+ {
+ uint32_t cnt;
+ char *bufp = pb->buf;
+
+ printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+ __FUNCTION__, __LINE__, pb->len, len);
+ while (len > 0) {
+ if (len > MAX_RESERVE_SIZE) {
+ len -= MAX_RESERVE_SIZE;
+ cnt = MAX_RESERVE_SIZE;
+ }
+ else {
+ cnt = len;
+ len = 0;
+ }
+
+ printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+ __FUNCTION__, __LINE__, cnt);
+ entry = _stp_data_write_reserve(cnt);
+ if (likely(entry)) {
+ memcpy(entry->buf, bufp, cnt);
+ _stp_data_write_commit(entry);
+ bufp += cnt;
+ }
+ else {
+ atomic_inc (&_stp_transport_failures);
+ break;
+ }
+ }
+ }
#else
+
+#undef MAX_RESERVE_SIZE
+#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10 - sizeof(struct _stp_trace))
+ {
+ uint32_t cnt;
+ char *bufp = pb->buf;
+ struct _stp_trace t = { .sequence = _stp_seq_inc(),
+ .pdu_len = len};
+
+ printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+ __FUNCTION__, __LINE__, pb->len, len);
+
+ entry = _stp_data_write_reserve(sizeof(struct _stp_trace));
+ if (likely(entry)) {
+ /* prevent unaligned access by using
+ * memcpy() */
+ memcpy(entry->buf, &t, sizeof(t));
+ _stp_data_write_commit(entry);
+ }
+ else {
+ atomic_inc (&_stp_transport_failures);
+ return;
+ }
+
+ while (len > 0) {
+ if (len > MAX_RESERVE_SIZE) {
+ len -= MAX_RESERVE_SIZE;
+ cnt = MAX_RESERVE_SIZE;
+ }
+ else {
+ cnt = len;
+ len = 0;
+ }
+
+ printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+ __FUNCTION__, __LINE__, cnt);
+ entry = _stp_data_write_reserve(cnt);
+ if (likely(entry)) {
+ memcpy(entry->buf, bufp, cnt);
+ _stp_data_write_commit(entry);
+ bufp += cnt;
+ }
+ else {
+ atomic_inc (&_stp_transport_failures);
+ break;
+ }
+ }
+ }
+
+
+
+
+#if 0
void *buf = utt_reserve(_stp_utt,
sizeof(struct _stp_trace) + len);
if (likely(buf)) {
@@ -52,6 +132,7 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
} else
atomic_inc (&_stp_transport_failures);
#endif
+#endif
}
#else
{
@@ -72,7 +153,9 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
uint32_t cnt;
char *bufp = pb->buf;
-#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
+//#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
+ printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+ __FUNCTION__, __LINE__, pb->len, len);
while (len > 0) {
if (len > MAX_RESERVE_SIZE) {
len -= MAX_RESERVE_SIZE;
@@ -83,6 +166,8 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
len = 0;
}
+ printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+ __FUNCTION__, __LINE__, cnt);
entry = _stp_data_write_reserve(cnt);
if (likely(entry)) {
memcpy(entry->buf, bufp, cnt);
diff --git a/runtime/transport/ring_buffer.c b/runtime/transport/ring_buffer.c
index 3ea29f95..1d46c378 100644
--- a/runtime/transport/ring_buffer.c
+++ b/runtime/transport/ring_buffer.c
@@ -4,10 +4,6 @@
#include <linux/poll.h>
#include <linux/cpumask.h>
-#ifdef STP_BULKMODE
-#error "bulkmode support unfinished..."
-#endif
-
static struct ring_buffer *__stp_ring_buffer = NULL;
//DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
@@ -200,7 +196,7 @@ __stp_find_next_entry(long cpu_file, int *ent_cpu, u64 *ent_ts)
* If we are in a per_cpu trace file, don't bother by iterating over
* all cpus and peek directly.
*/
- if (ring_buffer_empty_cpu(buffer, (int)cpu_file))
+ if (ring_buffer_empty_cpu(__stp_ring_buffer, (int)cpu_file))
return NULL;
ent = peek_next_entry(cpu_file, ent_ts);
if (ent_cpu)
@@ -385,7 +381,7 @@ static int _stp_transport_data_fs_init(void)
return rc;
// create file(s)
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
char cpu_file[9]; /* 5(trace) + 3(XXX) + 1(\0) = 9 */
if (cpu > 999 || cpu < 0) {