From 7d573b8d318836ef75d1f2888a50da230b7d83cc Mon Sep 17 00:00:00 2001
From: David Smith
Date: Fri, 13 Mar 2009 16:48:58 -0500
Subject: Working bulkmode support.

2009-03-13  David Smith

	* print_new.c (stp_print_flush): Added bulkmode support for new
	transport.
	* transport/ring_buffer.c (__stp_find_next_entry): Fixed syntax
	error in bulkmode code.
	(_stp_transport_data_fs_init): Changed 'for_each_possible_cpu()'
	to 'for_each_online_cpu()' so that non-online CPUs won't have a
	trace file created.
---
 runtime/print_new.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 91 insertions(+), 6 deletions(-)

(limited to 'runtime/print_new.c')

diff --git a/runtime/print_new.c b/runtime/print_new.c
index 4857b301..f8749842 100644
--- a/runtime/print_new.c
+++ b/runtime/print_new.c
@@ -33,15 +33,95 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
 //	if (unlikely(!_stp_utt || _stp_utt->trace_state != Utt_trace_running))
 //		return;
 
+#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
 #ifdef STP_BULKMODE
 	{
+		struct _stp_entry *entry;
 #ifdef NO_PERCPU_HEADERS
-		void *buf = utt_reserve(_stp_utt, len);
-		if (likely(buf))
-			memcpy(buf, pb->buf, len);
-		else
-			atomic_inc (&_stp_transport_failures);
+	    {
+		uint32_t cnt;
+		char *bufp = pb->buf;
+
+		printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+		       __FUNCTION__, __LINE__, pb->len, len);
+		while (len > 0) {
+			if (len > MAX_RESERVE_SIZE) {
+				len -= MAX_RESERVE_SIZE;
+				cnt = MAX_RESERVE_SIZE;
+			}
+			else {
+				cnt = len;
+				len = 0;
+			}
+
+			printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+			       __FUNCTION__, __LINE__, cnt);
+			entry = _stp_data_write_reserve(cnt);
+			if (likely(entry)) {
+				memcpy(entry->buf, bufp, cnt);
+				_stp_data_write_commit(entry);
+				bufp += cnt;
+			}
+			else {
+				atomic_inc (&_stp_transport_failures);
+				break;
+			}
+		}
+	    }
 #else
+
+#undef MAX_RESERVE_SIZE
+#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10 - sizeof(struct _stp_trace))
+	    {
+		uint32_t cnt;
+		char *bufp = pb->buf;
+		struct _stp_trace t = {	.sequence = _stp_seq_inc(),
+					.pdu_len = len};
+
+		printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+		       __FUNCTION__, __LINE__, pb->len, len);
+
+		entry = _stp_data_write_reserve(sizeof(struct _stp_trace));
+		if (likely(entry)) {
+			/* prevent unaligned access by using
+			 * memcpy() */
+			memcpy(entry->buf, &t, sizeof(t));
+			_stp_data_write_commit(entry);
+		}
+		else {
+			atomic_inc (&_stp_transport_failures);
+			return;
+		}
+
+		while (len > 0) {
+			if (len > MAX_RESERVE_SIZE) {
+				len -= MAX_RESERVE_SIZE;
+				cnt = MAX_RESERVE_SIZE;
+			}
+			else {
+				cnt = len;
+				len = 0;
+			}
+
+			printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+			       __FUNCTION__, __LINE__, cnt);
+			entry = _stp_data_write_reserve(cnt);
+			if (likely(entry)) {
+				memcpy(entry->buf, bufp, cnt);
+				_stp_data_write_commit(entry);
+				bufp += cnt;
+			}
+			else {
+				atomic_inc (&_stp_transport_failures);
+				break;
+			}
+		}
+	    }
+
+
+
+
+#if 0
 		void *buf = utt_reserve(_stp_utt,
 					sizeof(struct _stp_trace) + len);
 		if (likely(buf)) {
@@ -51,6 +131,7 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
 			memcpy(buf + sizeof(t), pb->buf, len);
 		} else
 			atomic_inc (&_stp_transport_failures);
+#endif
 #endif
 	}
 #else
@@ -72,7 +153,9 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
 		uint32_t cnt;
 		char *bufp = pb->buf;
 
-#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
+//#define MAX_RESERVE_SIZE (4080 /*BUF_PAGE_SIZE*/ - sizeof(struct _stp_entry) - 10)
+		printk(KERN_ERR "%s:%d - flushing %d(%d) bytes\n",
+		       __FUNCTION__, __LINE__, pb->len, len);
 		while (len > 0) {
 			if (len > MAX_RESERVE_SIZE) {
 				len -= MAX_RESERVE_SIZE;
@@ -83,6 +166,8 @@ void EXPORT_FN(stp_print_flush) (_stp_pbuf *pb)
 				len = 0;
 			}
 
+			printk(KERN_ERR "%s:%d - reserving %d bytes\n",
+			       __FUNCTION__, __LINE__, cnt);
 			entry = _stp_data_write_reserve(cnt);
 			if (likely(entry)) {
 				memcpy(entry->buf, bufp, cnt);
--
cgit
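
For readers tracing the new flush path, here is a minimal user-space sketch of the
chunking scheme the bulkmode branches above rely on: the payload is carved into pieces
no larger than MAX_RESERVE_SIZE and each piece is written with a reserve/commit pair.
The entry type and the write_reserve()/write_commit() helpers below are simplified
stand-ins for the runtime's struct _stp_entry, _stp_data_write_reserve() and
_stp_data_write_commit() (a plain malloc() takes the place of the ring buffer), so
treat it as an illustration of the loop's shape, not as the runtime implementation.

/* Stand-alone sketch of the chunked flush in stp_print_flush() above.
 * write_reserve()/write_commit() are stubs standing in for the SystemTap
 * runtime's _stp_data_write_reserve()/_stp_data_write_commit(); only the
 * splitting of a print buffer into MAX_RESERVE_SIZE-sized pieces is the
 * point of the example. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_PAGE_SIZE 4080                    /* page size noted in the patch */

struct entry {                                /* stand-in for struct _stp_entry */
	uint32_t len;
	char buf[];
};

#define MAX_RESERVE_SIZE (BUF_PAGE_SIZE - sizeof(struct entry) - 10)

static struct entry *write_reserve(uint32_t len)
{
	/* stub reserve: malloc() instead of a ring-buffer reservation */
	struct entry *e = malloc(sizeof(*e) + len);
	if (e)
		e->len = len;
	return e;
}

static void write_commit(struct entry *e)
{
	/* stub commit: just report the chunk size and release it */
	printf("committed %u bytes\n", (unsigned)e->len);
	free(e);
}

/* Same shape as the bulkmode loop in the patch: carve the payload into
 * chunks no larger than MAX_RESERVE_SIZE, reserve and commit one entry
 * per chunk, and stop on the first failed reservation. */
static void flush(const char *bufp, uint32_t len)
{
	while (len > 0) {
		uint32_t cnt;

		if (len > MAX_RESERVE_SIZE) {
			len -= MAX_RESERVE_SIZE;
			cnt = MAX_RESERVE_SIZE;
		} else {
			cnt = len;
			len = 0;
		}

		struct entry *e = write_reserve(cnt);
		if (!e)
			break;   /* the patch counts this in _stp_transport_failures */
		memcpy(e->buf, bufp, cnt);
		write_commit(e);
		bufp += cnt;
	}
}

int main(void)
{
	char payload[10000];

	memset(payload, 'x', sizeof(payload));
	flush(payload, sizeof(payload));      /* two full chunks plus a remainder */
	return 0;
}

Keeping each reservation below one ring-buffer page is what lets an arbitrarily large
print buffer flush through fixed-size entries; a failed reservation abandons the rest
of the payload, which is why the patch records it as a transport failure rather than
retrying.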