/* -*- linux-c -*- 
 * Counter Aggregation Functions
 * Copyright (C) 2005-2008 Red Hat Inc.
 *
 * This file is part of systemtap, and is free software.  You can
 * redistribute it and/or modify it under the terms of the GNU General
 * Public License (GPL); either version 2, or (at your option) any
 * later version.
 */

#ifndef _COUNTER_C_
#define _COUNTER_C_

/** @file counter.c
 * @brief Counter Aggregation
 */

/** @addtogroup counter Counter Aggregation
 * A Counter is a 64-bit per-cpu counter.  It is much more efficient than
 * an atomic counter because each CPU updates only its own copy, so there
 * is no lock or cache-line contention between processors on an SMP system.
 * Use it when you want to count things and read the total only rarely;
 * ideally, wait until probe exit time to read the Counter.
 * @{
 */
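
/* A minimal usage sketch, kept as a comment.  The _stp_counter_* calls are
 * the functions defined below; where each fragment runs (initialization,
 * probe handler, exit) is up to the caller:
 *
 *	static Counter reads;
 *
 *	// at probe initialization
 *	reads = _stp_counter_init();
 *	if (reads == NULL)
 *		return -ENOMEM;	// allocation failed
 *
 *	// in the probe handler, once per event
 *	_stp_counter_add(reads, 1);
 *
 *	// at probe exit: read the summed total once, then free the memory
 *	int64_t total = _stp_counter_get(reads, 0);
 *	_stp_counter_free(reads);
 */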

/* The NEED_COUNTER_LOCKS define will probably go away with the next
 * checkin; the locks are only here for testing. */
#ifndef NEED_COUNTER_LOCKS
#define NEED_COUNTER_LOCKS 0
#endif

#if NEED_COUNTER_LOCKS == 1
#define COUNTER_LOCK(c) spin_lock(&c->lock)
#define COUNTER_UNLOCK(c) spin_unlock(&c->lock)
#else
#define COUNTER_LOCK(c) do {} while (0)
#define COUNTER_UNLOCK(c) do {} while (0)
#endif

struct _counter {
	int64_t count;
#if NEED_COUNTER_LOCKS == 1
	spinlock_t lock;
#endif
};

typedef struct _counter *Counter;


/** Initialize a Counter.
 * Call this during probe initialization to create a Counter.
 *
 * @return a Counter, or NULL on error.
 */
static Counter _stp_counter_init (void)
{
	Counter cnt = _stp_alloc_percpu (struct _counter);
#if NEED_COUNTER_LOCKS == 1
	/* _stp_alloc_percpu may fail; only initialize locks on success. */
	if (cnt != NULL) {
		int i;
		stp_for_each_cpu(i) {
			Counter c = per_cpu_ptr (cnt, i);
			spin_lock_init (&c->lock);
		}
	}
#endif
	return cnt;
}

/** Add to a Counter.
 * Adds an int64 value to a Counter.
 *
 * @param cnt Counter
 * @param val int64 value
 */
static void _stp_counter_add (Counter cnt, int64_t val)
{
	Counter c = per_cpu_ptr (cnt, get_cpu());
	COUNTER_LOCK(c);
	c->count += val;
	COUNTER_UNLOCK(c);
	put_cpu();
}

/** Get a Counter's per-cpu value.
 * Get the value of a Counter for a specific CPU.
 *
 * @param cnt Counter
 * @param cpu CPU number
 * @param clear If nonzero, clear the per-cpu value after reading it.
 * @return An int64 value.
 */
static int64_t _stp_counter_get_cpu (Counter cnt, int cpu, int clear)
{
	int64_t val;
	Counter c = per_cpu_ptr (cnt, cpu);
	COUNTER_LOCK(c);
	val = c->count;
	if (clear)
		c->count = 0;
	COUNTER_UNLOCK(c);
	return val;
}

/** Get a Counter's value.
 * Get the value of a Counter.  This is the sum of the per-cpu counts for
 * all CPUs.  Because computing this sum requires reading every per-cpu
 * value, calling it often will hurt performance on multiprocessor systems.
 *
 * The clear parameter is intended for polling situations where the values
 * should be cleared immediately after being read.
 * @param cnt Counter
 * @param clear If nonzero, clear each per-cpu value after reading it.
 * @return An int64 value.
 */
static int64_t _stp_counter_get (Counter cnt, int clear)
{
	int i;
	int64_t sum = 0;

	stp_for_each_cpu(i) {
		Counter c = per_cpu_ptr (cnt, i);
		COUNTER_LOCK(c);
		sum += c->count;
		if (clear)
			c->count = 0;
		COUNTER_UNLOCK(c);
	}
	return sum;
}
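
/* Polling sketch for the clear flag described above.  Reading with
 * clear != 0 returns the count accumulated since the previous read, so
 * repeated calls yield per-interval deltas instead of a running total.
 * The timer callback mentioned is a hypothetical caller:
 *
 *	// called periodically, e.g. from a timer
 *	int64_t events_this_interval = _stp_counter_get(cnt, 1);
 */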

/** Free a Counter.
 * @param cnt Counter
 */
static void _stp_counter_free (Counter cnt)
{
	_stp_free_percpu (cnt);
}

/** @} */

#undef COUNTER_LOCK
#undef COUNTER_UNLOCK
#endif /* _COUNTER_C_ */