diff options
author | hunt <hunt> | 2005-12-07 20:02:04 +0000 |
---|---|---|
committer | hunt <hunt> | 2005-12-07 20:02:04 +0000 |
commit | 953c5ad1c72d55e3c017e7975becfa175ccdf7f5 (patch) | |
tree | 27b47f177cdf09bf266a60f1bff9113953270ee4 | |
parent | 872d26246196a87da8551661635cce52c8e5ed3a (diff) | |
download | systemtap-steved-953c5ad1c72d55e3c017e7975becfa175ccdf7f5.tar.gz systemtap-steved-953c5ad1c72d55e3c017e7975becfa175ccdf7f5.tar.xz systemtap-steved-953c5ad1c72d55e3c017e7975becfa175ccdf7f5.zip |
2005-12-07 Martin Hunt <hunt@redhat.com>
PR1923
* map.h (struct map_root): Remove membuf.
(struct pmap): Define.
(PMAP): Declare.
* map.c (_stp_map_init): Use kmalloc() to allocate individual
nodes instead of using vmalloc() to allocate one big chunk.
(_stp_map_new): Use kmalloc.
(_stp_pmap_new): Use kmalloc. Return a PMAP.
(__stp_map_del): New function. Free all the nodes in a map.
(_stp_map_del): Call __stp_map_del() then free map struct.
(_stp_pmap_del): Takes a PMAP. Calls __stp_map_del() for
each cpu.
(_stp_pmap_printn_cpu): Change arg to PMAP.
(_stp_pmap_agg): Change arg to PMAP.
(_stp_pmap_get_agg): Change arg to PMAP.
* map-stat.c (_stp_pmap_new_hstat_linear): Use PMAP
instead of MAP. Fix allocations.
(_stp_pmap_new_hstat_log): Ditto.
* pmap-gen.c: Fix all functions to take or return PMAPS
instead of MAPS.
* alloc.c: Remove everything except kmalloc_node().
All runtime code now uses kmalloc() directly.
-rw-r--r-- | runtime/ChangeLog | 28 | ||||
-rw-r--r-- | runtime/alloc.c | 187 | ||||
-rw-r--r-- | runtime/map-stat.c | 26 | ||||
-rw-r--r-- | runtime/map.c | 148 | ||||
-rw-r--r-- | runtime/map.h | 21 | ||||
-rw-r--r-- | runtime/pmap-gen.c | 50 |
6 files changed, 165 insertions, 295 deletions
diff --git a/runtime/ChangeLog b/runtime/ChangeLog index e647f0b8..1e0ae778 100644 --- a/runtime/ChangeLog +++ b/runtime/ChangeLog @@ -1,3 +1,31 @@ +2005-12-07 Martin Hunt <hunt@redhat.com> + PR1923 + * map.h (struct map_root): Remove membuf. + (struct pmap): Define. + (PMAP): Declare. + + * map.c (_stp_map_init): Use kmalloc() to allocate individual + nodes instead of using vmalloc() to allocate one big chunk. + (_stp_map_new): Use kmalloc. + (_stp_pmap_new): Use kmalloc. Return a PMAP. + (__stp_map_del): New function. Free all the nodes in a map. + (_stp_map_del): Call __stp_map_del() then free map struct. + (_stp_pmap_del): Takes a PMAP. Calls __stp_map_del() for + each cpu. + (_stp_pmap_printn_cpu): Change arg to PMAP. + (_stp_pmap_agg): Change arg to PMAP. + (_stp_pmap_get_agg): Change arg to PMAP. + + * map-stat.c (_stp_pmap_new_hstat_linear): Use PMAP + instead of MAP. Fix allocations. + (_stp_pmap_new_hstat_log): Ditto. + + * pmap-gen.c Fix all functions to take or return PMAPS + instead of MAPS. + + * alloc.c: Remove everything except kmalloc_node(). + All runtime code now uses kmalloc() directly. + 2005-11-30 Martin Hunt <hunt@redhat.com> * io.c (_stp_exit): To prevent any possible interactions diff --git a/runtime/alloc.c b/runtime/alloc.c index 9f72a65b..d52c6080 100644 --- a/runtime/alloc.c +++ b/runtime/alloc.c @@ -11,196 +11,11 @@ #ifndef _ALLOC_C_ #define _ALLOC_C_ -/** @file alloc.c - * @brief Memory functions. - */ -/** @addtogroup alloc Memory Functions - * Basic malloc/calloc/free functions. These will be changed so - * that memory allocation errors will call a handler. The default will - * send a signal to the user-space daemon that will trigger the module to - * be unloaded. - * @{ - */ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) -/** - * vmalloc_node - allocate virtually contiguous memory - * - * @size: allocation size - * @node: preferred node - * - * This vmalloc variant try to allocate memory from a preferred node. 
- * This code is from Eric Dumazet, posted to the LKML. - * FIXME: The version in the mm kernel is different. Should probably - * switch if is is easily backported. - */ -#ifdef CONFIG_NUMA -/* Until we get something working */ -#define vmalloc_node(size,node) vmalloc(size) -#else -#define vmalloc_node(size,node) vmalloc(size) -#endif /* CONFIG_NUMA */ -#endif /* LINUX_VERSION_CODE */ +/* does this file really need to exist? */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) #define kmalloc_node(size,flags,node) kmalloc(size,flags) #endif /* LINUX_VERSION_CODE */ -/** Allocates memory within a probe. - * This is used for small allocations from within a running - * probe where the process cannot sleep. - * @param len Number of bytes to allocate. - * @return a valid pointer on success or NULL on failure. - * @note Not currently used by the runtime. Deprecate? - */ - -void *_stp_alloc(size_t len) -{ - void *ptr = kmalloc(len, GFP_ATOMIC); - if (unlikely(ptr == NULL)) - _stp_error("_stp_alloc failed.\n"); - return ptr; -} - -void *_stp_alloc_cpu(size_t len, int cpu) -{ - void *ptr = kmalloc_node(len, GFP_ATOMIC, cpu_to_node(cpu)); - if (unlikely(ptr == NULL)) - _stp_error("_stp_alloc failed.\n"); - return ptr; -} - -/** Allocates and clears memory within a probe. - * This is used for small allocations from within a running - * probe where the process cannot sleep. - * @param len Number of bytes to allocate. - * @return a valid pointer on success or NULL on failure. - * @note Not currently used by the runtime. Deprecate? - */ - -void *_stp_calloc(size_t len) -{ - void *ptr = _stp_alloc(len); - if (likely(ptr)) - memset(ptr, 0, len); - return ptr; -} - -/** Allocates and clears memory outside a probe. - * This is typically used in the module initialization to - * allocate new maps, lists, etc. - * @param len Number of bytes to allocate. - * @return a valid pointer on success or NULL on failure. 
- */ - -void *_stp_valloc(size_t len) -{ - void *ptr = vmalloc(len); - if (likely(ptr)) - memset(ptr, 0, len); - else - _stp_error("_stp_valloc failed.\n"); - return ptr; -} - -void *_stp_valloc_cpu(size_t len, int cpu) -{ - void *ptr = vmalloc_node(len, cpu_to_node(cpu)); - if (likely(ptr)) - memset(ptr, 0, len); - else - _stp_error("_stp_valloc failed.\n"); - return ptr; -} - -struct _stp_percpu_data { - void *ptrs[NR_CPUS]; - void *data; -}; - -/** - * __stp_valloc_percpu - allocate one copy of the object for every present - * cpu in the system, using vmalloc and zeroing them. - * Objects should be dereferenced using the per_cpu_ptr macro only. - * - * @size: how many bytes of memory are required. - * @align: the alignment, which can't be greater than SMP_CACHE_BYTES. - */ -static void *__stp_valloc_percpu(size_t size, size_t align) -{ - int i; - struct _stp_percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); - - if (!pdata) - return NULL; - - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - pdata->ptrs[i] = vmalloc_node(size, cpu_to_node(i)); - - if (!pdata->ptrs[i]) - goto unwind_oom; - memset(pdata->ptrs[i], 0, size); - } - - /* Catch derefs w/o wrappers */ - return (void *) (~(unsigned long) pdata); - -unwind_oom: - while (--i >= 0) { - if (!cpu_possible(i)) - continue; - vfree(pdata->ptrs[i]); - } - kfree(pdata); - return NULL; -} - -void _stp_vfree_percpu(const void *objp) -{ - int i; - struct _stp_percpu_data *p = (struct _stp_percpu_data *) (~(unsigned long) objp); - - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - vfree(p->ptrs[i]); - } - kfree(p); -} - -#define _stp_valloc_percpu(type) \ - ((type *)(__stp_valloc_percpu(sizeof(type), __alignof__(type)))) - -#define _stp_percpu_dptr(ptr) (((struct _stp_percpu_data *)~(unsigned long)(ptr))->data) - -#define _stp_per_cpu_ptr(ptr, cpu) \ -({ \ - struct _stp_percpu_data *__p = (struct _stp_percpu_data *)~(unsigned long)(ptr); \ - 
(__typeof__(ptr))__p->ptrs[(cpu)]; \ -}) - -/** Frees memory allocated by _stp_alloc or _stp_calloc. - * @param ptr pointer to memory to free - * @note Not currently used by the runtime. Deprecate? - */ - -void _stp_free(void *ptr) -{ - if (likely(ptr)) - kfree(ptr); -} - -/** Frees memory allocated by _stp_valloc. - * @param ptr pointer to memory to free - */ - -void _stp_vfree(void *ptr) -{ - if (likely(ptr)) - vfree(ptr); -} -/** @} */ #endif /* _ALLOC_C_ */ diff --git a/runtime/map-stat.c b/runtime/map-stat.c index c9ba4b02..6634a2b7 100644 --- a/runtime/map-stat.c +++ b/runtime/map-stat.c @@ -64,9 +64,9 @@ static MAP _stp_map_new_hstat_linear (unsigned max_entries, int ksize, int start return m; } -static MAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int start, int stop, int interval) +static PMAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int start, int stop, int interval) { - MAP map; + PMAP pmap; int size; int buckets = (stop - start) / interval; if ((stop - start) % interval) buckets++; @@ -74,12 +74,12 @@ static MAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int star /* add size for buckets */ size = buckets * sizeof(int64_t) + sizeof(stat); - map = _stp_pmap_new (max_entries, STAT, ksize, size); - if (map) { + pmap = _stp_pmap_new (max_entries, STAT, ksize, size); + if (pmap) { int i; MAP m; for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = (MAP)per_cpu_ptr (pmap->map, i); m->hist.type = HIST_LINEAR; m->hist.start = start; m->hist.stop = stop; @@ -87,33 +87,33 @@ static MAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int star m->hist.buckets = buckets; } /* now set agg map params */ - m = _stp_percpu_dptr(map); + m = &pmap->agg; m->hist.type = HIST_LINEAR; m->hist.start = start; m->hist.stop = stop; m->hist.interval = interval; m->hist.buckets = buckets; } - return map; + return pmap; } -static MAP _stp_pmap_new_hstat_log (unsigned max_entries, int key_size, int buckets) 
+static PMAP _stp_pmap_new_hstat_log (unsigned max_entries, int key_size, int buckets) { /* add size for buckets */ int size = buckets * sizeof(int64_t) + sizeof(stat); - MAP map = _stp_pmap_new (max_entries, STAT, key_size, size); - if (map) { + PMAP pmap = _stp_pmap_new (max_entries, STAT, key_size, size); + if (pmap) { int i; MAP m; for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = (MAP)per_cpu_ptr (pmap->map, i); m->hist.type = HIST_LOG; m->hist.buckets = buckets; } /* now set agg map params */ - m = _stp_percpu_dptr(map); + m = &pmap->agg; m->hist.type = HIST_LOG; m->hist.buckets = buckets; } - return map; + return pmap; } diff --git a/runtime/map.c b/runtime/map.c index 556cd85f..fa4150fe 100644 --- a/runtime/map.c +++ b/runtime/map.c @@ -180,7 +180,6 @@ static int _stp_map_init(MAP m, unsigned max_entries, int type, int key_size, in { int size; - INIT_LIST_HEAD(&m->head); m->maxnum = max_entries; m->type = type; if (type >= END) { @@ -190,9 +189,6 @@ static int _stp_map_init(MAP m, unsigned max_entries, int type, int key_size, in if (max_entries) { void *tmp; int i; - struct list_head *e; - - INIT_LIST_HEAD(&m->pool); /* size is the size of the map_node. */ /* add space for the value. 
*/ @@ -203,78 +199,89 @@ static int _stp_map_init(MAP m, unsigned max_entries, int type, int key_size, in data_size = ALIGN(data_size,4); size = key_size + data_size; - if (cpu < 0) - tmp = _stp_valloc(max_entries * size); - else - tmp = _stp_valloc_cpu(max_entries * size, cpu); - if (!tmp) { - _stp_error("Allocating memory while creating map failed.\n"); - return -1; - } - - for (i = max_entries - 1; i >= 0; i--) { - e = i * size + tmp; - dbug ("e=%lx\n", (long)e); - list_add(e, &m->pool); - ((struct map_node *)e)->map = m; + for (i = 0; i < max_entries; i++) { + if (cpu < 0) + tmp = kmalloc(size, GFP_KERNEL); + else + tmp = kmalloc_node(size, GFP_KERNEL, cpu); + + if (!tmp) { + _stp_error("Allocating memory while creating map failed.\n"); + return -1; + } + + dbug ("allocated %lx\n", (long)tmp); + list_add((struct list_head *)tmp, &m->pool); + ((struct map_node *)tmp)->map = m; } - m->membuf = tmp; } if (type == STAT) m->hist.type = HIST_NONE; return 0; -} + } static MAP _stp_map_new(unsigned max_entries, int type, int key_size, int data_size) { - MAP m = (MAP) _stp_valloc(sizeof(struct map_root)); + MAP m = (MAP) kmalloc(sizeof(struct map_root), GFP_KERNEL); if (m == NULL) return NULL; + + memset (m, 0, sizeof(struct map_root)); + INIT_LIST_HEAD(&m->pool); + INIT_LIST_HEAD(&m->head); if (_stp_map_init(m, max_entries, type, key_size, data_size, -1)) { - _stp_vfree(m); + _stp_map_del(m); return NULL; } return m; } -static MAP _stp_pmap_new(unsigned max_entries, int type, int key_size, int data_size) +static PMAP _stp_pmap_new(unsigned max_entries, int type, int key_size, int data_size) { - int i, failed; + int i; MAP map, m; - map = (MAP) _stp_valloc_percpu (struct map_root); - if (map == NULL) + PMAP pmap = (PMAP) kmalloc(sizeof(struct pmap), GFP_KERNEL); + if (pmap == NULL) return NULL; + memset (pmap, 0, sizeof(struct pmap)); + pmap->map = map = (MAP) alloc_percpu (struct map_root); + if (map == NULL) + goto err; + + /* initialize the memory lists first so if 
allocations fail */ + /* at some point, it is easy to clean up. */ + for_each_cpu(i) { + m = per_cpu_ptr (map, i); + INIT_LIST_HEAD(&m->pool); + INIT_LIST_HEAD(&m->head); + } + INIT_LIST_HEAD(&pmap->agg.pool); + INIT_LIST_HEAD(&pmap->agg.head); + for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = per_cpu_ptr (map, i); if (_stp_map_init(m, max_entries, type, key_size, data_size, i)) { - failed = i; - goto err; + goto err1; } } - /* now create a copy of the map data for aggregation */ - failed = i + 1; - m = (MAP) _stp_valloc(sizeof(struct map_root)); - if (m == NULL) - goto err; - _stp_percpu_dptr(map) = m; - if (_stp_map_init(m, max_entries, type, key_size, data_size, -1)) + if (_stp_map_init(&pmap->agg, max_entries, type, key_size, data_size, -1)) goto err1; - return map; + return pmap; + err1: - _stp_vfree(m); -err: for_each_cpu(i) { - if (i >= failed) - break; - _stp_vfree(m->membuf); + m = per_cpu_ptr (map, i); + __stp_map_del(m); } - _stp_vfree_percpu(map); + free_percpu(map); +err: + kfree(pmap); return NULL; } @@ -348,6 +355,24 @@ void _stp_map_clear(MAP map) } } + +static void __stp_map_del(MAP map) +{ + struct list_head *p, *tmp; + + /* free unused pool */ + list_for_each_safe(p, tmp, &map->pool) { + list_del(p); + kfree(p); + } + + /* free used list */ + list_for_each_safe(p, tmp, &map->head) { + list_del(p); + kfree(p); + } +} + /** Deletes a map. * Deletes a map, freeing all memory in all elements. Normally done only when the module exits. 
* @param map @@ -357,28 +382,29 @@ void _stp_map_del(MAP map) { if (map == NULL) return; - _stp_vfree(map->membuf); - _stp_vfree(map); + + __stp_map_del(map); + + kfree(map); } -void _stp_pmap_del(MAP map) +void _stp_pmap_del(PMAP pmap) { int i; - MAP m; - if (map == NULL) + if (pmap == NULL) return; for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); - _stp_vfree(m->membuf); + MAP m = per_cpu_ptr (pmap->map, i); + __stp_map_del(m); } + free_percpu(pmap->map); - m = _stp_percpu_dptr(map); - _stp_vfree(m->membuf); - _stp_vfree(m); - - _stp_vfree_percpu(map); + /* free agg map elements */ + __stp_map_del(&pmap->agg); + + kfree(pmap); } /* sort keynum values */ @@ -709,9 +735,9 @@ void _stp_map_printn (MAP map, int n, const char *fmt) */ #define _stp_map_print(map,fmt) _stp_map_printn(map,0,fmt) -void _stp_pmap_printn_cpu (MAP map, int n, const char *fmt, int cpu) +void _stp_pmap_printn_cpu (PMAP pmap, int n, const char *fmt, int cpu) { - MAP m = _stp_per_cpu_ptr (map, cpu); + MAP m = per_cpu_ptr (pmap->map, cpu); _stp_map_printn (m, n, fmt); } @@ -811,7 +837,7 @@ static void _stp_add_agg(struct map_node *aptr, struct map_node *ptr) * @param map A pointer to a pmap. * @returns a pointer to an aggregated map. */ -MAP _stp_pmap_agg (MAP map) +MAP _stp_pmap_agg (PMAP pmap) { int i, hash; MAP m, agg; @@ -819,14 +845,14 @@ MAP _stp_pmap_agg (MAP map) struct hlist_head *head, *ahead; struct hlist_node *e, *f; - agg = _stp_percpu_dptr(map); + agg = &pmap->agg; /* FIXME. we either clear the aggregation map or clear each local map */ /* every time we aggregate. which would be best? */ _stp_map_clear (agg); for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = per_cpu_ptr (pmap->map, i); /* walk the hash chains. */ for (hash = 0; hash < HASH_TABLE_SIZE; hash++) { head = &m->hashes[hash]; @@ -858,7 +884,7 @@ MAP _stp_pmap_agg (MAP map) * @returns a pointer to an aggregated map. 
* @sa _stp_pmap_agg() */ -#define _stp_pmap_get_agg(map) (_stp_percpu_dptr(map)) +#define _stp_pmap_get_agg(pmap) (&pmap->agg) #define _stp_pmap_printn(map,n,fmt) _stp_map_printn (_stp_pmap_agg(map), n, fmt) #define _stp_pmap_print(map,fmt) _stp_map_printn(_stp_pmap_agg(map),0,fmt) diff --git a/runtime/map.h b/runtime/map.h index f414f73b..cc470e19 100644 --- a/runtime/map.h +++ b/runtime/map.h @@ -105,9 +105,6 @@ struct map_root { /* the hash table for this array */ struct hlist_head hashes[HASH_TABLE_SIZE]; - /* pointer to allocated memory space. Used for freeing memory. */ - void *membuf; - /* used if this map's nodes contain stats */ struct _Hist hist; }; @@ -115,6 +112,12 @@ struct map_root { /** All maps are of this type. */ typedef struct map_root *MAP; +struct pmap { + MAP map; /* per-cpu maps */ + struct map_root agg; /* aggregation map */ +}; +typedef struct pmap *PMAP; + /** Extracts string from key1 union */ #define key1str(ptr) (_stp_key_get_str(ptr,1)) /** Extracts string from key2 union */ @@ -161,7 +164,7 @@ char * _stp_get_str(struct map_node *m); stat *_stp_get_stat(struct map_node *m); unsigned int str_hash(const char *key1); static MAP _stp_map_new(unsigned max_entries, int type, int key_size, int data_size); -static MAP _stp_pmap_new(unsigned max_entries, int type, int key_size, int data_size); +static PMAP _stp_pmap_new(unsigned max_entries, int type, int key_size, int data_size); static int msb64(int64_t x); static MAP _stp_map_new_hstat_log(unsigned max_entries, int key_size, int buckets); static MAP _stp_map_new_hstat_linear(unsigned max_entries, int ksize, int start, int stop, int interval); @@ -174,16 +177,14 @@ void _stp_map_print(MAP map, const char *fmt); static struct map_node *_new_map_create (MAP map, struct hlist_head *head); static int _new_map_set_int64 (MAP map, struct map_node *n, int64_t val, int add); -static int64_t _new_map_get_int64 (MAP map, struct map_node *n); -static char *_new_map_get_str (MAP map, struct map_node 
*n); static int _new_map_set_str (MAP map, struct map_node *n, char *val, int add); -static stat *_new_map_get_stat (MAP map, struct map_node *n); -static int _new_map_set_stat (MAP map, struct map_node *n, int64_t val, int add); static void _new_map_clear_node (struct map_node *); static void _new_map_del_node (MAP map, struct map_node *n); -static MAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int start, int stop, int interval); -static MAP _stp_pmap_new_hstat_log (unsigned max_entries, int key_size, int buckets); +static PMAP _stp_pmap_new_hstat_linear (unsigned max_entries, int ksize, int start, int stop, int interval); +static PMAP _stp_pmap_new_hstat_log (unsigned max_entries, int key_size, int buckets); static void _stp_add_agg(struct map_node *aptr, struct map_node *ptr); static struct map_node *_stp_new_agg(MAP agg, struct hlist_head *ahead, struct map_node *ptr); +static void __stp_map_del(MAP map); +static int _new_map_set_stat (MAP map, struct map_node *n, int64_t val, int add); /** @endcond */ #endif /* _MAP_H_ */ diff --git a/runtime/pmap-gen.c b/runtime/pmap-gen.c index a57806d4..49d4f0f4 100644 --- a/runtime/pmap-gen.c +++ b/runtime/pmap-gen.c @@ -400,33 +400,33 @@ static unsigned int KEYSYM(phash) (ALLKEYSD(key)) #if VALUE_TYPE == INT64 || VALUE_TYPE == STRING -MAP KEYSYM(_stp_pmap_new) (unsigned max_entries) +PMAP KEYSYM(_stp_pmap_new) (unsigned max_entries) { - MAP map = _stp_pmap_new (max_entries, VALUE_TYPE, sizeof(struct KEYSYM(pmap_node)), 0); - if (map) { + PMAP pmap = _stp_pmap_new (max_entries, VALUE_TYPE, sizeof(struct KEYSYM(pmap_node)), 0); + if (pmap) { int i; MAP m; for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = (MAP)per_cpu_ptr (pmap->map, i); m->get_key = KEYSYM(pmap_get_key); m->copy = KEYSYM(pmap_copy_keys); m->cmp = KEYSYM(pmap_key_cmp); } - m = _stp_percpu_dptr(map); + m = &pmap->agg; m->get_key = KEYSYM(pmap_get_key); m->copy = KEYSYM(pmap_copy_keys); m->cmp = KEYSYM(pmap_key_cmp); } - return map; + 
return pmap; } #else /* _stp_pmap_new_key1_key2...val (num, HIST_LINEAR, start, end, interval) */ /* _stp_pmap_new_key1_key2...val (num, HIST_LOG, buckets) */ -MAP KEYSYM(_stp_pmap_new) (unsigned max_entries, int htype, ...) +PMAP KEYSYM(_stp_pmap_new) (unsigned max_entries, int htype, ...) { int buckets=0, start=0, stop=0, interval=0; - MAP map; + PMAP pmap; va_list ap; if (htype != HIST_NONE) { @@ -445,36 +445,36 @@ MAP KEYSYM(_stp_pmap_new) (unsigned max_entries, int htype, ...) switch (htype) { case HIST_NONE: - map = _stp_pmap_new (max_entries, STAT, sizeof(struct KEYSYM(pmap_node)), 0); + pmap = _stp_pmap_new (max_entries, STAT, sizeof(struct KEYSYM(pmap_node)), 0); break; case HIST_LOG: - map = _stp_pmap_new_hstat_log (max_entries, sizeof(struct KEYSYM(pmap_node)), + pmap = _stp_pmap_new_hstat_log (max_entries, sizeof(struct KEYSYM(pmap_node)), buckets); break; case HIST_LINEAR: - map = _stp_pmap_new_hstat_linear (max_entries, sizeof(struct KEYSYM(pmap_node)), + pmap = _stp_pmap_new_hstat_linear (max_entries, sizeof(struct KEYSYM(pmap_node)), start, stop, interval); break; default: _stp_warn ("Unknown histogram type %d\n", htype); - map = NULL; + pmap = NULL; } - if (map) { + if (pmap) { int i; MAP m; for_each_cpu(i) { - m = _stp_per_cpu_ptr (map, i); + m = per_cpu_ptr (pmap->map, i); m->get_key = KEYSYM(pmap_get_key); m->copy = KEYSYM(pmap_copy_keys); m->cmp = KEYSYM(pmap_key_cmp); } - m = _stp_percpu_dptr(map); + m = &pmap->agg; m->get_key = KEYSYM(pmap_get_key); m->copy = KEYSYM(pmap_copy_keys); m->cmp = KEYSYM(pmap_key_cmp); } - return map; + return pmap; } #endif /* VALUE_TYPE */ @@ -532,24 +532,24 @@ int KEYSYM(__stp_pmap_set) (MAP map, ALLKEYSD(key), VSTYPE val, int add) return MAP_SET_VAL(map,(struct map_node *)n, val, 0); } -int KEYSYM(_stp_pmap_set) (MAP map, ALLKEYSD(key), VSTYPE val) +int KEYSYM(_stp_pmap_set) (PMAP pmap, ALLKEYSD(key), VSTYPE val) { - MAP m = _stp_per_cpu_ptr (map, get_cpu()); + MAP m = per_cpu_ptr (pmap->map, get_cpu()); int 
res = KEYSYM(__stp_pmap_set) (m, ALLKEYS(key), val, 0); put_cpu(); return res; } -int KEYSYM(_stp_pmap_add) (MAP map, ALLKEYSD(key), VSTYPE val) +int KEYSYM(_stp_pmap_add) (PMAP pmap, ALLKEYSD(key), VSTYPE val) { - MAP m = _stp_per_cpu_ptr (map, get_cpu()); + MAP m = per_cpu_ptr (pmap->map, get_cpu()); int res = KEYSYM(__stp_pmap_set) (m, ALLKEYS(key), val, 1); put_cpu(); return res; } -VALTYPE KEYSYM(_stp_pmap_get_cpu) (MAP pmap, ALLKEYSD(key)) +VALTYPE KEYSYM(_stp_pmap_get_cpu) (PMAP pmap, ALLKEYSD(key)) { unsigned int hv; struct hlist_head *head; @@ -561,7 +561,7 @@ VALTYPE KEYSYM(_stp_pmap_get_cpu) (MAP pmap, ALLKEYSD(key)) if (pmap == NULL) return (VALTYPE)0; - map = _stp_per_cpu_ptr (pmap, get_cpu()); + map = per_cpu_ptr (pmap->map, get_cpu()); hv = KEYSYM(phash) (ALLKEYS(key)); head = &map->hashes[hv]; @@ -597,7 +597,7 @@ VALTYPE KEYSYM(_stp_pmap_get_cpu) (MAP pmap, ALLKEYSD(key)) #endif } -VALTYPE KEYSYM(_stp_pmap_get) (MAP pmap, ALLKEYSD(key)) +VALTYPE KEYSYM(_stp_pmap_get) (PMAP pmap, ALLKEYSD(key)) { unsigned int hv; int cpu; @@ -613,7 +613,7 @@ VALTYPE KEYSYM(_stp_pmap_get) (MAP pmap, ALLKEYSD(key)) hv = KEYSYM(phash) (ALLKEYS(key)); /* first look it up in the aggregation map */ - agg = _stp_percpu_dptr(pmap); + agg = &pmap->agg; ahead = &agg->hashes[hv]; hlist_for_each(e, ahead) { n = (struct KEYSYM(pmap_node) *)((long)e - sizeof(struct list_head)); @@ -639,7 +639,7 @@ VALTYPE KEYSYM(_stp_pmap_get) (MAP pmap, ALLKEYSD(key)) /* now total each cpu */ for_each_cpu(cpu) { - map = _stp_per_cpu_ptr (pmap, cpu); + map = per_cpu_ptr (pmap->map, cpu); head = &map->hashes[hv]; hlist_for_each(e, head) { n = (struct KEYSYM(pmap_node) *)((long)e - sizeof(struct list_head)); |