author | hunt <hunt> | 2005-11-08 19:48:23 +0000 |
---|---|---|
committer | hunt <hunt> | 2005-11-08 19:48:23 +0000 |
commit | 7ba70af105ed918b5b07e5bba8b5d27a8d911249 (patch) | |
tree | b33bcea26aa98488bfa987a62764837ae0c39bad /runtime/alloc.c | |
parent | 4893e4b2858af12d916c3915a97336cdb0c8236b (diff) | |
download | systemtap-steved-7ba70af105ed918b5b07e5bba8b5d27a8d911249.tar.gz systemtap-steved-7ba70af105ed918b5b07e5bba8b5d27a8d911249.tar.xz systemtap-steved-7ba70af105ed918b5b07e5bba8b5d27a8d911249.zip | |
2005-11-08 Martin Hunt <hunt@redhat.com>
* map.c (_stp_map_init): New function. Extracted from _stp_map_new()
so it can be used in _stp_pmap_new().
(_stp_map_new): Call _stp_map_init().
(_stp_pmap_new): New function.
(_stp_pmap_new_hstat_linear): New function.
(_stp_pmap_new_hstat_log): New function.
(_stp_pmap_del): New function.
(_stp_pmap_printn_cpu): New function.
(_stp_pmap_printn): New function.
(_stp_new_agg): New function.
(_stp_add_agg): New function.
(_stp_pmap_agg): New function.
(_new_map_clear_node): New function.
* map.h (struct map_root): Add Hist struct. Add copy
and cmp function pointers for pmaps.
* stat.h: Remove Stat struct. Replace with Hist struct
that is limited to only histogram params.
* map-stat.c: Fix up references to histogram params in map_root.
* stat-common.c: Ditto.
* stat.c: Ditto.
* pmap-gen.c: New file. Implements per-cpu maps (a simplified
sketch of the idea follows this ChangeLog).
* map-gen.c: Minor bug fixes. Use new VAL_IS_ZERO() macro.
* alloc.c (vmalloc_node): For NUMA, provide a vmalloc that
does node-local allocations.
(_stp_alloc_cpu): A version of _stp_alloc() that does
node-local allocations.
(_stp_valloc_cpu): A version of _stp_valloc() that does
node-local allocations.
(__stp_valloc_percpu): New function. Like alloc_percpu()
except uses _stp_valloc().
(_stp_vfree_percpu): New function. Like free_percpu().
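
The per-cpu map ("pmap") pattern mentioned in the map.c and pmap-gen.c entries can be summarized with a deliberately simplified toy. This is an illustrative sketch only, not the real pmap-gen.c code: the fixed-size arrays and the `pmap_add()`/`pmap_agg()` names are hypothetical. What it shows is the pattern the new functions implement: each CPU updates its own private copy of a map without locking, and a reader folds all copies into one aggregate when the map is read or printed, which is roughly what `_stp_pmap_agg()` and `_stp_pmap_printn()` do in the runtime.

```c
/* Toy model of the per-cpu map ("pmap") idea -- illustration only,
 * not the real pmap-gen.c code. */
#include <stdio.h>

#define NR_CPUS 4		/* stand-in for the kernel constant */
#define NR_KEYS 8		/* toy key space */

static long pcpu_map[NR_CPUS][NR_KEYS];	/* one private map per CPU */

/* "Probe context" update on a given CPU: lock-free, since only that
 * CPU ever writes its own copy. */
static void pmap_add(int cpu, int key, long val)
{
	pcpu_map[cpu][key] += val;
}

/* Reader side: aggregate every CPU's copy into a single map. */
static void pmap_agg(long agg[NR_KEYS])
{
	int key, cpu;

	for (key = 0; key < NR_KEYS; key++) {
		agg[key] = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			agg[key] += pcpu_map[cpu][key];
	}
}

int main(void)
{
	long agg[NR_KEYS];

	pmap_add(0, 3, 2);	/* CPU 0 and CPU 1 each update key 3 ... */
	pmap_add(1, 3, 5);
	pmap_agg(agg);		/* ... the reader sees the combined value */
	printf("key 3 -> %ld\n", agg[3]);	/* prints 7 */
	return 0;
}
```

The per-CPU storage behind the real pmaps is what the new alloc.c helpers below provide: node-local and, where needed, vmalloc-backed allocations.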
Diffstat (limited to 'runtime/alloc.c')
-rw-r--r-- | runtime/alloc.c | 138 |
1 files changed, 138 insertions, 0 deletions
diff --git a/runtime/alloc.c b/runtime/alloc.c
index 5e5bb11a..c2c3d010 100644
--- a/runtime/alloc.c
+++ b/runtime/alloc.c
@@ -22,6 +22,49 @@
  * @{
  */
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+/**
+ * vmalloc_node - allocate virtually contiguous memory
+ *
+ * @size: allocation size
+ * @node: preferred node
+ *
+ * This vmalloc variant tries to allocate memory from a preferred node.
+ * This code is from Eric Dumazet, posted to the LKML.
+ * FIXME: The version in the mm kernel is different. Should probably
+ * switch if it is easily backported.
+ */
+#ifdef CONFIG_NUMA
+void *vmalloc_node(unsigned long size, int node)
+{
+	void *result;
+	struct mempolicy *oldpol = current->mempolicy;
+	mm_segment_t oldfs = get_fs();
+	DECLARE_BITMAP(prefnode, MAX_NUMNODES);
+
+	mpol_get(oldpol);
+	bitmap_zero(prefnode, MAX_NUMNODES);
+	set_bit(node, prefnode);
+
+	set_fs(KERNEL_DS);
+	sys_set_mempolicy(MPOL_PREFERRED, prefnode, MAX_NUMNODES);
+	set_fs(oldfs);
+
+	result = vmalloc(size);
+
+	mpol_free(current->mempolicy);
+	current->mempolicy = oldpol;
+	return result;
+}
+#else
+#define vmalloc_node(size,node) vmalloc(size)
+#endif /* CONFIG_NUMA */
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
+#define kmalloc_node(size,flags,node) kmalloc(size,flags)
+#endif /* LINUX_VERSION_CODE */
+
 /** Allocates memory within a probe.
  * This is used for small allocations from within a running
  * probe where the process cannot sleep.
@@ -38,6 +81,14 @@ void *_stp_alloc(size_t len)
 	return ptr;
 }
 
+void *_stp_alloc_cpu(size_t len, int cpu)
+{
+	void *ptr = kmalloc_node(len, GFP_ATOMIC, cpu_to_node(cpu));
+	if (unlikely(ptr == NULL))
+		_stp_error("_stp_alloc failed.\n");
+	return ptr;
+}
+
 /** Allocates and clears memory within a probe.
  * This is used for small allocations from within a running
  * probe where the process cannot sleep.
@@ -71,6 +122,93 @@ void *_stp_valloc(size_t len)
 	return ptr;
 }
 
+void *_stp_valloc_cpu(size_t len, int cpu)
+{
+	void *ptr = vmalloc_node(len, cpu_to_node(cpu));
+	if (likely(ptr))
+		memset(ptr, 0, len);
+	else
+		_stp_error("_stp_valloc failed.\n");
+	return ptr;
+}
+
+struct percpu_data {
+	void *ptrs[NR_CPUS];
+	void *blkp;
+};
+
+#ifdef CONFIG_SMP
+/**
+ * __stp_valloc_percpu - allocate one copy of the object for every present
+ * cpu in the system, using vmalloc and zeroing them.
+ * Objects should be dereferenced using the per_cpu_ptr macro only.
+ *
+ * @size: how many bytes of memory are required.
+ * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
+ */
+static void *__stp_valloc_percpu(size_t size, size_t align)
+{
+	int i;
+	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+
+	if (!pdata)
+		return NULL;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i))
+			continue;
+		pdata->ptrs[i] = vmalloc_node(size,
+					      cpu_to_node(i));
+
+		if (!pdata->ptrs[i])
+			goto unwind_oom;
+		memset(pdata->ptrs[i], 0, size);
+	}
+
+	/* Catch derefs w/o wrappers */
+	return (void *) (~(unsigned long) pdata);
+
+unwind_oom:
+	while (--i >= 0) {
+		if (!cpu_possible(i))
+			continue;
+		vfree(pdata->ptrs[i]);
+	}
+	kfree(pdata);
+	return NULL;
+}
+
+void _stp_vfree_percpu(const void *objp)
+{
+	int i;
+	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i))
+			continue;
+		vfree(p->ptrs[i]);
+	}
+	kfree(p);
+}
+#else
+static inline void *__stp_valloc_percpu(size_t size, size_t align)
+{
+	void *ret = kmalloc(size, GFP_KERNEL);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+void _stp_vfree_percpu(const void *ptr)
+{
+	kfree(ptr);
+}
+#endif
+
+#define _stp_valloc_percpu(type) \
+	((type *)(__stp_valloc_percpu(sizeof(type), __alignof__(type))))
+
+#define _stp_percpu_dptr(ptr) (((struct percpu_data *)~(unsigned long)(ptr))->blkp)
+
 /** Frees memory allocated by _stp_alloc or _stp_calloc.
  * @param ptr pointer to memory to free
  * @note Not currently used by the runtime. Deprecate?
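
For orientation, here is a rough usage sketch of the helpers added above, assuming the runtime's own headers are in scope so that `_stp_valloc_percpu()` and `_stp_vfree_percpu()` are visible. The `struct _ex_counter` type and the `_ex_*` functions are hypothetical; `per_cpu_ptr()`, `get_cpu()` and `put_cpu()` are stock kernel primitives, and `per_cpu_ptr()` works here only because `struct percpu_data` above mirrors the layout the kernel's `alloc_percpu()` uses.

```c
/* Hypothetical usage sketch, not part of the commit.  Assumes the
 * runtime headers (and thus linux/errno.h, linux/percpu.h) are already
 * included. */
struct _ex_counter {
	int64_t hits;		/* per-CPU hit count in node-local storage */
};

static struct _ex_counter *_ex_counters;	/* mangled per-cpu pointer */

static int _ex_init(void)
{
	/* one zeroed, node-local copy per possible CPU */
	_ex_counters = _stp_valloc_percpu(struct _ex_counter);
	if (_ex_counters == NULL)
		return -ENOMEM;
	return 0;
}

static void _ex_hit(void)
{
	/* dereference only through per_cpu_ptr(), as the comment in
	 * __stp_valloc_percpu() requires */
	int cpu = get_cpu();
	per_cpu_ptr(_ex_counters, cpu)->hits++;
	put_cpu();
}

static void _ex_exit(void)
{
	_stp_vfree_percpu(_ex_counters);
}
```

The inverted pointer returned by `__stp_valloc_percpu()` ("Catch derefs w/o wrappers") is not a valid address, so any direct dereference is caught immediately; callers are forced to go through `per_cpu_ptr()` or `_stp_percpu_dptr()`, mirroring the convention of the kernel's `alloc_percpu()`.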