Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug            |  35
-rw-r--r--   lib/Kconfig.kgdb             |  11
-rw-r--r--   lib/Makefile                 |   6
-rw-r--r--   lib/bcd.c                    |  14
-rw-r--r--   lib/bitmap.c                 |  11
-rw-r--r--   lib/cmdline.c                |  16
-rw-r--r--   lib/debug_locks.c            |   2
-rw-r--r--   lib/debugobjects.c           |  50
-rw-r--r--   lib/idr.c                    | 142
-rw-r--r--   lib/inflate.c                |  52
-rw-r--r--   lib/iomap.c                  |   3
-rw-r--r--   lib/kobject.c                |  12
-rw-r--r--   lib/kobject_uevent.c         |   6
-rw-r--r--   lib/list_debug.c             |  50
-rw-r--r--   lib/lmb.c                    |   2
-rw-r--r--   lib/lzo/lzo1x_decompress.c   |   6
-rw-r--r--   lib/plist.c                  |  13
-rw-r--r--   lib/radix-tree.c             | 180
-rw-r--r--   lib/random32.c               |  48
-rw-r--r--   lib/ratelimit.c              |  56
-rw-r--r--   lib/scatterlist.c            |   4
-rw-r--r--   lib/show_mem.c               |  63
-rw-r--r--   lib/smp_processor_id.c       |   5
-rw-r--r--   lib/swiotlb.c                |   6
-rw-r--r--   lib/syscall.c                |  75
-rw-r--r--   lib/vsprintf.c               |  13
26 files changed, 632 insertions, 249 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 882c5104899..0b504814e37 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -394,7 +394,7 @@ config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select STACKTRACE - select FRAME_POINTER if !X86 && !MIPS + select FRAME_POINTER if !X86 && !MIPS && !PPC select KALLSYMS select KALLSYMS_ALL @@ -505,6 +505,18 @@ config DEBUG_WRITECOUNT If unsure, say N. +config DEBUG_MEMORY_INIT + bool "Debug memory initialisation" if EMBEDDED + default !EMBEDDED + help + Enable this for additional checks during memory initialisation. + The sanity checks verify aspects of the VM such as the memory model + and other information provided by the architecture. Verbose + information will be printed at KERN_DEBUG loglevel depending + on the mminit_loglevel= command-line option. + + If unsure, say Y + config DEBUG_LIST bool "Debug linked list manipulation" depends on DEBUG_KERNEL @@ -664,13 +676,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT depends on !X86_64 select STACKTRACE - select FRAME_POINTER + select FRAME_POINTER if !PPC help Provide stacktrace filter for fault-injection capabilities config LATENCYTOP bool "Latency measuring infrastructure" - select FRAME_POINTER if !MIPS + select FRAME_POINTER if !MIPS && !PPC select KALLSYMS select KALLSYMS_ALL select STACKTRACE @@ -681,6 +693,14 @@ config LATENCYTOP Enable this option if you want to use the LatencyTOP tool to find out which userspace is blocking on what kernel operations. +config SYSCTL_SYSCALL_CHECK + bool "Sysctl checks" + depends on SYSCTL_SYSCALL + ---help--- + sys_sysctl uses binary paths that have been found challenging + to properly maintain and use. This enables checks that help + you to keep things correct. + source kernel/trace/Kconfig config PROVIDE_OHCI1394_DMA_INIT @@ -723,6 +743,15 @@ config FIREWIRE_OHCI_REMOTE_DMA If unsure, say N. +menuconfig BUILD_DOCSRC + bool "Build targets in Documentation/ tree" + depends on HEADERS_CHECK + help + This option attempts to build objects from the source files in the + kernel Documentation/ tree. + + Say N if you are unsure. + source "samples/Kconfig" source "lib/Kconfig.kgdb" diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 2cfd2721f7e..9b5d1d7f2ef 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -4,14 +4,17 @@ config HAVE_ARCH_KGDB menuconfig KGDB bool "KGDB: kernel debugging with remote gdb" - select FRAME_POINTER depends on HAVE_ARCH_KGDB depends on DEBUG_KERNEL && EXPERIMENTAL help If you say Y here, it will be possible to remotely debug the - kernel using gdb. Documentation of kernel debugger is available - at http://kgdb.sourceforge.net as well as in DocBook form - in Documentation/DocBook/. If unsure, say N. + kernel using gdb. It is recommended but not required, that + you also turn on the kernel config option + CONFIG_FRAME_POINTER to aid in producing more reliable stack + backtraces in the external debugger. Documentation of + kernel debugger is available at http://kgdb.sourceforge.net + as well as in DocBook form in Documentation/DocBook/. If + unsure, say N. 
if KGDB diff --git a/lib/Makefile b/lib/Makefile index 818c4d45551..3b1f94bbe9d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,14 +11,14 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o + proportions.o prio_heap.o ratelimit.o show_mem.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o klist.o -obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ +obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) @@ -78,6 +78,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_LMB) += lmb.o +obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/bcd.c b/lib/bcd.c new file mode 100644 index 00000000000..d74257fd0fe --- /dev/null +++ b/lib/bcd.c @@ -0,0 +1,14 @@ +#include <linux/bcd.h> +#include <linux/module.h> + +unsigned bcd2bin(unsigned char val) +{ + return (val & 0x0f) + (val >> 4) * 10; +} +EXPORT_SYMBOL(bcd2bin); + +unsigned char bin2bcd(unsigned val) +{ + return ((val / 10) << 4) + val % 10; +} +EXPORT_SYMBOL(bin2bcd); diff --git a/lib/bitmap.c b/lib/bitmap.c index 482df94ea21..06fb57c86de 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, EXPORT_SYMBOL(bitmap_scnprintf); /** + * bitmap_scnprintf_len - return buffer length needed to convert + * bitmap to an ASCII hex string + * @nr_bits: number of bits to be converted + */ +int bitmap_scnprintf_len(unsigned int nr_bits) +{ + unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4; + return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1; +} + +/** * __bitmap_parse - convert an ASCII hex string into a bitmap. * @buf: pointer to buffer containing string. * @buflen: buffer size in bytes. If string is smaller than this diff --git a/lib/cmdline.c b/lib/cmdline.c index f596c08d213..5ba8a942a47 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints) /** * memparse - parse a string with mem suffixes into a number * @ptr: Where parse begins - * @retptr: (output) Pointer to next char after parse completes + * @retptr: (output) Optional pointer to next char after parse completes * * Parses a string into a number. The number stored at @ptr is * potentially suffixed with %K (for kilobytes, or 1024 bytes), @@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints) * megabyte, or one gigabyte, respectively. 
*/ -unsigned long long memparse (char *ptr, char **retptr) +unsigned long long memparse(char *ptr, char **retptr) { - unsigned long long ret = simple_strtoull (ptr, retptr, 0); + char *endptr; /* local pointer to end of parsed string */ - switch (**retptr) { + unsigned long long ret = simple_strtoull(ptr, &endptr, 0); + + switch (*endptr) { case 'G': case 'g': ret <<= 10; @@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr) case 'K': case 'k': ret <<= 10; - (*retptr)++; + endptr++; default: break; } + + if (retptr) + *retptr = endptr; + return ret; } diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0ef01d14727..0218b4693dd 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -8,6 +8,7 @@ * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> */ +#include <linux/kernel.h> #include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/module.h> @@ -37,6 +38,7 @@ int debug_locks_off(void) { if (xchg(&debug_locks, 0)) { if (!debug_locks_silent) { + oops_in_progress = 1; console_verbose(); return 1; } diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 85b18d79be8..e3ab374e133 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) /* * Allocate a new object. If the pool is empty, switch off the debugger. + * Must be called with interrupts disabled. */ static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) @@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) static void free_object(struct debug_obj *obj) { unsigned long idx = (unsigned long)(obj - obj_static_pool); + unsigned long flags; if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { - spin_lock(&pool_lock); + spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&obj->node, &obj_pool); obj_pool_free++; obj_pool_used--; - spin_unlock(&pool_lock); + spin_unlock_irqrestore(&pool_lock, flags); } else { - spin_lock(&pool_lock); + spin_lock_irqsave(&pool_lock, flags); obj_pool_used--; - spin_unlock(&pool_lock); + spin_unlock_irqrestore(&pool_lock, flags); kmem_cache_free(obj_cache, obj); } } @@ -171,6 +173,7 @@ static void debug_objects_oom(void) { struct debug_bucket *db = obj_hash; struct hlist_node *node, *tmp; + HLIST_HEAD(freelist); struct debug_obj *obj; unsigned long flags; int i; @@ -179,11 +182,14 @@ static void debug_objects_oom(void) for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { spin_lock_irqsave(&db->lock, flags); - hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + hlist_move_list(&db->list, &freelist); + spin_unlock_irqrestore(&db->lock, flags); + + /* Now free them */ + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { hlist_del(&obj->node); free_object(obj); } - spin_unlock_irqrestore(&db->lock, flags); } } @@ -205,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg) if (limit < 5 && obj->descr != descr_test) { limit++; - printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, + WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, obj_states[obj->state], obj->descr->name); - WARN_ON(1); } debug_objects_warnings++; } @@ -226,15 +231,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), static void debug_object_is_on_stack(void *addr, int onstack) { - void *stack = current->stack; int is_on_stack; static int limit; if (limit > 4) return; - is_on_stack = (addr >= stack && addr < (stack + 
THREAD_SIZE)); - + is_on_stack = object_is_on_stack(addr); if (is_on_stack == onstack) return; @@ -501,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) return; default: hlist_del(&obj->node); + spin_unlock_irqrestore(&db->lock, flags); free_object(obj); - break; + return; } out_unlock: spin_unlock_irqrestore(&db->lock, flags); @@ -513,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) { unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; struct hlist_node *node, *tmp; + HLIST_HEAD(freelist); struct debug_obj_descr *descr; enum debug_obj_state state; struct debug_bucket *db; @@ -548,11 +553,18 @@ repeat: goto repeat; default: hlist_del(&obj->node); - free_object(obj); + hlist_add_head(&obj->node, &freelist); break; } } spin_unlock_irqrestore(&db->lock, flags); + + /* Now free them */ + hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { + hlist_del(&obj->node); + free_object(obj); + } + if (cnt > debug_objects_maxchain) debug_objects_maxchain = cnt; } @@ -735,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) obj = lookup_object(addr, db); if (!obj && state != ODEBUG_STATE_NONE) { - printk(KERN_ERR "ODEBUG: selftest object not found\n"); - WARN_ON(1); + WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); goto out; } if (obj && obj->state != state) { - printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", + WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", obj->state, state); - WARN_ON(1); goto out; } if (fixups != debug_objects_fixups) { - printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", + WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", fixups, debug_objects_fixups); - WARN_ON(1); goto out; } if (warnings != debug_objects_warnings) { - printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", + WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", warnings, debug_objects_warnings); - WARN_ON(1); goto out; } res = 0; diff --git a/lib/idr.c b/lib/idr.c index 7a02e173f02..e728c7fccc4 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -6,6 +6,8 @@ * Modified by George Anzinger to reuse immediately and to use * find bit instructions. Also removed _irq on spinlocks. * + * Modified by Nadia Derbey to make it RCU safe. + * * Small id to pointer translation service. 
* * It uses a radix tree like structure as a sparse array indexed @@ -35,7 +37,7 @@ static struct kmem_cache *idr_layer_cache; -static struct idr_layer *alloc_layer(struct idr *idp) +static struct idr_layer *get_from_free_list(struct idr *idp) { struct idr_layer *p; unsigned long flags; @@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp) return(p); } +static void idr_layer_rcu_free(struct rcu_head *head) +{ + struct idr_layer *layer; + + layer = container_of(head, struct idr_layer, rcu_head); + kmem_cache_free(idr_layer_cache, layer); +} + +static inline void free_layer(struct idr_layer *p) +{ + call_rcu(&p->rcu_head, idr_layer_rcu_free); +} + /* only called when idp->lock is held */ -static void __free_layer(struct idr *idp, struct idr_layer *p) +static void __move_to_free_list(struct idr *idp, struct idr_layer *p) { p->ary[0] = idp->id_free; idp->id_free = p; idp->id_free_cnt++; } -static void free_layer(struct idr *idp, struct idr_layer *p) +static void move_to_free_list(struct idr *idp, struct idr_layer *p) { unsigned long flags; @@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p) * Depends on the return element being zeroed. */ spin_lock_irqsave(&idp->lock, flags); - __free_layer(idp, p); + __move_to_free_list(idp, p); spin_unlock_irqrestore(&idp->lock, flags); } @@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) * @gfp_mask: memory allocation flags * * This function should be called prior to locking and calling the - * following function. It preallocates enough memory to satisfy + * idr_get_new* functions. It preallocates enough memory to satisfy * the worst possible allocation. * * If the system is REALLY out of memory this function returns 0, @@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) new = kmem_cache_alloc(idr_layer_cache, gfp_mask); if (new == NULL) return (0); - free_layer(idp, new); + move_to_free_list(idp, new); } return 1; } @@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) /* if already at the top layer, we need to grow */ if (!(p = pa[l])) { *starting_id = id; - return -2; + return IDR_NEED_TO_GROW; } /* If we need to go up one layer, continue the @@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = ((id >> sh) ^ n ^ m) << sh; } if ((id >= MAX_ID_BIT) || (id < 0)) - return -3; + return IDR_NOMORE_SPACE; if (l == 0) break; /* * Create the layer below if it is missing. */ if (!p->ary[m]) { - if (!(new = alloc_layer(idp))) + new = get_from_free_list(idp); + if (!new) return -1; - p->ary[m] = new; + rcu_assign_pointer(p->ary[m], new); p->count++; } pa[l--] = p; @@ -192,7 +208,7 @@ build_up: p = idp->top; layers = idp->layers; if (unlikely(!p)) { - if (!(p = alloc_layer(idp))) + if (!(p = get_from_free_list(idp))) return -1; layers = 1; } @@ -204,7 +220,7 @@ build_up: layers++; if (!p->count) continue; - if (!(new = alloc_layer(idp))) { + if (!(new = get_from_free_list(idp))) { /* * The allocation failed. If we built part of * the structure tear it down. 
@@ -214,7 +230,7 @@ build_up: p = p->ary[0]; new->ary[0] = NULL; new->bitmap = new->count = 0; - __free_layer(idp, new); + __move_to_free_list(idp, new); } spin_unlock_irqrestore(&idp->lock, flags); return -1; @@ -225,10 +241,10 @@ build_up: __set_bit(0, &new->bitmap); p = new; } - idp->top = p; + rcu_assign_pointer(idp->top, p); idp->layers = layers; v = sub_alloc(idp, &id, pa); - if (v == -2) + if (v == IDR_NEED_TO_GROW) goto build_up; return(v); } @@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) * Successfully found an empty slot. Install the user * pointer and mark the slot full. */ - pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; + rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], + (struct idr_layer *)ptr); pa[0]->count++; idr_mark_full(pa, id); } @@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) * This is a cheap hack until the IDR code can be fixed to * return proper error values. */ - if (rv < 0) { - if (rv == -1) - return -EAGAIN; - else /* Will be -3 */ - return -ENOSPC; - } + if (rv < 0) + return _idr_rc_to_errno(rv); *id = rv; return 0; } @@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id) * This is a cheap hack until the IDR code can be fixed to * return proper error values. */ - if (rv < 0) { - if (rv == -1) - return -EAGAIN; - else /* Will be -3 */ - return -ENOSPC; - } + if (rv < 0) + return _idr_rc_to_errno(rv); *id = rv; return 0; } @@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new); static void idr_remove_warning(int id) { - printk("idr_remove called for id=%d which is not allocated.\n", id); + printk(KERN_WARNING + "idr_remove called for id=%d which is not allocated.\n", id); dump_stack(); } @@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id) struct idr_layer *p = idp->top; struct idr_layer **pa[MAX_LEVEL]; struct idr_layer ***paa = &pa[0]; + struct idr_layer *to_free; int n; *paa = NULL; @@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id) n = id & IDR_MASK; if (likely(p != NULL && test_bit(n, &p->bitmap))){ __clear_bit(n, &p->bitmap); - p->ary[n] = NULL; + rcu_assign_pointer(p->ary[n], NULL); + to_free = NULL; while(*paa && ! --((**paa)->count)){ - free_layer(idp, **paa); + if (to_free) + free_layer(to_free); + to_free = **paa; **paa-- = NULL; } if (!*paa) idp->layers = 0; + if (to_free) + free_layer(to_free); } else idr_remove_warning(id); } @@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id) void idr_remove(struct idr *idp, int id) { struct idr_layer *p; + struct idr_layer *to_free; /* Mask off upper bits we don't use for the search. */ id &= MAX_ID_MASK; sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); if (idp->top && idp->top->count == 1 && (idp->layers > 1) && - idp->top->ary[0]) { // We can drop a layer - + idp->top->ary[0]) { + /* + * Single child at leftmost slot: we can shrink the tree. + * This level is not needed anymore since when layers are + * inserted, they are inserted at the top of the existing + * tree. 
+ */ + to_free = idp->top; p = idp->top->ary[0]; - idp->top->bitmap = idp->top->count = 0; - free_layer(idp, idp->top); - idp->top = p; + rcu_assign_pointer(idp->top, p); --idp->layers; + to_free->bitmap = to_free->count = 0; + free_layer(to_free); } while (idp->id_free_cnt >= IDR_FREE_MAX) { - p = alloc_layer(idp); + p = get_from_free_list(idp); + /* + * Note: we don't call the rcu callback here, since the only + * layers that fall into the freelist are those that have been + * preallocated. + */ kmem_cache_free(idr_layer_cache, p); } return; @@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp) id += 1 << n; while (n < fls(id)) { - if (p) { - memset(p, 0, sizeof *p); - free_layer(idp, p); - } + if (p) + free_layer(p); n += IDR_BITS; p = *--paa; } } - idp->top = NULL; + rcu_assign_pointer(idp->top, NULL); idp->layers = 0; } EXPORT_SYMBOL(idr_remove_all); @@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all); void idr_destroy(struct idr *idp) { while (idp->id_free_cnt) { - struct idr_layer *p = alloc_layer(idp); + struct idr_layer *p = get_from_free_list(idp); kmem_cache_free(idr_layer_cache, p); } } @@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy); * return indicates that @id is not valid or you passed %NULL in * idr_get_new(). * - * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). + * This function can be called under rcu_read_lock(), given that the leaf + * pointers lifetimes are correctly managed. */ void *idr_find(struct idr *idp, int id) { @@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id) struct idr_layer *p; n = idp->layers * IDR_BITS; - p = idp->top; + p = rcu_dereference(idp->top); /* Mask off upper bits we don't use for the search. */ id &= MAX_ID_MASK; @@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id) while (n > 0 && p) { n -= IDR_BITS; - p = p->ary[(id >> n) & IDR_MASK]; + p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); } return((void *)p); } @@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp, struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; - p = idp->top; + p = rcu_dereference(idp->top); max = 1 << n; id = 0; @@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp, while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; - p = p->ary[(id >> n) & IDR_MASK]; + p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); } if (p) { @@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each); * A -ENOENT return indicates that @id was not found. * A -EINVAL return indicates that @id was not within valid constraints. * - * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). + * The caller must serialize with writers. 
*/ void *idr_replace(struct idr *idp, void *ptr, int id) { @@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id) return ERR_PTR(-ENOENT); old_p = p->ary[n]; - p->ary[n] = ptr; + rcu_assign_pointer(p->ary[n], ptr); return old_p; } EXPORT_SYMBOL(idr_replace); -static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) +static void idr_cache_ctor(void *idr_layer) { memset(idr_layer, 0, sizeof(struct idr_layer)); } @@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) restart: /* get vacant slot */ t = idr_get_empty_slot(&ida->idr, idr_id, pa); - if (t < 0) { - if (t == -1) - return -EAGAIN; - else /* will be -3 */ - return -ENOSPC; - } + if (t < 0) + return _idr_rc_to_errno(t); if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) return -ENOSPC; @@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) return -EAGAIN; memset(bitmap, 0, sizeof(struct ida_bitmap)); - pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; + rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], + (void *)bitmap); pa[0]->count++; } @@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) * allocation. */ if (ida->idr.id_free_cnt || ida->free_bitmap) { - struct idr_layer *p = alloc_layer(&ida->idr); + struct idr_layer *p = get_from_free_list(&ida->idr); if (p) kmem_cache_free(idr_layer_cache, p); } diff --git a/lib/inflate.c b/lib/inflate.c index 9762294be06..1a8e8a97812 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = { #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} #define DUMPBITS(n) {b>>=(n);k-=(n);} +#ifndef NO_INFLATE_MALLOC +/* A trivial malloc implementation, adapted from + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + */ + +static unsigned long malloc_ptr; +static int malloc_count; + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + error("Malloc error"); + if (!malloc_ptr) + malloc_ptr = free_mem_ptr; + + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + + p = (void *)malloc_ptr; + malloc_ptr += size; + + if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + error("Out of memory"); + + malloc_count++; + return p; +} + +static void free(void *where) +{ + malloc_count--; + if (!malloc_count) + malloc_ptr = free_mem_ptr; +} +#else +#define malloc(a) kmalloc(a, GFP_KERNEL) +#define free(a) kfree(a) +#endif /* Huffman code decoding is performed using a multi-level table lookup. 
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void) int e; /* last block flag */ int r; /* result code */ unsigned h; /* maximum struct huft's malloc'ed */ - void *ptr; /* initialize window, bit buffer */ wp = 0; @@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void) h = 0; do { hufts = 0; - gzip_mark(&ptr); - if ((r = inflate_block(&e)) != 0) { - gzip_release(&ptr); - return r; - } - gzip_release(&ptr); +#ifdef ARCH_HAS_DECOMP_WDOG + arch_decomp_wdog(); +#endif + r = inflate_block(&e); + if (r) + return r; if (hufts > h) h = hufts; } while (!e); diff --git a/lib/iomap.c b/lib/iomap.c index 37a3ea4cac9..d3222938515 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access) static int count = 10; if (count) { count--; - printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); - WARN_ON(1); + WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); } } diff --git a/lib/kobject.c b/lib/kobject.c index 744401571ed..fbf0ae28237 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj) return -ENOENT; if (!kobj->name || !kobj->name[0]) { - pr_debug("kobject: (%p): attempted to be registered with empty " + WARN(1, "kobject: (%p): attempted to be registered with empty " "name!\n", kobj); - WARN_ON(1); return -EINVAL; } @@ -224,8 +223,7 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, return -ENOMEM; /* ewww... some of these buggers have '/' in the name ... */ - s = strchr(kobj->name, '/'); - if (s) + while ((s = strchr(kobj->name, '/'))) s[0] = '!'; kfree(old_name); @@ -583,12 +581,10 @@ static void kobject_release(struct kref *kref) void kobject_put(struct kobject *kobj) { if (kobj) { - if (!kobj->state_initialized) { - printk(KERN_WARNING "kobject: '%s' (%p): is not " + if (!kobj->state_initialized) + WARN(1, KERN_WARNING "kobject: '%s' (%p): is not " "initialized, yet kobject_put() is being " "called.\n", kobject_name(kobj), kobj); - WARN_ON(1); - } kref_put(&kobj->kref, kobject_release); } } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9f8d599459d..3f914725bda 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -285,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) int len; if (env->envp_idx >= ARRAY_SIZE(env->envp)) { - printk(KERN_ERR "add_uevent_var: too many keys\n"); - WARN_ON(1); + WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); return -ENOMEM; } @@ -297,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) va_end(args); if (len >= (sizeof(env->buf) - env->buflen)) { - printk(KERN_ERR "add_uevent_var: buffer size too small\n"); - WARN_ON(1); + WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); return -ENOMEM; } diff --git a/lib/list_debug.c b/lib/list_debug.c index 4350ba9655b..1a39f4e3ae1 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -20,18 +20,14 @@ void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { - if (unlikely(next->prev != prev)) { - printk(KERN_ERR "list_add corruption. next->prev should be " - "prev (%p), but was %p. (next=%p).\n", - prev, next->prev, next); - BUG(); - } - if (unlikely(prev->next != next)) { - printk(KERN_ERR "list_add corruption. prev->next should be " - "next (%p), but was %p. (prev=%p).\n", - next, prev->next, prev); - BUG(); - } + WARN(next->prev != prev, + "list_add corruption. 
next->prev should be " + "prev (%p), but was %p. (next=%p).\n", + prev, next->prev, next); + WARN(prev->next != next, + "list_add corruption. prev->next should be " + "next (%p), but was %p. (prev=%p).\n", + next, prev->next, prev); next->prev = new; new->next = next; new->prev = prev; @@ -40,20 +36,6 @@ void __list_add(struct list_head *new, EXPORT_SYMBOL(__list_add); /** - * list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -void list_add(struct list_head *new, struct list_head *head) -{ - __list_add(new, head, head->next); -} -EXPORT_SYMBOL(list_add); - -/** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty on entry does not return true after this, the entry is @@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add); */ void list_del(struct list_head *entry) { - if (unlikely(entry->prev->next != entry)) { - printk(KERN_ERR "list_del corruption. prev->next should be %p, " - "but was %p\n", entry, entry->prev->next); - BUG(); - } - if (unlikely(entry->next->prev != entry)) { - printk(KERN_ERR "list_del corruption. next->prev should be %p, " - "but was %p\n", entry, entry->next->prev); - BUG(); - } + WARN(entry->prev->next != entry, + "list_del corruption. prev->next should be %p, " + "but was %p\n", entry, entry->prev->next); + WARN(entry->next->prev != entry, + "list_del corruption. next->prev should be %p, " + "but was %p\n", entry, entry->next->prev); __list_del(entry->prev, entry->next); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; diff --git a/lib/lmb.c b/lib/lmb.c index 5d7b9286503..97e54703708 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit) if (lmb.memory.region[0].size < lmb.rmo_size) lmb.rmo_size = lmb.memory.region[0].size; + memory_limit = lmb_end_of_DRAM(); + /* And truncate any reserves above the limit also. 
*/ for (i = 0; i < lmb.reserved.cnt; i++) { p = &lmb.reserved.region[i]; diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 77f0f9b775a..5dc6b29c157 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c @@ -138,8 +138,7 @@ match: t += 31 + *ip++; } m_pos = op - 1; - m_pos -= le16_to_cpu(get_unaligned( - (const unsigned short *)ip)) >> 2; + m_pos -= get_unaligned_le16(ip) >> 2; ip += 2; } else if (t >= 16) { m_pos = op; @@ -157,8 +156,7 @@ match: } t += 7 + *ip++; } - m_pos -= le16_to_cpu(get_unaligned( - (const unsigned short *)ip)) >> 2; + m_pos -= get_unaligned_le16(ip) >> 2; ip += 2; if (m_pos == op) goto eof_found; diff --git a/lib/plist.c b/lib/plist.c index 3074a02272f..d6c64a824e1 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -31,12 +31,13 @@ static void plist_check_prev_next(struct list_head *t, struct list_head *p, struct list_head *n) { - if (n->prev != p || p->next != n) { - printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); - printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); - printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); - WARN_ON(1); - } + WARN(n->prev != p || p->next != n, + "top: %p, n: %p, p: %p\n" + "prev: %p, n: %p, p: %p\n" + "next: %p, n: %p, p: %p\n", + t, t->next, t->prev, + p, p->next, p->prev, + n, n->next, n->prev); } static void plist_check_list(struct list_head *top) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 56ec21a7f73..be86b32bc87 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert); * Returns: the slot corresponding to the position @index in the * radix tree @root. This is useful for update-if-exists operations. * - * This function cannot be called under rcu_read_lock, it must be - * excluded from writers, as must the returned slot for subsequent - * use by radix_tree_deref_slot() and radix_tree_replace slot. - * Caller must hold tree write locked across slot lookup and - * replace. + * This function can be called under rcu_read_lock iff the slot is not + * modified by radix_tree_replace_slot, otherwise it must be called + * exclusive from other writers. Any dereference of the slot must be done + * using radix_tree_deref_slot. 
*/ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) { unsigned int height, shift; struct radix_tree_node *node, **slot; - node = root->rnode; + node = rcu_dereference(root->rnode); if (node == NULL) return NULL; @@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) do { slot = (struct radix_tree_node **) (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = *slot; + node = rcu_dereference(*slot); if (node == NULL) return NULL; @@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, EXPORT_SYMBOL(radix_tree_next_hole); static unsigned int -__lookup(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, unsigned int max_items, unsigned long *next_index) { unsigned int nr_found = 0; @@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index, /* Bottom level: grab some items */ for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { - struct radix_tree_node *node; index++; - node = slot->slots[i]; - if (node) { - results[nr_found++] = rcu_dereference(node); + if (slot->slots[i]) { + results[nr_found++] = &(slot->slots[i]); if (nr_found == max_items) goto out; } @@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, ret = 0; while (ret < max_items) { - unsigned int nr_found; + unsigned int nr_found, slots_found, i; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup(node, results + ret, cur_index, + slots_found = __lookup(node, (void ***)results + ret, cur_index, max_items - ret, &next_index); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } ret += nr_found; if (next_index == 0) break; @@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, } EXPORT_SYMBOL(radix_tree_gang_lookup); +/** + * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * + * Performs an index-ascending scan of the tree for present items. Places + * their slots at *@results and returns the number of items which were + * placed at *@results. + * + * The implementation is naive. + * + * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must + * be dereferenced with radix_tree_deref_slot, and if using only RCU + * protection, radix_tree_deref_slot may fail requiring a retry. 
+ */ +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items) +{ + unsigned long max_index; + struct radix_tree_node *node; + unsigned long cur_index = first_index; + unsigned int ret; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup(node, results + ret, cur_index, + max_items - ret, &next_index); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_slot); + /* * FIXME: the two tag_get()s here should use find_next_bit() instead of * open-coding the search. */ static unsigned int -__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, unsigned int max_items, unsigned long *next_index, unsigned int tag) { unsigned int nr_found = 0; @@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, unsigned long j = index & RADIX_TREE_MAP_MASK; for ( ; j < RADIX_TREE_MAP_SIZE; j++) { - struct radix_tree_node *node; index++; if (!tag_get(slot, tag, j)) continue; - node = slot->slots[j]; /* * Even though the tag was found set, we need to * recheck that we have a non-NULL node, because @@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, * lookup ->slots[x] without a lock (ie. can't * rely on its value remaining the same). */ - if (node) { - node = rcu_dereference(node); - results[nr_found++] = node; + if (slot->slots[j]) { + results[nr_found++] = &(slot->slots[j]); if (nr_found == max_items) goto out; } @@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, ret = 0; while (ret < max_items) { - unsigned int nr_found; + unsigned int nr_found, slots_found, i; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup_tag(node, results + ret, cur_index, - max_items - ret, &next_index, tag); + slots_found = __lookup_tag(node, (void ***)results + ret, + cur_index, max_items - ret, &next_index, tag); + nr_found = 0; + for (i = 0; i < slots_found; i++) { + struct radix_tree_node *slot; + slot = *(((void ***)results)[ret + i]); + if (!slot) + continue; + results[ret + nr_found] = rcu_dereference(slot); + nr_found++; + } ret += nr_found; if (next_index == 0) break; @@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, EXPORT_SYMBOL(radix_tree_gang_lookup_tag); /** + * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a + * radix tree based on a tag + * @root: radix tree root + * @results: where the results of the lookup are placed + * @first_index: start the lookup from this key + * @max_items: place up to this many items at *results + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) + * + * Performs an index-ascending scan of the tree for present items which + * have the tag indexed by @tag set. 
Places the slots at *@results and + * returns the number of slots which were placed at *@results. + */ +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag) +{ + struct radix_tree_node *node; + unsigned long max_index; + unsigned long cur_index = first_index; + unsigned int ret; + + /* check the root's tag bit */ + if (!root_tag_get(root, tag)) + return 0; + + node = rcu_dereference(root->rnode); + if (!node) + return 0; + + if (!radix_tree_is_indirect_ptr(node)) { + if (first_index > 0) + return 0; + results[0] = (void **)&root->rnode; + return 1; + } + node = radix_tree_indirect_to_ptr(node); + + max_index = radix_tree_maxindex(node->height); + + ret = 0; + while (ret < max_items) { + unsigned int slots_found; + unsigned long next_index; /* Index of next search */ + + if (cur_index > max_index) + break; + slots_found = __lookup_tag(node, results + ret, + cur_index, max_items - ret, &next_index, tag); + ret += slots_found; + if (next_index == 0) + break; + cur_index = next_index; + } + + return ret; +} +EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); + + +/** * radix_tree_shrink - shrink height of a radix tree to minimal * @root radix tree root */ @@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) EXPORT_SYMBOL(radix_tree_tagged); static void -radix_tree_node_ctor(struct kmem_cache *cachep, void *node) +radix_tree_node_ctor(void *node) { memset(node, 0, sizeof(struct radix_tree_node)); } diff --git a/lib/random32.c b/lib/random32.c index ca87d86992b..217d5c4b666 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state) return (state->s1 ^ state->s2 ^ state->s3); } -static void __set_random32(struct rnd_state *state, unsigned long s) +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) { - if (s == 0) - s = 1; /* default seed is 1 */ - -#define LCG(n) (69069 * n) - state->s1 = LCG(s); - state->s2 = LCG(state->s1); - state->s3 = LCG(state->s2); - - /* "warm it up" */ - __random32(state); - __random32(state); - __random32(state); - __random32(state); - __random32(state); - __random32(state); + return (x < m) ? 
x + m : x; } /** @@ -107,7 +96,7 @@ void srandom32(u32 entropy) */ for_each_possible_cpu (i) { struct rnd_state *state = &per_cpu(net_rand_state, i); - __set_random32(state, state->s1 ^ entropy); + state->s1 = __seed(state->s1 ^ entropy, 1); } } EXPORT_SYMBOL(srandom32); @@ -122,7 +111,19 @@ static int __init random32_init(void) for_each_possible_cpu(i) { struct rnd_state *state = &per_cpu(net_rand_state,i); - __set_random32(state, i + jiffies); + +#define LCG(x) ((x) * 69069) /* super-duper LCG */ + state->s1 = __seed(LCG(i + jiffies), 1); + state->s2 = __seed(LCG(state->s1), 7); + state->s3 = __seed(LCG(state->s2), 15); + + /* "warm it up" */ + __random32(state); + __random32(state); + __random32(state); + __random32(state); + __random32(state); + __random32(state); } return 0; } @@ -135,13 +136,18 @@ core_initcall(random32_init); static int __init random32_reseed(void) { int i; - unsigned long seed; for_each_possible_cpu(i) { struct rnd_state *state = &per_cpu(net_rand_state,i); + u32 seeds[3]; + + get_random_bytes(&seeds, sizeof(seeds)); + state->s1 = __seed(seeds[0], 1); + state->s2 = __seed(seeds[1], 7); + state->s3 = __seed(seeds[2], 15); - get_random_bytes(&seed, sizeof(seed)); - __set_random32(state, seed); + /* mix it in */ + __random32(state); } return 0; } diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 485e3040dcd..26187edcc7e 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -3,6 +3,9 @@ * * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> * + * 2008-05-01 rewrite the function and use a ratelimit_state data struct as + * parameter. Now every user can use their own standalone ratelimit_state. + * * This file is released under the GPLv2. * */ @@ -11,41 +14,44 @@ #include <linux/jiffies.h> #include <linux/module.h> +static DEFINE_SPINLOCK(ratelimit_lock); + /* * __ratelimit - rate limiting - * @ratelimit_jiffies: minimum time in jiffies between two callbacks - * @ratelimit_burst: number of callbacks we do before ratelimiting + * @rs: ratelimit_state data * - * This enforces a rate limit: not more than @ratelimit_burst callbacks - * in every ratelimit_jiffies + * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks + * in every @rs->ratelimit_jiffies */ -int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) +int __ratelimit(struct ratelimit_state *rs) { - static DEFINE_SPINLOCK(ratelimit_lock); - static unsigned toks = 10 * 5 * HZ; - static unsigned long last_msg; - static int missed; unsigned long flags; - unsigned long now = jiffies; - spin_lock_irqsave(&ratelimit_lock, flags); - toks += now - last_msg; - last_msg = now; - if (toks > (ratelimit_burst * ratelimit_jiffies)) - toks = ratelimit_burst * ratelimit_jiffies; - if (toks >= ratelimit_jiffies) { - int lost = missed; - - missed = 0; - toks -= ratelimit_jiffies; - spin_unlock_irqrestore(&ratelimit_lock, flags); - if (lost) - printk(KERN_WARNING "%s: %d messages suppressed\n", - __func__, lost); + if (!rs->interval) return 1; + + spin_lock_irqsave(&ratelimit_lock, flags); + if (!rs->begin) + rs->begin = jiffies; + + if (time_is_before_jiffies(rs->begin + rs->interval)) { + if (rs->missed) + printk(KERN_WARNING "%s: %d callbacks suppressed\n", + __func__, rs->missed); + rs->begin = 0; + rs->printed = 0; + rs->missed = 0; } - missed++; + if (rs->burst && rs->burst > rs->printed) + goto print; + + rs->missed++; spin_unlock_irqrestore(&ratelimit_lock, flags); return 0; + +print: + rs->printed++; + spin_unlock_irqrestore(&ratelimit_lock, flags); + return 1; } 
EXPORT_SYMBOL(__ratelimit); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 876ba6d5b67..8d2688ff135 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -422,9 +422,12 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, { unsigned int offset = 0; struct sg_mapping_iter miter; + unsigned long flags; sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); + local_irq_save(flags); + while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; @@ -442,6 +445,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, sg_miter_stop(&miter); + local_irq_restore(flags); return offset; } diff --git a/lib/show_mem.c b/lib/show_mem.c new file mode 100644 index 00000000000..238e72a18ce --- /dev/null +++ b/lib/show_mem.c @@ -0,0 +1,63 @@ +/* + * Generic show_mem() implementation + * + * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> + * All code subject to the GPL version 2. + */ + +#include <linux/mm.h> +#include <linux/nmi.h> +#include <linux/quicklist.h> + +void show_mem(void) +{ + pg_data_t *pgdat; + unsigned long total = 0, reserved = 0, shared = 0, + nonshared = 0, highmem = 0; + + printk(KERN_INFO "Mem-Info:\n"); + show_free_areas(); + + for_each_online_pgdat(pgdat) { + unsigned long i, flags; + + pgdat_resize_lock(pgdat, &flags); + for (i = 0; i < pgdat->node_spanned_pages; i++) { + struct page *page; + unsigned long pfn = pgdat->node_start_pfn + i; + + if (unlikely(!(i % MAX_ORDER_NR_PAGES))) + touch_nmi_watchdog(); + + if (!pfn_valid(pfn)) + continue; + + page = pfn_to_page(pfn); + + if (PageHighMem(page)) + highmem++; + + if (PageReserved(page)) + reserved++; + else if (page_count(page) == 1) + nonshared++; + else if (page_count(page) > 1) + shared += page_count(page) - 1; + + total++; + } + pgdat_resize_unlock(pgdat, &flags); + } + + printk(KERN_INFO "%lu pages RAM\n", total); +#ifdef CONFIG_HIGHMEM + printk(KERN_INFO "%lu pages HighMem\n", highmem); +#endif + printk(KERN_INFO "%lu pages reserved\n", reserved); + printk(KERN_INFO "%lu pages shared\n", shared); + printk(KERN_INFO "%lu pages non-shared\n", nonshared); +#ifdef CONFIG_QUICKLIST + printk(KERN_INFO "%lu pages in pagetable cache\n", + quicklist_total_size()); +#endif +} diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index c4381d9516f..0f8fc22ed10 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void) { unsigned long preempt_count = preempt_count(); int this_cpu = raw_smp_processor_id(); - cpumask_of_cpu_ptr_declare(this_mask); if (likely(preempt_count)) goto out; @@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - cpumask_of_cpu_ptr_next(this_mask, this_cpu); - - if (cpus_equal(current->cpus_allowed, *this_mask)) + if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) goto out; /* diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d568894df8c..8826fdf0f18 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -491,8 +491,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, * the lowest available address range. 
*/ dma_addr_t handle; - handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); - if (swiotlb_dma_mapping_error(handle)) + handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE); + if (swiotlb_dma_mapping_error(hwdev, handle)) return NULL; ret = bus_to_virt(handle); @@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, } int -swiotlb_dma_mapping_error(dma_addr_t dma_addr) +swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); } diff --git a/lib/syscall.c b/lib/syscall.c new file mode 100644 index 00000000000..a4f7067f72f --- /dev/null +++ b/lib/syscall.c @@ -0,0 +1,75 @@ +#include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <asm/syscall.h> + +static int collect_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc) +{ + struct pt_regs *regs = task_pt_regs(target); + if (unlikely(!regs)) + return -EAGAIN; + + *sp = user_stack_pointer(regs); + *pc = instruction_pointer(regs); + + *callno = syscall_get_nr(target, regs); + if (*callno != -1L && maxargs > 0) + syscall_get_arguments(target, regs, 0, maxargs, args); + + return 0; +} + +/** + * task_current_syscall - Discover what a blocked task is doing. + * @target: thread to examine + * @callno: filled with system call number or -1 + * @args: filled with @maxargs system call arguments + * @maxargs: number of elements in @args to fill + * @sp: filled with user stack pointer + * @pc: filled with user PC + * + * If @target is blocked in a system call, returns zero with *@callno + * set to the the call's number and @args filled in with its arguments. + * Registers not used for system call arguments may not be available and + * it is not kosher to use &struct user_regset calls while the system + * call is still in progress. Note we may get this result if @target + * has finished its system call but not yet returned to user mode, such + * as when it's stopped for signal handling or syscall exit tracing. + * + * If @target is blocked in the kernel during a fault or exception, + * returns zero with *@callno set to -1 and does not fill in @args. + * If so, it's now safe to examine @target using &struct user_regset + * get() calls as long as we're sure @target won't return to user mode. + * + * Returns -%EAGAIN if @target does not remain blocked. + * + * Returns -%EINVAL if @maxargs is too large (maximum is six). 
+ */ +int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc) +{ + long state; + unsigned long ncsw; + + if (unlikely(maxargs > 6)) + return -EINVAL; + + if (target == current) + return collect_syscall(target, callno, args, maxargs, sp, pc); + + state = target->state; + if (unlikely(!state)) + return -EAGAIN; + + ncsw = wait_task_inactive(target, state); + if (unlikely(!ncsw) || + unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || + unlikely(wait_task_inactive(target, state) != ncsw)) + return -EAGAIN; + + return 0; +} +EXPORT_SYMBOL_GPL(task_current_syscall); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 1dc2d1d18fa..c399bc1093c 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -27,6 +27,7 @@ #include <asm/page.h> /* for PAGE_SIZE */ #include <asm/div64.h> +#include <asm/sections.h> /* for dereference_function_descriptor() */ /* Works only for digits and letters, but small and fast */ #define TOLOWER(x) ((x) | 0x20) @@ -220,7 +221,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ if (len == 0) \ return -EINVAL; \ \ - val = simple_strtoul(cp, &tail, base); \ + val = simple_strtou##type(cp, &tail, base); \ if ((*tail == '\0') || \ ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ *res = val; \ @@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio return buf; } -static inline void *dereference_function_descriptor(void *ptr) -{ -#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) - void *p; - if (!probe_kernel_address(ptr, p)) - ptr = p; -#endif - return ptr; -} - static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) { unsigned long value = (unsigned long) ptr; |
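
For context, the helpers added in lib/bcd.c above are self-contained enough to check in isolation. The sketch below is not part of the commit; it copies the two conversions into a plain userspace C program (hypothetical file name test_bcd.c) and round-trips the two-digit values an RTC driver would typically feed them.

    /*
     * Standalone userspace sketch of the BCD helpers introduced in lib/bcd.c.
     * The function bodies are copied from the diff above; the test values are
     * illustrative only.
     */
    #include <assert.h>
    #include <stdio.h>

    static unsigned bcd2bin(unsigned char val)
    {
            /* low nibble holds the ones digit, high nibble the tens digit */
            return (val & 0x0f) + (val >> 4) * 10;
    }

    static unsigned char bin2bcd(unsigned val)
    {
            /* tens digit into the high nibble, ones digit into the low nibble */
            return ((val / 10) << 4) + val % 10;
    }

    int main(void)
    {
            unsigned v;

            /* RTC registers commonly store seconds/minutes as BCD: 0x59 -> 59 */
            assert(bcd2bin(0x59) == 59);
            assert(bin2bcd(59) == 0x59);

            /* every two-digit value must round-trip */
            for (v = 0; v < 100; v++)
                    assert(bcd2bin(bin2bcd(v)) == v);

            printf("BCD helpers round-trip 0..99 correctly\n");
            return 0;
    }

Compiled with a stock C compiler, this exercises the same arithmetic the kernel's bcd2bin()/bin2bcd() exports now provide to drivers.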