Diffstat (limited to 'include/linux')
81 files changed, 2342 insertions, 518 deletions
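Editor's note: among the helpers this series adds is resource_size() in the <linux/ioport.h> hunk below. The following is a minimal, hedged sketch (not part of the patch set) of how a driver might use it instead of open-coding "end - start + 1"; the device and function names are hypothetical.

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Map the first MMIO window of a (hypothetical) platform device. */
    static void __iomem *example_map_regs(struct platform_device *pdev)
    {
            struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

            if (!res)
                    return NULL;

            /* resource_size(res) expands to res->end - res->start + 1 */
            return ioremap(res->start, resource_size(res));
    }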
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 4c4142c5aa6..327f60658d9 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -97,6 +97,7 @@ header-y += ioctl.h header-y += ip6_tunnel.h header-y += ipmi_msgdefs.h header-y += ipsec.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += iso_fs.h @@ -355,6 +356,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include <linux/list.h> + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 88d68081a0f..e61f22be4d0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -655,6 +655,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_plug_device(struct request_queue *); +extern void blk_plug_device_unlocked(struct request_queue *); extern int blk_remove_plug(struct request_queue *); extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct request_queue *, diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 652470b687c..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + 
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 50cfe8ceb47..eadaab44015 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) -TAS_BUFFER_FNS(Lock, locked) BUFFER_FNS(Req, req) TAS_BUFFER_FNS(Req, req) BUFFER_FNS(Mapped, mapped) @@ -321,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) __wait_on_buffer(bh); } +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); +} + static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (test_set_buffer_locked(bh)) + if (!trylock_buffer(bh)) __lock_buffer(bh); } diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include <linux/types.h> +#include <linux/swab.h> + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out over time as the base version + * handles constant folding. 
+ */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __be16 
__cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index d2961b66d53..57faa60de9b 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -55,4 +55,49 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) + +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +static inline bool try_wait_for_completion(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irq(&x->wait.lock); + return ret; +} + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +static inline bool completion_done(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + spin_unlock_irq(&x->wait.lock); + return ret; +} + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d62c19ff041..7f627775c94 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -40,6 +40,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/err.h> #include <asm/atomic.h> @@ -129,8 +130,25 @@ struct configfs_attribute { /* * Users often need to create attribute structures for their configurable * attributes, containing a configfs_attribute member and function pointers - * for the show() and store() operations on that attribute. They can use - * this macro (similar to sysfs' __ATTR) to make defining attributes easier. + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it The argument _item is the name of the + * config_item structure. 
+ */ +#define CONFIGFS_ATTR_STRUCT(_item) \ +struct _item##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. + * An example: + * #define MYITEM_ATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_ATTR(_name, _mode, _show, _store) */ #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ { \ @@ -142,6 +160,52 @@ struct configfs_attribute { .show = _show, \ .store = _store, \ } +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_ATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} + +/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _item is the name of the config_item structure. + * This macro expects the attributes to be named "struct <name>_attribute" + * and the function to_<name>() to exist; + */ +#define CONFIGFS_ATTR_OPS(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} \ +static ssize_t _item##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_item##_attr->store) \ + ret = _item##_attr->store(_item, page, count); \ + return ret; \ +} /* * If allow_link() exists, the item can symlink(2) out to other diff --git a/include/linux/connector.h b/include/linux/connector.h index 96a89d3d672..5c7f9468f75 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -38,8 +38,9 @@ #define CN_W1_VAL 0x1 #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 +#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ -#define CN_NETLINK_USERS 5 +#define CN_NETLINK_USERS 6 /* * Maximum connector's message size. 
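Editor's note: as a hedged usage sketch (not part of the patch), the fragment below wires together the CONFIGFS_ATTR_STRUCT(), CONFIGFS_ATTR_OPS() and __CONFIGFS_ATTR_RO() helpers added to <linux/configfs.h> above. The "myitem" structure and its attribute are hypothetical.

    #include <linux/configfs.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/stringify.h>

    struct myitem {
            struct config_item item;
            int value;
    };

    static inline struct myitem *to_myitem(struct config_item *item)
    {
            return container_of(item, struct myitem, item);
    }

    CONFIGFS_ATTR_STRUCT(myitem);   /* defines struct myitem_attribute     */
    CONFIGFS_ATTR_OPS(myitem);      /* defines myitem_attr_show()/_store() */

    static ssize_t myitem_value_show(struct myitem *mi, char *page)
    {
            return sprintf(page, "%d\n", mi->value);
    }

    /* Read-only attribute: only a show() routine is required. */
    static struct myitem_attribute myitem_attr_value =
            __CONFIGFS_ATTR_RO(value, myitem_value_show);

    /* The generated show/store wrappers plug straight into the item ops. */
    static struct configfs_item_operations myitem_item_ops = {
            .show_attribute  = myitem_attr_show,
            .store_attribute = myitem_attr_store,
    };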
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2270ca5ec63..6fd5668aa57 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -106,6 +106,7 @@ struct cpufreq_policy { #define CPUFREQ_ADJUST (0) #define CPUFREQ_INCOMPATIBLE (1) #define CPUFREQ_NOTIFY (2) +#define CPUFREQ_START (3) #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 96d0509fb8d..d3219d73f8e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -287,7 +287,7 @@ static inline const cpumask_t *get_cpu_mask(unsigned int cpu) * gcc optimizes it out (it's a constant) and there's no huge stack * variable created: */ -#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); }) +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000000..b69222cc1fd --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,50 @@ +/* Credentials management + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#define get_current_user() (get_uid(current->user)) + +#define task_uid(task) ((task)->uid) +#define task_gid(task) ((task)->gid) +#define task_euid(task) ((task)->euid) +#define task_egid(task) ((task)->egid) + +#define current_uid() (current->uid) +#define current_gid() (current->gid) +#define current_euid() (current->euid) +#define current_egid() (current->egid) +#define current_suid() (current->suid) +#define current_sgid() (current->sgid) +#define current_fsuid() (current->fsuid) +#define current_fsgid() (current->fsgid) +#define current_cap() (current->cap_effective) + +#define current_uid_gid(_uid, _gid) \ +do { \ + *(_uid) = current->uid; \ + *(_gid) = current->gid; \ +} while(0) + +#define current_euid_egid(_uid, _gid) \ +do { \ + *(_uid) = current->euid; \ + *(_gid) = current->egid; \ +} while(0) + +#define current_fsuid_fsgid(_uid, _gid) \ +do { \ + *(_uid) = current->fsuid; \ + *(_gid) = current->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98202c672fd..07aa198f19e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -230,6 +230,7 @@ extern void d_delete(struct dentry *); extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct inode *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct inode *, struct dentry *, struct qstr *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446b642..c30879cf93b 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -27,6 +27,7 @@ struct dm9000_plat_data { unsigned int flags; + unsigned char dev_addr[6]; /* allow replacement IO routines */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 
8bb5e87df36..b4b038b89ee 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -27,9 +27,24 @@ struct ethtool_cmd { __u8 autoneg; /* Enable or disable autonegotiation */ __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ - __u32 reserved[4]; + __u16 speed_hi; + __u16 reserved2; + __u32 reserved[3]; }; +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + #define ETHTOOL_BUSINFO_LEN 32 /* these strings are set to whatever the driver author decides... */ struct ethtool_drvinfo { diff --git a/include/linux/file.h b/include/linux/file.h index 27c64bdc68c..a20259e248a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -34,8 +34,9 @@ extern struct file *fget(unsigned int fd); extern struct file *fget_light(unsigned int fd, int *fput_needed); extern void set_close_on_exec(unsigned int fd, int flag); extern void put_filp(struct file *); +extern int alloc_fd(unsigned start, unsigned flags); extern int get_unused_fd(void); -extern int get_unused_fd_flags(int flags); +#define get_unused_fd_flags(flags) alloc_fd(0, (flags)) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h deleted file mode 100644 index efef11db790..00000000000 --- a/include/linux/harrier_defs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/linux/harrier_defs.h - * - * Definitions for Motorola MCG Harrier North Bridge & Memory controller - * - * Author: Dale Farnsworth - * dale.farnsworth@mvista.com - * - * Extracted from asm-ppc/harrier.h by: - * Randy Vinson - * rvinson@mvista.com - * - * Copyright 2001-2002 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef __ASMPPC_HARRIER_DEFS_H -#define __ASMPPC_HARRIER_DEFS_H - -#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 - -#define HARRIER_VEND_DEV_ID 0x1057480b - -#define HARRIER_VENI_OFF 0x00 - -#define HARRIER_REVI_OFF 0x05 -#define HARRIER_UCTL_OFF 0xd0 -#define HARRIER_XTAL64_MASK 0x02 - -#define HARRIER_MISC_CSR_OFF 0x1c -#define HARRIER_RSTOUT 0x01000000 -#define HARRIER_SYSCON 0x08000000 -#define HARRIER_EREADY 0x10000000 -#define HARRIER_ERDYS 0x20000000 - -/* Function exception registers */ -#define HARRIER_FEEN_OFF 0x40 /* enable */ -#define HARRIER_FEST_OFF 0x44 /* status */ -#define HARRIER_FEMA_OFF 0x48 /* mask */ -#define HARRIER_FECL_OFF 0x4c /* clear */ - -#define HARRIER_FE_DMA 0x80 -#define HARRIER_FE_MIDB 0x40 -#define HARRIER_FE_MIM0 0x20 -#define HARRIER_FE_MIM1 0x10 -#define HARRIER_FE_MIP 0x08 -#define HARRIER_FE_UA0 0x04 -#define HARRIER_FE_UA1 0x02 -#define HARRIER_FE_ABT 0x01 - -#define HARRIER_SERIAL_0_OFF 0xc0 - -#define HARRIER_MBAR_OFF 0xe0 -#define HARRIER_MBAR_MSK 0xfffc0000 -#define HARRIER_MPIC_CSR_OFF 0xe4 -#define HARRIER_MPIC_OPI_ENABLE 0x40 -#define HARRIER_MPIC_IFEVP_OFF 0x10200 -#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff -#define HARRIER_MPIC_IFEDE_OFF 0x10210 - -/* - * Define the Memory Controller register offsets. - */ -#define HARRIER_SDBA_OFF 0x110 -#define HARRIER_SDBB_OFF 0x114 -#define HARRIER_SDBC_OFF 0x118 -#define HARRIER_SDBD_OFF 0x11c -#define HARRIER_SDBE_OFF 0x120 -#define HARRIER_SDBF_OFF 0x124 -#define HARRIER_SDBG_OFF 0x128 -#define HARRIER_SDBH_OFF 0x12c - -#define HARRIER_SDB_ENABLE 0x00000100 -#define HARRIER_SDB_SIZE_MASK 0xf -#define HARRIER_SDB_SIZE_SHIFT 16 -#define HARRIER_SDB_BASE_MASK 0xff -#define HARRIER_SDB_BASE_SHIFT 24 - -/* - * Define outbound register offsets. - */ -#define HARRIER_OTAD0_OFF 0x220 -#define HARRIER_OTOF0_OFF 0x224 -#define HARRIER_OTAD1_OFF 0x228 -#define HARRIER_OTOF1_OFF 0x22c -#define HARRIER_OTAD2_OFF 0x230 -#define HARRIER_OTOF2_OFF 0x234 -#define HARRIER_OTAD3_OFF 0x238 -#define HARRIER_OTOF3_OFF 0x23c - -#define HARRIER_OTADX_START_MSK 0xffff0000UL -#define HARRIER_OTADX_END_MSK 0x0000ffffUL - -#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL -#define HARRIER_OTOFX_ENA 0x80UL -#define HARRIER_OTOFX_WPE 0x10UL -#define HARRIER_OTOFX_SGE 0x08UL -#define HARRIER_OTOFX_RAE 0x04UL -#define HARRIER_OTOFX_MEM 0x02UL -#define HARRIER_OTOFX_IOM 0x01UL - -/* - * Define generic message passing register offsets - */ -/* Mirrored registers (visible from both PowerPC and PCI space) */ -#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ -#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ -#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ -#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ -#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ - -#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ -#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ -#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ - -/* PowerPC-only registers */ -#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ - -/* PCI-only registers */ -#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ -#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ -#define HARRIER_MG_OMI0 (1<<4) -#define HARRIER_MG_OMI1 (1<<5) - -#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_XCSR_TO_PCFS_OFF 0x300 - -/* - * Define message passing attribute register offset - */ -#define 
HARRIER_MPAT_OFF 0x44 - -/* - * Define inbound attribute register offsets. - */ -#define HARRIER_ITSZ0_OFF 0x48 -#define HARRIER_ITAT0_OFF 0x4c - -#define HARRIER_ITSZ1_OFF 0x50 -#define HARRIER_ITAT1_OFF 0x54 - -#define HARRIER_ITSZ2_OFF 0x58 -#define HARRIER_ITAT2_OFF 0x5c - -#define HARRIER_ITSZ3_OFF 0x60 -#define HARRIER_ITAT3_OFF 0x64 - -/* inbound translation size constants */ -#define HARRIER_ITSZ_MSK 0xff -#define HARRIER_ITSZ_4KB 0x00 -#define HARRIER_ITSZ_8KB 0x01 -#define HARRIER_ITSZ_16KB 0x02 -#define HARRIER_ITSZ_32KB 0x03 -#define HARRIER_ITSZ_64KB 0x04 -#define HARRIER_ITSZ_128KB 0x05 -#define HARRIER_ITSZ_256KB 0x06 -#define HARRIER_ITSZ_512KB 0x07 -#define HARRIER_ITSZ_1MB 0x08 -#define HARRIER_ITSZ_2MB 0x09 -#define HARRIER_ITSZ_4MB 0x0A -#define HARRIER_ITSZ_8MB 0x0B -#define HARRIER_ITSZ_16MB 0x0C -#define HARRIER_ITSZ_32MB 0x0D -#define HARRIER_ITSZ_64MB 0x0E -#define HARRIER_ITSZ_128MB 0x0F -#define HARRIER_ITSZ_256MB 0x10 -#define HARRIER_ITSZ_512MB 0x11 -#define HARRIER_ITSZ_1GB 0x12 -#define HARRIER_ITSZ_2GB 0x13 - -/* inbound translation offset */ -#define HARRIER_ITOF_SHIFT 0x10 -#define HARRIER_ITOF_MSK 0xffff - -/* inbound translation atttributes */ -#define HARRIER_ITAT_PRE (1<<3) -#define HARRIER_ITAT_RAE (1<<4) -#define HARRIER_ITAT_WPE (1<<5) -#define HARRIER_ITAT_MEM (1<<6) -#define HARRIER_ITAT_ENA (1<<7) -#define HARRIER_ITAT_GBL (1<<16) - -#define HARRIER_LBA_OFF 0x80 -#define HARRIER_LBA_MSK (1<<31) - -#define HARRIER_XCSR_SIZE 1024 - -/* macros to calculate message passing register offsets */ -#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) - -#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 -#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 -#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 -#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 -#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 - -#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) - -#endif /* __ASMPPC_HARRIER_DEFS_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index e6e9c814da6..f13255e0640 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -12,7 +12,9 @@ #ifndef __I2C_PNX_H__ #define __I2C_PNX_H__ -#include <asm/arch/i2c.h> +#include <linux/pm.h> + +struct platform_device; struct i2c_pnx_mif { int ret; /* Return value */ diff --git a/include/linux/ide.h b/include/linux/ide.h index b846bc44a27..87c12ed9695 100644 --- a/include/linux/ide.h +++ 
b/include/linux/ide.h @@ -219,18 +219,7 @@ static inline int __ide_default_irq(unsigned long base) #include <asm-generic/ide_iops.h> #endif -#ifndef MAX_HWIFS -#if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA) -# define MAX_HWIFS 1 -#else -# define MAX_HWIFS 10 -#endif -#endif - -#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) -#undef MAX_HWIFS -#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS -#endif +#define MAX_HWIFS 10 /* Currently only m68k, apus and m8xx need it */ #ifndef IDE_ARCH_ACK_INTR @@ -509,24 +498,33 @@ struct ide_tp_ops { extern const struct ide_tp_ops default_tp_ops; +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * @set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @selectproc: tweaks hardware to select drive + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ struct ide_port_ops { - /* host specific initialization of a device */ void (*init_dev)(ide_drive_t *); - /* routine to program host for PIO mode */ void (*set_pio_mode)(ide_drive_t *, const u8); - /* routine to program host for DMA mode */ void (*set_dma_mode)(ide_drive_t *, const u8); - /* tweaks hardware to select drive */ void (*selectproc)(ide_drive_t *); - /* chipset polling based on hba specifics */ int (*reset_poll)(ide_drive_t *); - /* chipset specific changes to default for device-hba resets */ void (*pre_reset)(ide_drive_t *); - /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); - /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); - /* check host's drive quirk list */ void (*quirkproc)(ide_drive_t *); u8 (*mdma_filter)(ide_drive_t *); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a1630ba0b87..7f4df7c7659 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -506,6 +506,19 @@ struct ieee80211_channel_sw_ie { u8 count; } __attribute__ ((packed)); +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[0]; +} __attribute__ ((packed)); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 2baace2788a..31d8629e75a 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[0]; -} __attribute__((aligned(4))); +} __attribute__((packed)); /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * diff --git a/include/linux/init.h b/include/linux/init.h index 11b84e10605..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata 
boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 62aa4f895ab..58ff4e74b2f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -223,35 +223,6 @@ static inline int disable_irq_wake(unsigned int irq) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif -/* - * Temporary defines for UP kernels, until all code gets fixed. - */ -#ifndef CONFIG_SMP -static inline void __deprecated cli(void) -{ - local_irq_disable(); -} -static inline void __deprecated sti(void) -{ - local_irq_enable(); -} -static inline void __deprecated save_flags(unsigned long *x) -{ - local_save_flags(*x); -} -#define save_flags(x) save_flags(&x) -static inline void __deprecated restore_flags(unsigned long x) -{ - local_irq_restore(x); -} - -static inline void __deprecated save_and_cli(unsigned long *x) -{ - local_irq_save(*x); -} -#define save_and_cli(x) save_and_cli(&x) -#endif /* CONFIG_SMP */ - /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index f8598f58394..c975caf7538 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -8,4 +8,3 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long align_mask); extern void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr); -extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 2cd07cc2968..22d2115458c 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -118,6 +118,10 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(struct resource *res) +{ + return res->end - res->start + 1; +} /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h new file mode 100644 index 00000000000..ec6eb49af2d --- /dev/null +++ b/include/linux/ip_vs.h @@ -0,0 +1,245 @@ +/* + * IP Virtual Server + * data structure and functionality definitions + */ + +#ifndef _IP_VS_H +#define _IP_VS_H + +#include <linux/types.h> /* For __beXX types in userland */ + +#define IP_VS_VERSION_CODE 0x010201 +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF + +/* + * Virtual Service Flags + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ + +/* + * Destination Server Flags + */ +#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ +#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ + +/* + * IPVS sync daemon states + */ +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* + * IPVS socket options + */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) 
+#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON + + +/* + * IPVS Connection Flags + */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ +#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ +#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ +#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ +#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ +#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ +#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ +#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ +#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ + +#define IP_VS_SCHEDNAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 + + +/* + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. 
+ */ +struct ip_vs_service_user { + /* virtual service addresses */ + u_int16_t protocol; + __be32 addr; /* virtual ip address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask */ +}; + + +struct ip_vs_dest_user { + /* destination server address */ + __be32 addr; + __be16 port; + + /* real server options */ + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ +}; + + +/* + * IPVS statistics object (for user space) + */ +struct ip_vs_stats_user +{ + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 outpps; /* current out packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outbps; /* current out byte rate */ +}; + + +/* The argument to IP_VS_SO_GET_INFO */ +struct ip_vs_getinfo { + /* version number */ + unsigned int version; + + /* size of connection hash table */ + unsigned int size; + + /* number of virtual services */ + unsigned int num_services; +}; + + +/* The argument to IP_VS_SO_GET_SERVICE */ +struct ip_vs_service_entry { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout */ + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +struct ip_vs_dest_entry { + __be32 addr; /* destination address */ + __be16 port; + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ + + u_int32_t activeconns; /* active connections */ + u_int32_t inactconns; /* inactive connections */ + u_int32_t persistconns; /* persistent connections */ + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +/* The argument to IP_VS_SO_GET_DESTS */ +struct ip_vs_get_dests { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + + /* the real servers */ + struct ip_vs_dest_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_SERVICES */ +struct ip_vs_get_services { + /* number of virtual services */ + unsigned int num_services; + + /* service table */ + struct ip_vs_service_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_TIMEOUT */ +struct ip_vs_timeout_user { + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; +}; + + +/* The argument to IP_VS_SO_GET_DAEMON */ +struct ip_vs_daemon_user { + /* sync daemon state (master/backup) */ + int state; + + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + + /* SyncID we belong to */ + int syncid; +}; + 
+#endif /* _IP_VS_H */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 57aefa160a9..b9614488744 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -108,8 +108,7 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) static inline void print_ip_sym(unsigned long ip) { - printk("[<%p>]", (void *) ip); - print_symbol(" %s\n", ip); + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fdbbf72ca2e..2651f805ba6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -75,6 +75,12 @@ extern const char linux_proc_banner[]; */ #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_CRIT "<2>" /* critical conditions */ @@ -102,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -112,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to. */ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 82f88a8a827..32110cede64 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -130,8 +130,8 @@ void vmcoreinfo_append_str(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); unsigned long paddr_vmcoreinfo_note(void); -#define VMCOREINFO_OSRELEASE(name) \ - vmcoreinfo_append_str("OSRELEASE=%s\n", #name) +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 0ea064cbfbc..69511f74f91 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -371,6 +371,7 @@ struct kvm_trace_rec { #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ /* * ioctls for VM fds diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 07d68a8ae8e..8525afc5310 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -121,6 +121,12 @@ struct kvm { struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif }; /* The guest did something we don't support. 
*/ @@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) #define kvm_trace_cleanup() ((void)0) #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Both reads happen under the mmu_lock and both values are + * modified under mmu_lock, so there's no need of smb_rmb() + * here in between, otherwise mmu_notifier_count should be + * read before mmu_notifier_seq, see + * mmu_notifier_invalidate_range_end write side. + */ + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 5b247b8a6b3..06b80337303 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -60,9 +60,9 @@ /* note: prints function name for you */ #ifdef ATA_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #ifdef ATA_VERBOSE_DEBUG -#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define VPRINTK(fmt, args...) #endif /* ATA_VERBOSE_DEBUG */ @@ -71,7 +71,7 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ -#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 @@ -750,6 +750,7 @@ struct ata_port_operations { void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); void (*dev_config)(struct ata_device *dev); @@ -951,6 +952,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, diff --git a/include/linux/list.h b/include/linux/list.h index 453916bc041..db35ef02e74 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -214,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) return !list_empty(head) && (head->next == head->prev); } +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. 
You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + static inline void __list_splice(const struct list_head *list, - struct list_head *head) + struct list_head *prev, + struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - first->prev = head; - head->next = first; + first->prev = prev; + prev->next = first; - last->next = at; - at->prev = last; + last->next = next; + next->prev = last; } /** - * list_splice - join two lists + * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ @@ -237,7 +277,19 @@ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) - __list_splice(list, head); + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); } /** @@ -251,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { - __list_splice(list, head); + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -189,6 +190,14 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + struct held_lock { /* * One-way hash of the dependency chain up to this point. 
We @@ -205,14 +214,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -226,11 +235,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* @@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -313,8 +326,9 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct 
task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# else +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 5c948f33781..8f2d60da04e 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -37,7 +37,7 @@ */ #define MISDN_MAJOR_VERSION 1 #define MISDN_MINOR_VERSION 0 -#define MISDN_RELEASE 18 +#define MISDN_RELEASE 19 /* primitives for information exchange * generell format @@ -242,7 +242,8 @@ struct mISDNhead { #define TEI_SAPI 63 #define CTRL_SAPI 0 -#define MISDN_CHMAP_SIZE 4 +#define MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) #define SOL_MISDN 0 @@ -275,11 +276,32 @@ struct mISDN_devinfo { u_int Dprotocols; u_int Bprotocols; u_int protocol; - u_long channelmap[MISDN_CHMAP_SIZE]; + u_char channelmap[MISDN_CHMAP_SIZE]; u_int nrbchan; char name[MISDN_MAX_IDLEN]; }; +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << (nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + /* CONTROL_CHANNEL parameters */ #define MISDN_CTRL_GETOP 0x0000 #define MISDN_CTRL_LOOP 0x0001 @@ -405,7 +427,7 @@ struct mISDNdevice { u_int Dprotocols; u_int Bprotocols; u_int nrbchan; - u_long channelmap[MISDN_CHMAP_SIZE]; + u_char channelmap[MISDN_CHMAP_SIZE]; struct list_head bchannels; struct mISDNchannel *teimgr; struct device dev; @@ -430,7 +452,7 @@ struct mISDNstack { #endif }; -/* global alloc/queue dunctions */ +/* global alloc/queue functions */ static inline struct sk_buff * mI_alloc_skb(unsigned int len, gfp_t gfp_mask) diff --git a/include/linux/maple.h 
b/include/linux/maple.h index 523a286bb47..c23d3f51ba4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -2,6 +2,7 @@ #define __LINUX_MAPLE_H #include <linux/device.h> +#include <mach/maple.h> extern struct bus_type maple_bus_type; @@ -33,6 +34,7 @@ struct mapleq { void *sendbuf, *recvbuf, *recvbufdcsp; unsigned char length; enum maple_code command; + struct mutex mutex; }; struct maple_devinfo { @@ -49,7 +51,6 @@ struct maple_devinfo { struct maple_device { struct maple_driver *driver; struct mapleq *mq; - void *private_data; void (*callback) (struct mapleq * mq); unsigned long when, interval, function; struct maple_devinfo devinfo; @@ -68,10 +69,17 @@ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq * mq), unsigned long interval, unsigned long function); -int maple_driver_register(struct device_driver *drv); -void maple_add_packet(struct mapleq *mq); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void maple_clear_dev(struct maple_device *mdev); #define to_maple_dev(n) container_of(n, struct maple_device, dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) +#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + #endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000000..e83c7f2036f --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,36 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> + +struct t7l66xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000000..fa06e0610b8 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,23 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. 
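A hedged board-code sketch of how the t7l66xb platform data above might be wired up; the names and the irq base are hypothetical and assume <linux/mfd/t7l66xb.h>.

static struct tmio_nand_data hypothetical_nand_data = {
	/* partition table / badblock pattern as required by the board */
};

static struct t7l66xb_platform_data hypothetical_t7l66xb_pdata = {
	.irq_base	= 160,		/* start of the T7L66XB_NR_IRQS range */
	.nand_data	= &hypothetical_nand_data,
};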
+ * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index 7cc824a58f7..fec7b3f7a81 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. */ -#ifndef TC6393XB_H -#define TC6393XB_H +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H /* Also one should provide the CK3P6MI clock */ struct tc6393xb_platform_data { @@ -29,7 +29,7 @@ struct tc6393xb_platform_data { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - int irq_base; /* a base for cascaded irq */ + int irq_base; /* base for subdevice irqs */ int gpio_base; struct tmio_nand_data *nand_data; @@ -40,9 +40,6 @@ struct tc6393xb_platform_data { */ #define IRQ_TC6393_NAND 0 #define IRQ_TC6393_MMC 1 -#define IRQ_TC6393_OHCI 2 -#define IRQ_TC6393_SERIAL 3 -#define IRQ_TC6393_FB 4 #define TC6393XB_NR_IRQS 8 diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 9438d8c9ac1..ec612e66391 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -1,6 +1,21 @@ #ifndef MFD_TMIO_H #define MFD_TMIO_H +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + /* * data for the NAND controller */ @@ -10,8 +25,4 @@ struct tmio_nand_data { unsigned int num_partitions; }; -#define TMIO_NAND_CONFIG "tmio-nand-config" -#define TMIO_NAND_CONTROL "tmio-nand-control" -#define TMIO_NAND_IRQ "tmio-nand" - #endif diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 071cf96cf01..6f65b2c8bb8 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -39,17 +39,18 @@ #include <linux/mlx4/doorbell.h> struct mlx4_cqe { - __be32 my_qpn; + __be32 vlan_my_qpn; __be32 immed_rss_invalid; __be32 g_mlpath_rqpn; - u8 sl; - u8 reserved1; + __be16 sl_vid; __be16 rlid; - __be32 ipoib_status; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; __be32 byte_cnt; __be16 wqe_index; __be16 checksum; - u8 reserved2[3]; + u8 reserved[3]; u8 owner_sr_opcode; }; @@ -64,6 +65,11 @@ struct mlx4_err_cqe { }; enum { + MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_QPN_MASK = 0xffffff, +}; + +enum { MLX4_CQE_OWNER_MASK = 0x80, MLX4_CQE_IS_SEND_MASK = 0x40, MLX4_CQE_OPCODE_MASK = 0x1f @@ -86,13 +92,19 @@ enum { }; enum { - MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, - MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, - MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, - MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, - MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, - MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, - MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + 
MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, + MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, }; static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, diff --git a/include/linux/mm.h b/include/linux/mm.h index 866a3dbe5c7..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -744,6 +744,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -832,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST /* * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm (force=0 and doesn't return any vmas). @@ -846,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); -#else -/* - * Should probably be moved to asm-generic, and architectures can include it if - * they don't implement their own get_user_pages_fast. - */ -#define get_user_pages_fast(start, nr_pages, write, pages) \ -({ \ - struct mm_struct *mm = current->mm; \ - int ret; \ - \ - down_read(&mm->mmap_sem); \ - ret = get_user_pages(current, mm, start, nr_pages, \ - write, 0, pages, NULL); \ - up_read(&mm->mmap_sem); \ - \ - ret; \ -}) -#endif - /* * A callback you can register to apply pressure to ageable caches. * @@ -1041,7 +1023,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); diff --git a/include/linux/mount.h b/include/linux/mount.h index b5efaa2132a..30a1d63b6fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -105,7 +105,8 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, struct nameidata; -extern int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, +struct path; +extern int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags, struct list_head *fslist); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 4ed40caff4e..92263654855 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -272,7 +272,11 @@ static inline void mtd_erase_callback(struct erase_info *instr) printk(KERN_INFO args); \ } while(0) #else /* CONFIG_MTD_DEBUG */ -#define DEBUG(n, args...) do { } while(0) +#define DEBUG(n, args...) 
\ + do { \ + if (0) \ + printk(KERN_INFO args); \ + } while(0) #endif /* CONFIG_MTD_DEBUG */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 83f678702df..81774e5facf 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -177,7 +177,9 @@ typedef enum { #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) -#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT)) +/* Large page NAND with SOFT_ECC should support subpage reads */ +#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ + && (chip->page_shift > 9)) /* Mask to zero out the chip options, which come from the id table */ #define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b4d056ceab9..488c56e649b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -61,9 +61,7 @@ struct wireless_dev; #define NET_XMIT_DROP 1 /* skb dropped */ #define NET_XMIT_CN 2 /* congestion notification */ #define NET_XMIT_POLICED 3 /* skb is shot by police */ -#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; - (TC use only - dev_queue_xmit - returns this as NET_XMIT_SUCCESS) */ +#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ @@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n) enum netdev_queue_state_t { __QUEUE_STATE_XOFF, + __QUEUE_STATE_FROZEN, }; struct netdev_queue { @@ -636,7 +635,7 @@ struct net_device unsigned int real_num_tx_queues; unsigned long tx_queue_len; /* Max frames per queue allowed */ - + spinlock_t tx_global_lock; /* * One part is mostly used on xmit path (device) */ @@ -1099,6 +1098,11 @@ static inline int netif_queue_stopped(const struct net_device *dev) return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } +static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); +} + /** * netif_running - test if up * @dev: network device @@ -1475,6 +1479,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) txq->xmit_lock_owner = smp_processor_id(); } +static inline int __netif_tx_trylock(struct netdev_queue *txq) +{ + int ok = spin_trylock(&txq->_xmit_lock); + if (likely(ok)) + txq->xmit_lock_owner = smp_processor_id(); + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1484,12 +1508,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) */ static inline void netif_tx_lock(struct net_device *dev) { - int cpu = smp_processor_id(); unsigned int i; + int cpu; + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. 
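A hedged driver-side sketch of how the new frozen bit pairs with __netif_tx_trylock(): a TX-reclaim path must not wake a queue that netif_tx_lock() has frozen. The reclaim function and its use of queue 0 are hypothetical.

static void hypothetical_tx_reclaim(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (!__netif_tx_trylock(txq))
		return;			/* someone else holds the xmit lock */

	/* ... free completed tx descriptors here ... */

	if (netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		netif_tx_wake_queue(txq);

	__netif_tx_unlock(txq);
}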
+ */ __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); } } @@ -1499,40 +1534,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev) netif_tx_lock(dev); } -static inline int __netif_tx_trylock(struct netdev_queue *txq) -{ - int ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); - return ok; -} - -static inline int netif_tx_trylock(struct net_device *dev) -{ - return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); -} - -static inline void __netif_tx_unlock(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock(&txq->_xmit_lock); -} - -static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock_bh(&txq->_xmit_lock); -} - static inline void netif_tx_unlock(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_unlock(txq); - } + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); + } + spin_unlock(&dev->tx_global_lock); } static inline void netif_tx_unlock_bh(struct net_device *dev) @@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; + int cpu; - netif_tx_lock_bh(dev); + local_bh_disable(); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); } - netif_tx_unlock_bh(dev); + local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 22ce29995f1..a049df4f223 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -30,6 +30,9 @@ enum tcp_conntrack { /* Be liberal in window checking */ #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 +/* Has unacknowledged data */ +#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 + struct nf_ct_tcp_flags { u_int8_t flags; u_int8_t mask; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 54590a9a103..c74d3e87531 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \ struct page; /* forward declaration */ -PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) @@ -239,9 +239,6 @@ static inline void __SetPageUptodate(struct page *page) { smp_wmb(); __set_bit(PG_uptodate, &(page)->flags); -#ifdef CONFIG_S390 - page_clear_dirty(page); -#endif } static inline void SetPageUptodate(struct page *page) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a39b38ccdc9..5da31c12101 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -143,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page) return 1; } +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int 
page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON(page_count(page) == 0); + atomic_add(count, &page->_count); + +#else + if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + return 0; +#endif + VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + + return 1; +} + static inline int page_freeze_refs(struct page *page, int count) { return likely(atomic_cmpxchg(&page->_count, count, 0) == count); @@ -227,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -extern void remove_from_page_cache(struct page *page); -extern void __remove_from_page_cache(struct page *page); - -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run SetPageLocked() against it. - */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - ClearPageLocked(page); - return error; -} - /* * Return byte-offset into filesystem object for page. */ @@ -271,13 +271,28 @@ extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); +static inline void set_page_locked(struct page *page) +{ + set_bit(PG_locked, &page->flags); +} + +static inline void clear_page_locked(struct page *page) +{ + clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return !test_and_set_bit(PG_locked, &page->flags); +} + /* * lock_page may only be called if we have the page's inode pinned. */ static inline void lock_page(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page(page); } @@ -289,7 +304,7 @@ static inline void lock_page(struct page *page) static inline int lock_page_killable(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) return __lock_page_killable(page); return 0; } @@ -301,7 +316,7 @@ static inline int lock_page_killable(struct page *page) static inline void lock_page_nosync(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page_nosync(page); } @@ -386,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return ret; } +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void remove_from_page_cache(struct page *page); +extern void __remove_from_page_cache(struct page *page); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run set_page_locked() against it. 
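A minimal sketch of the new trylock_page() helper in an opportunistic scan that must not sleep on a contended page; the scan function itself is hypothetical.

static void hypothetical_scan_page(struct page *page)
{
	if (!trylock_page(page))
		return;			/* page busy, try again later */

	/* ... inspect or write back the page ... */

	unlock_page(page);
}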
+ */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + clear_page_locked(page); + return error; +} + #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/parser.h b/include/linux/parser.h index cc554ca8bc7..7dcd0507575 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -14,7 +14,7 @@ struct match_token { const char *pattern; }; -typedef const struct match_token match_table_t[]; +typedef struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; diff --git a/include/linux/pci.h b/include/linux/pci.h index 825be3878f6..c0e14008a3c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -641,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); @@ -680,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus); /* Proper probing supporting hot-pluggable devices */ int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -static inline int __must_check pci_register_driver(struct pci_driver *driver) -{ - return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); -} + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) void pci_unregister_driver(struct pci_driver *dev); void pci_remove_behind_bridge(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 35a78415acc..9ec2bcce8e8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2177,8 +2177,6 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 -#define PCI_VENDOR_ID_RDC 0x17f3 - #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 2e4e97bd19f..d74f75ed1e4 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,6 +1,6 @@ /* interface for the pm_qos_power infrastructure of the linux kernel. 
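A sketch of a driver using the pci_register_driver() macro form above, so KBUILD_MODNAME expands at the call site rather than in the header. IDs and callbacks are hypothetical; assumes <linux/pci.h>, <linux/init.h> and <linux/module.h>.

static const struct pci_device_id hypothetical_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ }
};

static int hypothetical_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void hypothetical_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver hypothetical_driver = {
	.name		= "hypothetical",
	.id_table	= hypothetical_ids,
	.probe		= hypothetical_probe,
	.remove		= hypothetical_remove,
};

static int __init hypothetical_init(void)
{
	return pci_register_driver(&hypothetical_driver);
}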
* - * Mark Gross + * Mark Gross <mgross@linux.intel.com> */ #include <linux/list.h> #include <linux/notifier.h> diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 68ed19ccf1f..ea96ead1d39 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -78,6 +78,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index fd31756e1a0..ea7416c901d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -172,7 +172,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) child->ptrace = 0; if (unlikely(ptrace)) { child->ptrace = current->ptrace; - __ptrace_link(child, current->parent); + ptrace_link(child, current->parent); } } diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 742187f7a05..ca6b9b5c8d5 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -43,6 +43,8 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path, int remount); +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path); int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int vfs_quota_off(struct super_block *sb, int type, int remount); diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 9f2549ac0e2..c200b9a34af 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -128,6 +128,7 @@ struct mddev_s #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ +#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ int ro; diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..4ab84362272 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h new file mode 100644 index 00000000000..e84b0a9feda --- /dev/null +++ b/include/linux/regulator/bq24022.h @@ -0,0 +1,21 @@ +/* + * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adpater) + * 1-Cell Li-Ion Charger connected via GPIOs. + * + * Copyright (c) 2008 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +/** + * bq24022_mach_info - platform data for bq24022 + * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging + * @gpio_iset2: GPIO line connected to the ISET2 pin, used to limit charging current to 100 mA / 500 mA + */ +struct bq24022_mach_info { + int gpio_nce; + int gpio_iset2; +}; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 00000000000..afdc4558bb9 --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,284 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. 
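A hedged consumer-side sketch of the DRMS idea described above: the driver hints its expected load (in microamps) and the core picks the most efficient mode. The helper name and the load figures are hypothetical; regulator_set_optimum_mode() is declared below.

static void hypothetical_set_load(struct regulator *reg, int busy)
{
	/* 100 mA when active, 1 mA when idle */
	regulator_set_optimum_mode(reg, busy ? 100000 : 1000);
}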
+ */ + +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator shut down by software. + * + * NOTE: These events can be OR'ed together when passed into handler. + */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer The regulator consumer for the supply. This will be managed + * by the bulk API. + * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. + */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); + +/* regulator output control and status */ +int regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); + +int regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_get_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value so the actual return value doesn't matter. 
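A consumer sketch of the bulk helpers above: grab and enable several supplies in one go. The supply names and the power-on helper are hypothetical.

static struct regulator_bulk_data hypothetical_supplies[] = {
	{ .supply = "AVDD" },
	{ .supply = "DVDD" },
};

static int hypothetical_power_on(struct device *dev)
{
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(hypothetical_supplies),
				 hypothetical_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(hypothetical_supplies),
				     hypothetical_supplies);
}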
+ */ + return (struct regulator *)id; +} +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 00000000000..1d712c7172a --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,99 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. + */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#include <linux/device.h> +#include <linux/regulator/consumer.h> + +struct regulator_constraints; +struct regulator_dev; + +/** + * struct regulator_ops - regulator operations. + * + * This struct describes regulator operations. 
+ */ +struct regulator_ops { + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); + int (*get_voltage) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in regulator.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + + /* the operations below are for configuration of regulator state when + * it's parent PMIC enters a global STANBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in regulator.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); +}; + +/* + * Regulators can either control voltage or current. + */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Regulator descriptor + * + */ +struct regulator_desc { + const char *name; + int id; + struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; +}; + + +struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, + void *reg_data); +void regulator_unregister(struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +#endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 00000000000..1387a5d2190 --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,22 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct fixed_voltage_config { + const char *supply_name; + int microvolts; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 00000000000..11e737dbfcf --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,104 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. 
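A hedged driver-side sketch built from the ops/desc/register interfaces above, along the lines of a fixed 3.3 V supply; everything here (names, voltage, probe context) is hypothetical.

static int hypothetical_get_voltage(struct regulator_dev *rdev)
{
	return 3300000;			/* 3.3 V, in microvolts */
}

static int hypothetical_enable(struct regulator_dev *rdev)
{
	return 0;
}

static int hypothetical_is_enabled(struct regulator_dev *rdev)
{
	return 1;
}

static struct regulator_ops hypothetical_ops = {
	.get_voltage	= hypothetical_get_voltage,
	.enable		= hypothetical_enable,
	.is_enabled	= hypothetical_is_enabled,
};

static struct regulator_desc hypothetical_desc = {
	.name	= "hypothetical-ldo",
	.id	= 0,
	.ops	= &hypothetical_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};

/* a probe routine would then call:
 *	rdev = regulator_register(&hypothetical_desc, pdev);
 */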
+ */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> + +struct regulator; + +/* + * Regulator operation constraint flags. These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 + +/** + * struct regulator_state - regulator state during low power syatem states + * + * This describes a regulators state during a system wide low power state. + */ +struct regulator_state { + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + */ +struct regulation_constraints { + + char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* constriant flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint iff min == max */ +}; + +int regulator_set_supply(const char *regulator, const char *regulator_supply); + +const char *regulator_get_supply(const char *regulator); + +int regulator_set_machine_constraints(const char *regulator, + struct regulation_constraints *constraints); + +int regulator_set_device_supply(const char *regulator, struct device *dev, + const char *supply); + +int regulator_suspend_prepare(suspend_state_t state); + +#endif diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c5f6e54ec6a..741d1a62cc3 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -68,7 +68,8 @@ enum rfkill_state { * @user_claim_unsupported: Whether the hardware supports exclusive * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exlusively by userspace. - * @mutex: Guards switch state transitions + * @mutex: Guards switch state transitions. It serializes callbacks + * and also protects the state. * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. 
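A hedged board-code sketch using the machine-level hooks above; the regulator name, the consumer supply name and the constraint values are hypothetical.

static struct regulation_constraints hypothetical_ldo1_constraints = {
	.min_uV		= 1800000,
	.max_uV		= 3300000,
	.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
	.boot_on	= 1,
};

static void hypothetical_board_init_regulators(struct device *consumer_dev)
{
	regulator_set_machine_constraints("LDO1",
					  &hypothetical_ldo1_constraints);
	/* route the consumer's "Vcc" supply to LDO1 */
	regulator_set_device_supply("LDO1", consumer_dev, "Vcc");
}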
@@ -89,12 +90,13 @@ struct rfkill { const char *name; enum rfkill_type type; - enum rfkill_state state; bool user_claim_unsupported; bool user_claim; + /* the mutex serializes callbacks and also protects + * the state */ struct mutex mutex; - + enum rfkill_state state; void *data; int (*toggle_radio)(void *data, enum rfkill_state state); int (*get_state)(void *data, enum rfkill_state *state); diff --git a/include/linux/sched.h b/include/linux/sched.h index 5270d449ff9..cfb0d87b99f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -87,6 +87,7 @@ struct sched_param { #include <linux/task_io_accounting.h> #include <linux/kobject.h> #include <linux/latencytop.h> +#include <linux/cred.h> #include <asm/processor.h> @@ -1551,16 +1552,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1572,28 +1567,11 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); +#else extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ea44f6621f..358661c9990 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,7 @@ typedef unsigned char *sk_buff_data_t; * @tc_index: Traffic control index * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) + * @do_not_encrypt: set to prevent encryption of this frame * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security 
marking @@ -316,7 +317,10 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - /* 14 bit hole */ +#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) + __u8 do_not_encrypt:1; +#endif + /* 0/13/14 bit hole */ #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; @@ -897,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len-skb_headlen(skb))) + !__pskb_pull_tail(skb, len - skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -914,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } /** @@ -1317,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len-size); + return skb_pad(skb, len - size); } static inline int skb_add_data(struct sk_buff *skb, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5bad61a93f6..2f5c16b1aac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -46,6 +46,7 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; + unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 5df62ef1280..7a6e6bba4a7 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -214,6 +214,8 @@ enum LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ + LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ + LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ __LINUX_MIB_MAX }; diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 00000000000..b4d9fa6f797 --- /dev/null +++ b/include/linux/spi/orion_spi.h @@ -0,0 +1,17 @@ +/* + * orion_spi.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
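A hedged receive-path sketch of the pskb_may_pull() pattern touched above: make sure the header bytes are linear before dereferencing them. The header layout and function are hypothetical.

struct hypothetical_hdr {
	__be16	type;
	__be16	len;
};

static int hypothetical_rx(struct sk_buff *skb)
{
	struct hypothetical_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return -EINVAL;		/* too short or not linearizable */

	hdr = (struct hypothetical_hdr *)skb->data;
	/* ... parse hdr ... */
	return 0;
}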
+ */ + +#ifndef __LINUX_SPI_ORION_SPI_H +#define __LINUX_SPI_ORION_SPI_H + +struct orion_spi_info { + u32 tclk; /* no <linux/clk.h> support yet */ +}; + + +#endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61e5610ad16..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -183,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/byteorder.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
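A short sketch of the byteswap interfaces declared below: the constant form folds at compile time, the variable forms go through the (possibly arch-optimised) inlines. The magic value and helpers are hypothetical.

#define HYPOTHETICAL_MAGIC	0x12345678

static __u32 hypothetical_wire_magic(void)
{
	return __swab32(HYPOTHETICAL_MAGIC);	/* 0x78563412, constant-folded */
}

static void hypothetical_swap_buffer(__u32 *buf, int n)
{
	int i;

	for (i = 0; i < n; i++)
		__swab32s(&buf[i]);		/* in-place variant */
}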
+ */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define 
swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b1875582c1a..b48d8196957 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -280,7 +280,7 @@ static inline void tracehook_report_clone(int trace, struct pt_regs *regs, unsigned long clone_flags, pid_t pid, struct task_struct *child) { - if (unlikely(trace)) { + if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { /* * The child starts up with an immediate SIGSTOP. */ @@ -487,14 +487,20 @@ static inline int tracehook_notify_jctl(int notify, int why) return notify || (current->ptrace & PT_PTRACED); } +#define DEATH_REAP -1 +#define DEATH_DELAYED_GROUP_LEADER -2 + /** * tracehook_notify_death - task is dead, ready to notify parent * @task: @current task now exiting * @death_cookie: value to pass to tracehook_report_death() * @group_dead: nonzero if this was the last thread in the group to die * - * Return the signal number to send our parent with do_notify_parent(), or - * zero to send no signal and leave a zombie, or -1 to self-reap right now. + * A return value >= 0 means call do_notify_parent() with that signal + * number. Negative return value can be %DEATH_REAP to self-reap right + * now, or %DEATH_DELAYED_GROUP_LEADER to leave a zombie without notifying + * our parent. Note that a return value of 0 means a do_notify_parent() call + * that sends no signal, but still wakes up a parent blocked in wait*(). * * Called with write_lock_irq(&tasklist_lock) held. */ @@ -502,7 +508,7 @@ static inline int tracehook_notify_death(struct task_struct *task, void **death_cookie, int group_dead) { if (task->exit_signal == -1) - return task->ptrace ? SIGCHLD : -1; + return task->ptrace ? SIGCHLD : DEATH_REAP; /* * If something other than our normal parent is ptracing us, then @@ -512,21 +518,21 @@ static inline int tracehook_notify_death(struct task_struct *task, if (thread_group_empty(task) && !ptrace_reparented(task)) return task->exit_signal; - return task->ptrace ? SIGCHLD : 0; + return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; } /** * tracehook_report_death - task is dead and ready to be reaped * @task: @current task now exiting - * @signal: signal number sent to parent, or 0 or -1 + * @signal: return value from tracehook_notify_death() * @death_cookie: value passed back from tracehook_notify_death() * @group_dead: nonzero if this was the last thread in the group to die * * Thread has just become a zombie or is about to self-reap. If positive, * @signal is the signal number just sent to the parent (usually %SIGCHLD). - * If @signal is -1, this thread will self-reap. If @signal is 0, this is - * a delayed_group_leader() zombie. The @death_cookie was passed back by - * tracehook_notify_death(). + * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is + * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. + * The @death_cookie was passed back by tracehook_notify_death(). * * If normal reaping is not inhibited, @task->exit_state might be changing * in parallel. diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f..0924cd9c30f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -110,6 +110,8 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend.
+ * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000000..630962c04ca --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,98 @@ +/* + * This is used for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb_hdrc". It encapsulates + * key configuration differences between boards. + */ + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) + */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ + unsigned soft_con:1; /* soft connect required */ + unsigned utm_16:1; /* utm data width is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HB iso */ + unsigned dma:1; /* supports DMA */ + unsigned vendor_req:1; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl; /* vendor control reg width */ + u8 vendor_stat; /* vendor status reg width */ + u8 dma_req_chan; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits; +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* Turn device clock on or off */ + int (*set_clock)(struct clk *clock, int is_on); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ diff --git
a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518..655341d0f53 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,8 @@ #include <linux/mutex.h> #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ -#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 14c0e91be9b..1c78d56c57e 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -74,7 +74,7 @@ void con_protect_unimap(struct vc_data *vc, int rdonly); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - (vc)->vc_toggle_meta ? 0x80 : 0]) + ((vc)->vc_toggle_meta ? 0x80 : 0)]) #else #define con_set_trans_old(arg) (0) #define con_get_trans_old(arg) (-EINVAL) @@ -86,6 +86,7 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #define con_copy_unimap(d, s) (0) #define con_get_unimap(vc, ct, uct, list) (-EINVAL) #define con_free_unimap(vc) do { ; } while (0) +#define con_protect_unimap(vc, rdonly) do { ; } while (0) #define vc_translate(vc, c) (c) #endif
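The spinlock.h and spinlock_api_smp.h hunks above only add the spin_lock_nest_lock() annotation and its _spin_lock_nest_lock() backend. A minimal sketch of the intended usage pattern follows; the pool/object names are invented, none of them come from the patch. The idea is that many locks of one class may be taken at once without lockdep warnings as long as every such path first takes a single outer lock, which is what the second argument names.

/* Hypothetical sketch: every per-object spinlock shares one lock class,
 * but any path that takes more than one of them holds pool_mutex first,
 * so lockdep treats the whole batch as nested under that outer lock. */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct obj {
	spinlock_t lock;		/* spin_lock_init() at creation time */
	struct list_head node;
};

static DEFINE_MUTEX(pool_mutex);	/* serializes whole-pool operations */
static LIST_HEAD(pool);

static void pool_freeze(void)
{
	struct obj *o;

	mutex_lock(&pool_mutex);
	list_for_each_entry(o, &pool, node)
		spin_lock_nest_lock(&o->lock, &pool_mutex);
	/* ... pool is quiesced; the matching thaw path walks the list
	 * again, spin_unlock()s each object and mutex_unlock()s ... */
}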
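The new <linux/swab.h> folds constant arguments via __builtin_constant_p() and documents __swahw32()/__swahb32() with worked examples. The standalone user-space program below is my own sketch: it mirrors the generic fallback masks and shifts rather than including the kernel header, and just shows what the three 32-bit operations compute, including the two documented identities.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)	/* reverse all four bytes */
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
	       ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

static uint32_t swahw32(uint32_t x)	/* swap the two 16-bit halfwords */
{
	return ((x & 0x0000ffffU) << 16) | ((x & 0xffff0000U) >> 16);
}

static uint32_t swahb32(uint32_t x)	/* swap bytes within each halfword */
{
	return ((x & 0x00ff00ffU) << 8) | ((x & 0xff00ff00U) >> 8);
}

int main(void)
{
	assert(swab32(0x12345678)  == 0x78563412);
	assert(swahw32(0x12340000) == 0x00001234);	/* example from __swahw32() doc */
	assert(swahb32(0x12345678) == 0x34127856);	/* example from __swahb32() doc */
	printf("byteswap identities hold\n");
	return 0;
}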
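The tracehook.h hunk replaces the bare 0/-1 convention of tracehook_notify_death() with the named DEATH_REAP and DEATH_DELAYED_GROUP_LEADER codes. Based only on the kernel-doc above, here is a hedged sketch of how a caller is expected to act on them; it is illustrative, not the real exit_notify(), and tasklist_lock handling and ptrace detach are omitted.

#include <linux/sched.h>
#include <linux/tracehook.h>

static void report_exit(struct task_struct *tsk, int group_dead)
{
	void *cookie;
	int signal = tracehook_notify_death(tsk, &cookie, group_dead);

	if (signal >= 0)
		/* 0 still notifies: it wakes a parent blocked in wait*()
		 * without queueing a signal */
		do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	tracehook_report_death(tsk, signal, cookie, group_dead);

	if (signal == DEATH_REAP)
		release_task(tsk);	/* self-reap; nobody will wait for us */
}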
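The new <linux/usb/musb.h> expects board code to hang a struct musb_hdrc_platform_data off a platform_device named "musb_hdrc". The fragment below is a hypothetical board-file sketch along those lines; the clock name, endpoint counts and the omitted register/IRQ resources are invented for illustration.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/usb/musb.h>

static struct musb_hdrc_config board_musb_config = {
	.multipoint	= 1,
	.dyn_fifo	= 1,
	.num_eps	= 16,		/* endpoints counted including ep0 */
	.ram_bits	= 12,
};

static struct musb_hdrc_platform_data board_musb_data = {
	.mode		= MUSB_OTG,
	.clock		= "usb_otg_ck",	/* hypothetical name for clk_get() */
	.power		= 50,		/* VBUS budget in 2 mA units = 100 mA */
	.config		= &board_musb_config,
};

static struct platform_device board_musb_device = {
	.name	= "musb_hdrc",		/* the name the header comment asks for */
	.id	= -1,
	.dev	= {
		.platform_data = &board_musb_data,
	},
	/* controller register window and IRQ resources omitted here */
};

static int __init board_usb_init(void)
{
	return platform_device_register(&board_musb_device);
}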
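The vt_kern.h change to vc_translate() is a pure operator-precedence fix: '|' binds tighter than '?:', so the unparenthesized form indexed the translation table with 0x80 or 0 instead of the character plus an optional meta bit. A small standalone demonstration of the difference:

#include <assert.h>

int main(void)
{
	int c = 0x41 /* 'A' */, toggle_meta = 0;

	/* parsed as (c | toggle_meta) ? 0x80 : 0 -- the character is lost */
	int buggy = c | toggle_meta ? 0x80 : 0;
	/* the added parentheses restore "c plus an optional meta bit" */
	int fixed = c | (toggle_meta ? 0x80 : 0);

	assert(buggy == 0x80);	/* wrong: every nonzero char maps to 0x80 */
	assert(fixed == 0x41);	/* right: plain 'A' when meta is off */
	return 0;
}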