Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/dma-mapping-broken.h  |  4
-rw-r--r--  include/asm-generic/gpio.h                 | 25
-rw-r--r--  include/asm-generic/memory_model.h         |  2
-rw-r--r--  include/asm-generic/mman.h                 |  1
-rw-r--r--  include/asm-generic/pgtable.h              | 13
5 files changed, 42 insertions(+), 3 deletions(-)
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index a7f1a55ce6b..29413d3d460 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -3,7 +3,6 @@
 
 /* This is used for archs that do not support DMA */
 
-
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t flag)
@@ -19,4 +18,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 	BUG();
 }
 
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 00000000000..2d0aab1d861
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+	might_sleep();
+	return gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+	might_sleep();
+	gpio_set_value(gpio, value);
+}
+
+#endif /* _ASM_GENERIC_GPIO_H */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 8078cbd2c01..30d8d33491d 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -54,7 +54,7 @@
 #define __page_to_pfn(pg)					\
 ({	struct page *__pg = (pg);				\
 	int __sec = page_to_section(__pg);			\
-	__pg - __section_mem_map_addr(__nr_to_section(__sec));	\
+	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
 })
 
 #define __pfn_to_page(pfn)			\
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 3b41d2bb70d..5e3dde2ee5a 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -36,7 +36,6 @@
 #define MADV_DOFORK	11		/* do inherit across fork */
 
 /* compatibility flags */
-#define MAP_ANON	MAP_ANONYMOUS
 #define MAP_FILE	0
 
 #endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9d774d07d95..00c23433b39 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -183,6 +183,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests.  By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired.  This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
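
The asm-generic/gpio.h wrappers above are for platforms whose GPIOs are all directly accessible and never sleep: gpio_cansleep() always returns 0, and the _cansleep accessors just add a might_sleep() check before delegating to the regular accessors. A minimal sketch of how a driver might choose between the two call paths, assuming a platform that provides gpio_get_value()/gpio_set_value(); the GPIO number and function names below are hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <asm/gpio.h>			/* resolves to asm-generic/gpio.h on such platforms */

#define STATUS_GPIO	42		/* hypothetical GPIO number, for illustration only */

/* Process context: the sleeping variant is always safe to call here. */
static int status_read_blocking(void)
{
	return gpio_get_value_cansleep(STATUS_GPIO);
}

/* Atomic context (e.g. an IRQ handler): only non-sleeping GPIOs may be touched. */
static int status_read_atomic(void)
{
	if (gpio_cansleep(STATUS_GPIO))
		return -EAGAIN;		/* would have to defer to process context */
	return gpio_get_value(STATUS_GPIO);
}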
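
The new arch_enter_lazy_cpu_mode()/arch_leave_lazy_cpu_mode() hooks default to no-ops; a paravirtualized architecture can override them so the per-task state reloads done around a context switch are queued and flushed in one batch. A rough sketch of the pairing convention the comment describes, where the reload helpers are placeholders rather than real kernel functions:

#include <linux/sched.h>

/* Hypothetical context-switch fragment: entry and exit are paired, never nested. */
static void switch_extra_state(struct task_struct *prev, struct task_struct *next)
{
	arch_enter_lazy_cpu_mode();	/* start batching state updates */

	reload_segments(next);		/* placeholder helper */
	reload_tls(next);		/* placeholder helper */
	reload_io_bitmap(next);		/* placeholder helper */

	arch_leave_lazy_cpu_mode();	/* flush the batch before returning */
}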