author     Anton Arapov <anton@redhat.com>    2012-06-08 12:58:00 +0200
committer  Anton Arapov <anton@redhat.com>    2012-06-08 12:58:00 +0200
commit     6792a3f47a2e42d7164292bf7f1a55cfc4c91652
tree       b90c002bfbbeaec92f5d8a2383dcabf6524016f7  /arch/arm/mm
parent     fe2895d3d55146cac65b273c0f83e2c7e543cd0e
fedora kernel: b920e9b748c595f970bf80ede7832d39f8d567da (tag: v3.4.1-2)
Signed-off-by: Anton Arapov <anton@redhat.com>
Diffstat (limited to 'arch/arm/mm')
30 files changed, 259 insertions, 149 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7edef912163..7c8a7d8467b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -723,7 +723,7 @@ config CPU_HIGH_VECTOR
 	bool "Select the High exception vector"
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
-	  The exception vector can be vary depending on the platform
+	  The exception vector can vary depending on the platform
 	  design in nommu mode.
 	  If your platform needs to select high exception vector, say Y.
 	  Otherwise or if you are unsure, say N, and the low exception
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index caf14dc059e..9107231aacc 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,7 +22,8 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
-#include <asm/system.h>
+#include <asm/cp15.h>
+#include <asm/system_info.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e0b0e7a4ec6..dd3d59122cc 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <plat/cache-feroceon-l2.h>
 
 /*
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index db7bcc07800..2a8e380501e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -30,14 +30,14 @@
 
 static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
-static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
-static uint32_t l2x0_size;
+static u32 l2x0_way_mask;	/* Bitmask of active ways */
+static u32 l2x0_size;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
 struct l2x0_of_data {
-	void (*setup)(const struct device_node *, __u32 *, __u32 *);
+	void (*setup)(const struct device_node *, u32 *, u32 *);
 	void (*save)(void);
 	void (*resume)(void);
 };
@@ -287,7 +287,7 @@ static void l2x0_disable(void)
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static void l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(u32 cache_id)
 {
 	int lockregs;
 	int i;
@@ -306,11 +306,11 @@ static void l2x0_unlock(__u32 cache_id)
 	}
 }
 
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 {
-	__u32 aux;
-	__u32 cache_id;
-	__u32 way_size = 0;
+	u32 aux;
+	u32 cache_id;
+	u32 way_size = 0;
 	int ways;
 	const char *type;
 
@@ -391,7 +391,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 #ifdef CONFIG_OF
 static void __init l2x0_of_setup(const struct device_node *np,
-				 __u32 *aux_val, __u32 *aux_mask)
+				 u32 *aux_val, u32 *aux_mask)
 {
 	u32 data[2] = { 0, 0 };
 	u32 tag = 0;
@@ -425,7 +425,7 @@ static void __init l2x0_of_setup(const struct device_node *np,
 }
 
 static void __init pl310_of_setup(const struct device_node *np,
-				  __u32 *aux_val, __u32 *aux_mask)
+				  u32 *aux_val, u32 *aux_mask)
 {
 	u32 data[3] = { 0, 0, 0 };
 	u32 tag[3] = { 0, 0, 0 };
@@ -551,7 +551,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	{}
 };
 
-int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 {
 	struct device_node *np;
 	struct l2x0_of_data *data;
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 50868651890..1fbca05fe90 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <asm/hardware/cache-tauros2.h>
 
 
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 5a32020471e..6c3edeb66e7 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,7 +18,7 @@
  */
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <asm/system.h>
+#include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1635b..d130a5ece5d 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	fa_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de7dc0..49ee0c1a720 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e1d80..3935bddd476 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\n\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c23034..1267e64133b 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
@@ -71,21 +67,20 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-	flush_tlb_kernel_page(0xffff8000);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
-	mc_copy_user_page((void *)0xffff8000, kto);
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +88,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -111,7 +106,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cbb2b6..067d0fdd630 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d048a32..b85c5da2e51 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v4wt_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef..b9bcc9d7917 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
 #error FIX ME
 #endif
 
-#define from_address	(0xffff8000)
-#define to_address	(0xffffc000)
-
 static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
@@ -38,11 +35,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
 	void *kto, *kfrom;
 
-	kfrom = kmap_atomic(from, KM_USER0);
-	kto = kmap_atomic(to, KM_USER1);
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
-	kunmap_atomic(kto, KM_USER1);
-	kunmap_atomic(kfrom, KM_USER0);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +48,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-
-	kfrom = from_address + (offset << PAGE_SHIFT);
-	kto = to_address + (offset << PAGE_SHIFT);
+	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(kfrom);
-	flush_tlb_kernel_page(kto);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
 	copy_page((void *)kto, (void *)kfrom);
 
@@ -111,8 +105,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  */
 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long to = to_address + (offset << PAGE_SHIFT);
+	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(page));
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 
 	clear_page((void *)to);
 
 	raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde0702f1..03a2042aced 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced31..0fb85025344 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE	0xffff8000
-
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
@@ -93,21 +87,20 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +109,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile(
 	"mov	r1, %2			\n\
 	mov	r2, #0			\n\
@@ -133,7 +126,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999..db23ae4aaaa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);
 
 static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+	const void *caller)
 {
 	struct arm_vmregion *c;
 	size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
 	c = arm_vmregion_alloc(&consistent_head, align, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
 	if (c) {
 		pte_t *pte;
 		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
 #else	/* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
+#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
 #define __dma_free_remap(addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot)
+	    pgprot_t prot, const void *caller)
 {
 	struct page *page;
 	void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 
 	if (!arch_is_coherent())
-		addr = __dma_alloc_remap(page, size, gfp, prot);
+		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	else
 		addr = page_address(page);
 
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_dmacoherent(pgprot_kernel));
+			   pgprot_dmacoherent(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -386,7 +388,8 @@ void *
 dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_writecombine(pgprot_kernel));
+			   pgprot_writecombine(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_writecombine);
 
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
 
 static int __init dma_debug_do_init(void)
 {
+#ifdef CONFIG_MMU
+	arm_vmregion_create_proc("dma-mappings", &consistent_head);
+#endif
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bb7eac381a8..5bb48356d21 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -21,8 +21,9 @@
 #include <linux/perf_event.h>
 
 #include <asm/exception.h>
-#include <asm/system.h>
 #include <asm/pgtable.h>
+#include <asm/system_misc.h>
+#include <asm/system_info.h>
 #include <asm/tlbflush.h>
 
 #include "fault.h"
@@ -164,7 +165,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	struct siginfo si;
 
 #ifdef CONFIG_DEBUG_USER
-	if (user_debug & UDBG_SEGV) {
+	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
 		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
 		       tsk->comm, sig, addr, fsr);
 		show_pte(tsk->mm, addr);
@@ -245,7 +247,9 @@ good_area:
 	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
-	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+	/* Don't allow expansion below FIRST_USER_ADDRESS */
+	if (vma->vm_flags & VM_GROWSDOWN &&
+	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
 		goto good_area;
 out:
 	return fault;
@@ -318,7 +322,7 @@ retry:
 	 */
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1a8d4aa821b..77458548e03 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -16,22 +16,18 @@
 #include <asm/cachetype.h>
 #include <asm/highmem.h>
 #include <asm/smp_plat.h>
-#include <asm/system.h>
 #include <asm/tlbflush.h>
 
 #include "mm.h"
 
 #ifdef CONFIG_CPU_CACHE_VIPT
-#define ALIAS_FLUSH_START	0xffff4000
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	const int zero = 0;
 
-	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4"
@@ -42,13 +38,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 
 static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
 {
-	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 	unsigned long to;
 
-	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
-	flush_tlb_kernel_page(to);
+	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+	to = va + offset;
 	flush_icache_range(to, to + len);
 }
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abb..21b9e1bf9b7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -69,19 +69,18 @@ void *__kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
-	 * in place, so this TLB flush ensures the TLB is updated with the
-	 * new mapping.
+	 * in place, so the contained TLB flush ensures the TLB is updated
+	 * with the new mapping.
 	 */
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, mk_pte(page, kmap_prot));
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
 		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
-		local_flush_tlb_kernel_page(vaddr);
+		set_top_pte(vaddr, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
 
 	return (void *)vaddr;
 }
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
 struct page *kmap_atomic_to_page(const void *ptr)
 {
 	unsigned long vaddr = (unsigned long)ptr;
-	pte_t *pte;
 
 	if (vaddr < FIXADDR_START)
 		return virt_to_page(ptr);
 
-	pte = TOP_PTE(vaddr);
-	return pte_page(*pte);
+	return pte_page(get_top_pte(vaddr));
 }
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index feacf4c7671..ab88ed4f8e0 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -5,6 +5,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/system_info.h>
 
 pgd_t *idmap_pgd;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5dc7d127a40..8f5813bbffb 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,7 +32,6 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include <asm/memblock.h>
 
 #include "mm.h"
 
@@ -294,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 
@@ -659,7 +658,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
+#ifdef CONFIG_MODULES
 			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
 			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
 			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
 			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
@@ -678,7 +679,9 @@ void __init mem_init(void)
 			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)),
 #endif
+#ifdef CONFIG_MODULES
 			MLM(MODULES_VADDR, MODULES_END),
+#endif
 
 			MLK_ROUNDUP(_text, _etext),
 			MLK_ROUNDUP(__init_begin, __init_end),
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index e62956e1203..4614208369f 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -32,9 +32,6 @@ EXPORT_SYMBOL(pcibios_min_io);
 unsigned long pcibios_min_mem = 0x01000000;
 EXPORT_SYMBOL(pcibios_min_mem);
 
-unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC;
-EXPORT_SYMBOL(pci_flags);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
 	if ((unsigned long)addr >= VMALLOC_START &&
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 80632e8d753..4f55f5062ab 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -26,12 +26,14 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
+#include <asm/system_info.h>
 
 #include <asm/mach/map.h>
 #include "mm.h"
@@ -306,11 +308,15 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+				      unsigned int, void *) =
+	__arm_ioremap_caller;
+
 void __iomem *
 __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 {
-	return __arm_ioremap_caller(phys_addr, size, mtype,
-		__builtin_return_address(0));
+	return arch_ioremap_caller(phys_addr, size, mtype,
+		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
@@ -369,4 +375,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 	vunmap(addr);
 }
-EXPORT_SYMBOL(__iounmap);
+
+void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
+
+void __arm_iounmap(volatile void __iomem *io_addr)
+{
+	arch_iounmap(io_addr);
+}
+EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea483..27f4a619b35 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,7 +3,31 @@
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 
-#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE	0xffff8000
+#define COPYPAGE_V6_FROM	0xffff8000
+#define COPYPAGE_V6_TO		0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START	0xffff4000
+
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	set_pte_ext(ptep, pte, 0);
+	local_flush_tlb_kernel_page(va);
+}
+
+static inline pte_t get_top_pte(unsigned long va)
+{
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	return *ptep;
+}
 
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94c5a0c94f5..aa78de8bfdd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
 
+#include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
@@ -25,6 +26,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/system_info.h>
 #include <asm/traps.h>
 
 #include <asm/mach/arch.h>
@@ -487,7 +489,8 @@ static void __init build_mem_type_table(void)
 		 */
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_pte |= PTE_EXT_AF;
-			mem_types[i].prot_sect |= PMD_SECT_AF;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_SECT_AF;
 		}
 		kern_pgprot |= PTE_EXT_AF;
 		vecs_pgprot |= PTE_EXT_AF;
@@ -616,8 +619,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
@@ -997,11 +1000,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
+	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors_page = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE);
+
+	early_trap_init(vectors);
 
 	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1041,7 +1047,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 4fc6794cca4..d51225f90ae 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -13,6 +13,7 @@
 #include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/setup.h>
+#include <asm/traps.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
@@ -39,6 +40,7 @@ void __init sanity_check_meminfo(void)
  */
 void __init paging_init(struct machine_desc *mdesc)
 {
+	early_trap_init((void *)CONFIG_VECTORS_BASE);
 	bootmem_init();
 }
 
@@ -86,13 +88,17 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
+
 void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
 				   unsigned int mtype, void *caller)
 {
 	return __arm_ioremap(phys_addr, size, mtype);
 }
 
-void __iounmap(volatile void __iomem *addr)
+void (*arch_iounmap)(volatile void __iomem *);
+
+void __arm_iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3e78ccabd6..0acb089d0f7 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -12,6 +12,7 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 
+#include <asm/cp15.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 272558a133a..d217e9795d7 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -22,7 +22,6 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <asm/system.h>
 
 #include "proc-macros.S"
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 036fdbfdd62..a631016e1f8 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -1,5 +1,8 @@
+#include <linux/fs.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 
 #include "vmregion.h"
@@ -36,7 +39,7 @@
 
 struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp)
+		   size_t size, gfp_t gfp, const void *caller)
 {
 	unsigned long start = head->vm_start, addr = head->vm_end;
 	unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 	if (!new)
 		goto out;
 
+	new->caller = caller;
+
 	spin_lock_irqsave(&head->vm_lock, flags);
 
 	addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
 
 	kfree(c);
 }
+
+#ifdef CONFIG_PROC_FS
+static int arm_vmregion_show(struct seq_file *m, void *p)
+{
+	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
+
+	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
+		c->vm_end - c->vm_start);
+	if (c->caller)
+		seq_printf(m, " %pS", (void *)c->caller);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
+{
+	struct arm_vmregion_head *h = m->private;
+	spin_lock_irq(&h->vm_lock);
+	return seq_list_start(&h->vm_list, *pos);
+}
+
+static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct arm_vmregion_head *h = m->private;
+	return seq_list_next(p, &h->vm_list, pos);
+}
+
+static void arm_vmregion_stop(struct seq_file *m, void *p)
+{
+	struct arm_vmregion_head *h = m->private;
+	spin_unlock_irq(&h->vm_lock);
+}
+
+static const struct seq_operations arm_vmregion_ops = {
+	.start	= arm_vmregion_start,
+	.stop	= arm_vmregion_stop,
+	.next	= arm_vmregion_next,
+	.show	= arm_vmregion_show,
+};
+
+static int arm_vmregion_open(struct inode *inode, struct file *file)
+{
+	struct arm_vmregion_head *h = PDE(inode)->data;
+	int ret = seq_open(file, &arm_vmregion_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = h;
+	}
+	return ret;
+}
+
+static const struct file_operations arm_vmregion_fops = {
+	.open	= arm_vmregion_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	.release = seq_release,
+};
+
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
+	return 0;
+}
+#else
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+	return 0;
+}
+#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 15e9f044db9..162be662c08 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -19,11 +19,14 @@ struct arm_vmregion {
 	unsigned long		vm_end;
 	struct page		*vm_pages;
 	int			vm_active;
+	const void		*caller;
 };
 
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
 
+int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
+
 #endif
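
Note on the recurring conversion above: most hunks in this diff replace the open-coded set_pte_ext(TOP_PTE(va), ..., 0) plus flush_tlb_kernel_page(va) pair with the set_top_pte() helper introduced in arch/arm/mm/mm.h, which folds the local TLB flush into the PTE update. A minimal sketch of the resulting call-site pattern follows, modelled on the converted v6_clear_user_highpage_aliasing() and __kunmap_atomic() hunks; the function clear_via_alias() is a hypothetical illustration, not code from this commit.

/*
 * Sketch only: a hypothetical call site using the helpers this series
 * introduces. Kernel-internal code; it is meaningful only inside
 * arch/arm/mm, where "mm.h" provides set_top_pte() and top_pmd.
 */
#include <linux/mm.h>		/* struct page, mk_pte(), clear_page() */
#include <asm/pgtable.h>	/* PAGE_KERNEL, __pte() */
#include "mm.h"			/* set_top_pte(), get_top_pte() */

static void clear_via_alias(struct page *page, unsigned long alias_va)
{
	/* One call installs the PTE and performs the local TLB flush that
	 * call sites previously open-coded as set_pte_ext() followed by
	 * flush_tlb_kernel_page(). */
	set_top_pte(alias_va, mk_pte(page, PAGE_KERNEL));

	clear_page((void *)alias_va);

	/* Tear-down is the same operation with a zero PTE, as in the
	 * converted __kunmap_atomic(). */
	set_top_pte(alias_va, __pte(0));
}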