Diffstat (limited to 'include/asm-ia64')
36 files changed, 311 insertions, 315 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index f7a51765430..d734585a23c 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h @@ -111,7 +111,11 @@ extern int additional_cpus; #ifdef CONFIG_ACPI_NUMA /* Proximity bitmap length; _PXM is at most 255 (8 bit)*/ +#ifdef CONFIG_IA64_NR_NODES +#define MAX_PXM_DOMAINS CONFIG_IA64_NR_NODES +#else #define MAX_PXM_DOMAINS (256) +#endif extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h index 77af457f4ad..edf2cebb296 100644 --- a/include/asm-ia64/asmmacro.h +++ b/include/asm-ia64/asmmacro.h @@ -38,6 +38,10 @@ name: /* * Helper macros for accessing user memory. + * + * When adding any new .section/.previous entries here, make sure to + * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or + * unpleasant things will happen. */ .section "__ex_table", "a" // declare section & section attributes @@ -51,6 +55,17 @@ name: [99:] x /* + * Tag MCA recoverable instruction ranges. + */ + + .section "__mca_table", "a" // declare section & section attributes + .previous + +# define MCA_RECOVER_RANGE(y) \ + .xdata4 "__mca_table", y-., 99f-.; \ + [99:] + +/* * Mark instructions that need a load of a virtual address patched to be * a load of a physical address. We use this either in critical performance * path (ivt.S - TLB miss processing) or in places where it might not be diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index d3e0dfa99e1..569ec7574ba 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ + } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 36d0fb95ea8..90921e16279 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -5,8 +5,8 @@ * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * - * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1) - * scheduler patch + * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 + * O(1) scheduler patch */ #include <linux/compiler.h> @@ -25,9 +25,9 @@ * restricted to acting on a single-word quantity. * * The address must be (at least) "long" aligned. - * Note that there are driver (e.g., eepro100) which use these operations to operate on - * hw-defined data-structures, so we can't easily change these operations to force a - * bigger alignment. + * Note that there are driver (e.g., eepro100) which use these operations to + * operate on hw-defined data-structures, so we can't easily change these + * operations to force a bigger alignment. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ @@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr) * ffz - find the first zero bit in a long word * @x: The long word to find the bit in * - * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if - * no zero exists, so code should check against ~0UL first... + * Returns the bit-number (0..63) of the first (least significant) zero bit. 
+ * Undefined if no zero exists, so code should check against ~0UL first... */ static inline unsigned long ffz (unsigned long x) @@ -345,13 +345,14 @@ fls (int t) x |= x >> 16; return ia64_popcnt(x); } -#define fls64(x) generic_fls64(x) + +#include <asm-generic/bitops/fls64.h> /* - * ffs: find first bit set. This is defined the same way as the libc and compiler builtin - * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on - * "int" values only and the result value is the bit number + 1. ffs(0) is defined to - * return zero. + * ffs: find first bit set. This is defined the same way as the libc and + * compiler builtin ffs routines, therefore differs in spirit from the above + * ffz (man ffs): it operates on "int" values only and the result value is the + * bit number + 1. ffs(0) is defined to return zero. */ #define ffs(x) __builtin_ffs(x) @@ -373,51 +374,17 @@ hweight64 (unsigned long x) #endif /* __KERNEL__ */ -extern int __find_next_zero_bit (const void *addr, unsigned long size, - unsigned long offset); -extern int __find_next_bit(const void *addr, unsigned long size, - unsigned long offset); - -#define find_next_zero_bit(addr, size, offset) \ - __find_next_zero_bit((addr), (size), (offset)) -#define find_next_bit(addr, size, offset) \ - __find_next_bit((addr), (size), (offset)) - -/* - * The optimizer actually does good code for this case.. - */ -#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) - -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -#define __clear_bit(nr, addr) clear_bit(nr, addr) +#include <asm-generic/bitops/ext2-non-atomic.h> -#define ext2_set_bit test_and_set_bit #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit test_and_clear_bit #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit - -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) -static inline int -sched_find_first_bit (unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return 64 + __ffs(b[1]); - return __ffs(b[2]) + 128; -} +#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h index 40dd25195d6..f0a104db8f2 100644 --- a/include/asm-ia64/cache.h +++ b/include/asm-ia64/cache.h @@ -25,4 +25,6 @@ # define SMP_CACHE_BYTES (1 << 3) #endif +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + #endif /* _ASM_IA64_CACHE_H */ diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h index c0b19106665..40d01d80610 100644 --- a/include/asm-ia64/compat.h +++ b/include/asm-ia64/compat.h @@ -189,6 +189,12 @@ compat_ptr (compat_uptr_t uptr) return (void __user *) (unsigned long) uptr; } +static inline compat_uptr_t +ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + static __inline__ void __user * compat_alloc_user_space (long len) { diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h new file mode 100644 index 00000000000..f3efaa22952 --- /dev/null +++ b/include/asm-ia64/dmi.h @@ -0,0 +1,6 @@ +#ifndef _ASM_DMI_H +#define _ASM_DMI_H 1 + +#include <asm/io.h> + +#endif diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h index a7122d85017..d069b6acddc 100644 --- a/include/asm-ia64/intel_intrin.h +++ b/include/asm-ia64/intel_intrin.h @@ -5,113 +5,10 @@ * * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com> * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com> + * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com> * */ -#include <asm/types.h> - -void __lfetch(int lfhint, void *y); -void __lfetch_excl(int lfhint, void *y); -void __lfetch_fault(int lfhint, void *y); -void __lfetch_fault_excl(int lfhint, void *y); - -/* In the following, whichFloatReg should be an integer from 0-127 */ -void __ldfs(const int whichFloatReg, void *src); -void __ldfd(const int whichFloatReg, void *src); -void __ldfe(const int whichFloatReg, void *src); -void __ldf8(const int whichFloatReg, void *src); -void __ldf_fill(const int whichFloatReg, void *src); -void __stfs(void *dst, const int whichFloatReg); -void __stfd(void *dst, const int whichFloatReg); -void __stfe(void *dst, const int whichFloatReg); -void __stf8(void *dst, const int whichFloatReg); -void __stf_spill(void *dst, const int whichFloatReg); - -void __st1_rel(void *dst, const __s8 value); -void __st2_rel(void *dst, const __s16 value); -void __st4_rel(void *dst, const __s32 value); -void __st8_rel(void *dst, const __s64 value); -__u8 __ld1_acq(void *src); -__u16 __ld2_acq(void *src); -__u32 __ld4_acq(void *src); -__u64 __ld8_acq(void *src); - -__u64 __fetchadd4_acq(__u32 *addend, const int increment); -__u64 __fetchadd4_rel(__u32 *addend, const int increment); -__u64 __fetchadd8_acq(__u64 *addend, const int increment); -__u64 __fetchadd8_rel(__u64 *addend, const int increment); - -__u64 __getf_exp(double d); - -/* OS Related Itanium(R) Intrinsics */ - -/* The names to use for whichReg and whichIndReg below come from - the include file asm/ia64regs.h */ - -__u64 
__getIndReg(const int whichIndReg, __s64 index); -__u64 __getReg(const int whichReg); - -void __setIndReg(const int whichIndReg, __s64 index, __u64 value); -void __setReg(const int whichReg, __u64 value); - -void __mf(void); -void __mfa(void); -void __synci(void); -void __itcd(__s64 pa); -void __itci(__s64 pa); -void __itrd(__s64 whichTransReg, __s64 pa); -void __itri(__s64 whichTransReg, __s64 pa); -void __ptce(__s64 va); -void __ptcl(__s64 va, __s64 pagesz); -void __ptcg(__s64 va, __s64 pagesz); -void __ptcga(__s64 va, __s64 pagesz); -void __ptri(__s64 va, __s64 pagesz); -void __ptrd(__s64 va, __s64 pagesz); -void __invala (void); -void __invala_gr(const int whichGeneralReg /* 0-127 */ ); -void __invala_fr(const int whichFloatReg /* 0-127 */ ); -void __nop(const int); -void __fc(__u64 *addr); -void __sum(int mask); -void __rum(int mask); -void __ssm(int mask); -void __rsm(int mask); -__u64 __thash(__s64); -__u64 __ttag(__s64); -__s64 __tpa(__s64); - -/* Intrinsics for implementing get/put_user macros */ -void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val); -void __ld_user(const char *tableName, __u64 addr, char size, char relocType); - -/* This intrinsic does not generate code, it creates a barrier across which - * the compiler will not schedule data access instructions. - */ -void __memory_barrier(void); - -void __isrlz(void); -void __dsrlz(void); - -__u64 _m64_mux1(__u64 a, const int n); -__u64 __thash(__u64); - -/* Lock and Atomic Operation Related Intrinsics */ -__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value); -__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value); -__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value); -__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value); - -__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp); -__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp); - -__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len); -__s64 _m64_shrp(__s64 a, __s64 b, const int count); -__s64 _m64_popcnt(__s64 a); +#include <ia64intrin.h> #define ia64_barrier() __memory_barrier() @@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a); #define ia64_getreg __getReg #define ia64_setreg __setReg -#define ia64_hint(x) +#define ia64_hint __hint +#define ia64_hint_pause __hint_pause -#define ia64_mux1_brcst 0 -#define ia64_mux1_mix 8 -#define ia64_mux1_shuf 9 -#define ia64_mux1_alt 10 -#define ia64_mux1_rev 11 +#define ia64_mux1_brcst _m64_mux1_brcst +#define ia64_mux1_mix _m64_mux1_mix +#define ia64_mux1_shuf _m64_mux1_shuf +#define ia64_mux1_alt _m64_mux1_alt +#define ia64_mux1_rev _m64_mux1_rev -#define ia64_mux1 _m64_mux1 +#define ia64_mux1(x,v) _m_to_int64(_m64_mux1(_m_from_int64(x), (v))) #define ia64_popcnt _m64_popcnt #define ia64_getf_exp __getf_exp #define ia64_shrp _m64_shrp @@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a); #define ia64_stf8 __stf8 #define ia64_stf_spill __stf_spill -#define ia64_mf __mf +#define 
ia64_mf __mf #define ia64_mfa __mfa #define ia64_fetchadd4_acq __fetchadd4_acq @@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a); /* Values for lfhint in __lfetch and __lfetch_fault */ -#define ia64_lfhint_none 0 -#define ia64_lfhint_nt1 1 -#define ia64_lfhint_nt2 2 -#define ia64_lfhint_nta 3 +#define ia64_lfhint_none __lfhint_none +#define ia64_lfhint_nt1 __lfhint_nt1 +#define ia64_lfhint_nt2 __lfhint_nt2 +#define ia64_lfhint_nta __lfhint_nta #define ia64_lfetch __lfetch #define ia64_lfetch_excl __lfetch_excl @@ -254,4 +152,6 @@ do { \ } \ } while (0) +#define __builtin_trap() __break(0); + #endif /* _ASM_IA64_INTEL_INTRIN_H */ diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index b64fdb98549..c2e3742108b 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h @@ -88,8 +88,8 @@ phys_to_virt (unsigned long address) } #define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ -extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); +extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */ +extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count); /* * The following two macros are deprecated and scheduled for removal. @@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr) # define outl_p outl #endif -/* - * An "address" in IO memory space is not clearly either an integer or a pointer. We will - * accept both, thus the casts. - * - * On ia-64, we access the physical I/O memory space through the uncached kernel region. - */ -static inline void __iomem * -ioremap (unsigned long offset, unsigned long size) -{ - return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset)); -} +extern void __iomem * ioremap(unsigned long offset, unsigned long size); +extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); static inline void iounmap (volatile void __iomem *addr) { } -#define ioremap_nocache(o,s) ioremap(o,s) +/* Use normal IO mappings for DMI */ +#define dmi_ioremap ioremap +#define dmi_iounmap(x,l) iounmap(x) +#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) # ifdef __KERNEL__ diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h index 8b01a083dde..218c458ab60 100644 --- a/include/asm-ia64/kdebug.h +++ b/include/asm-ia64/kdebug.h @@ -40,7 +40,7 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); -extern struct notifier_block *ia64die_chain; +extern struct atomic_notifier_head ia64die_chain; enum die_val { DIE_BREAK = 1, @@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, .signr = sig }; - return notifier_call_chain(&ia64die_chain, val, &args); + return atomic_notifier_call_chain(&ia64die_chain, val, &args); } #endif diff --git a/include/asm-ia64/linkage.h b/include/asm-ia64/linkage.h index 14cd72cd800..ef22a45c189 100644 --- a/include/asm-ia64/linkage.h +++ b/include/asm-ia64/linkage.h @@ -1,6 +1,14 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H +#ifndef __ASSEMBLY__ + #define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage)) +#else + +#include <asm/asmmacro.h> + +#endif + #endif diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index ca5ea994d68..c3e4ed8a3e1 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -20,6 +20,7 @@ struct scatterlist; struct page; struct mm_struct; struct pci_bus; +struct task_struct; 
typedef void ia64_mv_setup_t (char **); typedef void ia64_mv_cpu_init_t (void); @@ -34,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val, u8 size); typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val, u8 size); +typedef void ia64_mv_migrate_t(struct task_struct * task); /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); @@ -85,6 +87,11 @@ machvec_noop_mm (struct mm_struct *mm) { } +static inline void +machvec_noop_task (struct task_struct *task) +{ +} + extern void machvec_setup (char **); extern void machvec_timer_interrupt (int, void *, struct pt_regs *); extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int); @@ -146,6 +153,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # define platform_readw_relaxed ia64_mv.readw_relaxed # define platform_readl_relaxed ia64_mv.readl_relaxed # define platform_readq_relaxed ia64_mv.readq_relaxed +# define platform_migrate ia64_mv.migrate # endif /* __attribute__((__aligned__(16))) is required to make size of the @@ -194,6 +202,7 @@ struct ia64_machine_vector { ia64_mv_readw_relaxed_t *readw_relaxed; ia64_mv_readl_relaxed_t *readl_relaxed; ia64_mv_readq_relaxed_t *readq_relaxed; + ia64_mv_migrate_t *migrate; } __attribute__((__aligned__(16))); /* align attrib? see above comment */ #define MACHVEC_INIT(name) \ @@ -238,6 +247,7 @@ struct ia64_machine_vector { platform_readw_relaxed, \ platform_readl_relaxed, \ platform_readq_relaxed, \ + platform_migrate, \ } extern struct ia64_machine_vector ia64_mv; @@ -386,5 +396,8 @@ extern ia64_mv_dma_supported swiotlb_dma_supported; #ifndef platform_readq_relaxed # define platform_readq_relaxed __ia64_readq_relaxed #endif +#ifndef platform_migrate +# define platform_migrate machvec_noop_task +#endif #endif /* _ASM_IA64_MACHVEC_H */ diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h index 4dc8522c974..8a0752f4098 100644 --- a/include/asm-ia64/machvec_dig.h +++ b/include/asm-ia64/machvec_dig.h @@ -2,7 +2,6 @@ #define _ASM_IA64_MACHVEC_DIG_h extern ia64_mv_setup_t dig_setup; -extern ia64_mv_irq_init_t dig_irq_init; /* * This stuff has dual use! @@ -13,6 +12,5 @@ extern ia64_mv_irq_init_t dig_irq_init; */ #define platform_name "dig" #define platform_setup dig_setup -#define platform_irq_init dig_irq_init #endif /* _ASM_IA64_MACHVEC_DIG_h */ diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index 03d00faf03b..da1d43755af 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -66,6 +66,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; extern ia64_mv_dma_mapping_error sn_dma_mapping_error; extern ia64_mv_dma_supported sn_dma_supported; +extern ia64_mv_migrate_t sn_migrate; /* * This stuff has dual use! 
@@ -115,6 +116,7 @@ extern ia64_mv_dma_supported sn_dma_supported; #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device #define platform_dma_mapping_error sn_dma_mapping_error #define platform_dma_supported sn_dma_supported +#define platform_migrate sn_migrate #include <asm/sn/io.h> diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index c7d9c9ed38b..bfbbb8da79c 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h @@ -131,6 +131,8 @@ struct ia64_mca_cpu { /* Array of physical addresses of each CPU's MCA area. */ extern unsigned long __per_cpu_mca[NR_CPUS]; +extern int cpe_vector; +extern int ia64_cpe_irq; extern void ia64_mca_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h index 458c1f7fbc1..5a3224f6af3 100644 --- a/include/asm-ia64/mutex.h +++ b/include/asm-ia64/mutex.h @@ -1,9 +1,92 @@ /* - * Pull in the generic implementation for the mutex fastpath. + * ia64 implementation of the mutex fastpath. * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) + * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com> + * + */ + +#ifndef _ASM_MUTEX_H +#define _ASM_MUTEX_H + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function MUST leave the value lower than + * 1 even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(ia64_fetchadd4_acq(count, -1) != 1)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, + * or anything the slow path function returns. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (unlikely(ia64_fetchadd4_acq(count, -1) != 1)) + return fail_fn(count); + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the count from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than 1. + * + * If the implementation sets it to a value of lower than 1, then the + * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs + * to return 0 otherwise. 
+ */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int ret = ia64_fetchadd4_rel(count, 1); + if (unlikely(ret < 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 1 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: fallback function + * + * Change the count from 1 to a value lower than 1, and return 0 (failure) + * if it wasn't 1 originally, or return 1 (success) otherwise. This function + * MUST leave the value lower than 1 even when the "1" assertion wasn't true. + * Additionally, if the value was < 0 originally, this function must not leave + * it to 0 on failure. + * + * If the architecture has no effective trylock variant, it should call the + * <fail_fn> spinlock-based trylock variant unconditionally. */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (likely(cmpxchg_acq(count, 1, 0)) == 1) + return 1; + return 0; +} -#include <asm-generic/mutex-dec.h> +#endif diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h index 3ae128fe082..dae6aeb7b11 100644 --- a/include/asm-ia64/numa.h +++ b/include/asm-ia64/numa.h @@ -23,7 +23,7 @@ #include <asm/mmzone.h> -extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; +extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; /* Stuff below this line could be architecture independent */ diff --git a/include/asm-ia64/numnodes.h b/include/asm-ia64/numnodes.h index 21cff4da548..e9d356f549d 100644 --- a/include/asm-ia64/numnodes.h +++ b/include/asm-ia64/numnodes.h @@ -3,13 +3,18 @@ #ifdef CONFIG_IA64_DIG /* Max 8 Nodes */ -#define NODES_SHIFT 3 +# define NODES_SHIFT 3 #elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB) /* Max 32 Nodes */ -#define NODES_SHIFT 5 +# define NODES_SHIFT 5 #elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) -/* Max 256 Nodes */ -#define NODES_SHIFT 8 +# if CONFIG_IA64_NR_NODES == 256 +# define NODES_SHIFT 8 +# elif CONFIG_IA64_NR_NODES <= 512 +# define NODES_SHIFT 9 +# elif CONFIG_IA64_NR_NODES <= 1024 +# define NODES_SHIFT 10 +# endif #endif #endif /* _ASM_MAX_NUMNODES_H */ diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 5e6362a786b..2087825eefa 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -57,6 +57,8 @@ # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA # define ARCH_HAS_HUGEPAGE_ONLY_RANGE +# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE +# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE #endif /* CONFIG_HUGETLB_PAGE */ #ifdef __ASSEMBLY__ @@ -104,17 +106,25 @@ extern int ia64_pfn_valid (unsigned long pfn); # define ia64_pfn_valid(pfn) 1 #endif +#ifdef CONFIG_VIRTUAL_MEM_MAP +extern struct page *vmem_map; +#ifdef CONFIG_DISCONTIGMEM +# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) +# define pfn_to_page(pfn) (vmem_map + (pfn)) +#endif +#endif + +#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM) +/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */ +#include <asm-generic/memory_model.h> +#endif + #ifdef CONFIG_FLATMEM # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -# define page_to_pfn(page) ((unsigned long) (page - mem_map)) -# define pfn_to_page(pfn) (mem_map + (pfn)) #elif defined(CONFIG_DISCONTIGMEM) -extern struct page *vmem_map; extern unsigned long min_low_pfn; extern unsigned long max_low_pfn; # define pfn_valid(pfn) 
(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) -# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) -# define pfn_to_page(pfn) (vmem_map + (pfn)) #endif #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) @@ -147,7 +157,7 @@ typedef union ia64_va { | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) # define is_hugepage_only_range(mm, addr, len) \ - (REGION_NUMBER(addr) == RGN_HPAGE && \ + (REGION_NUMBER(addr) == RGN_HPAGE || \ REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE) extern unsigned int hpage_shift; #endif diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 7708ec669a3..4e7e6f23b08 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -1640,8 +1640,7 @@ ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping) if (iprv.status == PAL_STATUS_SUCCESS) { - if (proc_number == 0) - mapping->overview.overview_data = iprv.v0; + mapping->overview.overview_data = iprv.v0; mapping->ppli1.ppli1_data = iprv.v1; mapping->ppli2.ppli2_data = iprv.v2; } diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index e2560c58384..c0f8144f234 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A)) #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) -#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P)) +#define pte_mkhuge(pte) (__pte(pte_val(pte))) /* * Macro to a page protection value as "uncacheable". Note that "protection" is really a @@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr; #define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3)) #define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT) #define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1)) -struct mmu_gather; -void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, - unsigned long end, unsigned long floor, unsigned long ceiling); #endif /* diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h index 160258a0528..bcaf9f14024 100644 --- a/include/asm-ia64/poll.h +++ b/include/asm-ia64/poll.h @@ -21,6 +21,7 @@ #define POLLWRBAND 0x0200 #define POLLMSG 0x0400 #define POLLREMOVE 0x1000 +#define POLLRDHUP 0x2000 struct pollfd { int fd; diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 23c8e1be191..b3bd58e8069 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -50,7 +50,8 @@ #define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */ #define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */ #define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. 
*/ - /* bit 5 is currently unused */ +#define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration + sync at ctx sw */ #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */ #define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */ @@ -180,7 +181,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); #define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) #define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) -extern void identify_cpu (struct cpuinfo_ia64 *); extern void print_cpu_info (struct cpuinfo_ia64 *); typedef struct { diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h index 608168d713d..5e328ed5d01 100644 --- a/include/asm-ia64/signal.h +++ b/include/asm-ia64/signal.h @@ -158,8 +158,6 @@ struct k_sigaction { #define ptrace_signal_deliver(regs, cookie) do { } while (0) -void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr); - #endif /* __KERNEL__ */ # endif /* !__ASSEMBLY__ */ diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h index 2c32e4b77b5..1d9efe54166 100644 --- a/include/asm-ia64/sn/addrs.h +++ b/include/asm-ia64/sn/addrs.h @@ -283,5 +283,13 @@ #define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a))) #define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d)) +/* + * Coretalk address breakdown + */ +#define CTALK_NASID_SHFT 40 +#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT) +#define CTALK_CID_SHFT 38 +#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT) +#define CTALK_NODE_OFFSET 0x3FFFFFFFFF #endif /* _ASM_IA64_SN_ADDRS_H */ diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h index e3b819110d4..344bf44bb35 100644 --- a/include/asm-ia64/sn/l1.h +++ b/include/asm-ia64/sn/l1.h @@ -34,6 +34,8 @@ #define L1_BRICKTYPE_IA 0x6b /* k */ #define L1_BRICKTYPE_ATHENA 0x2b /* + */ #define L1_BRICKTYPE_DAYTONA 0x7a /* z */ +#define L1_BRICKTYPE_1932 0x2c /* . */ +#define L1_BRICKTYPE_191010 0x2e /* , */ /* board type response codes */ #define L1_BOARDTYPE_IP69 0x0100 /* CA */ @@ -46,5 +48,4 @@ #define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */ #define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */ - #endif /* _ASM_IA64_SN_L1_H */ diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h index a601d3af39b..51260ab70d9 100644 --- a/include/asm-ia64/sn/pcibr_provider.h +++ b/include/asm-ia64/sn/pcibr_provider.h @@ -144,4 +144,5 @@ extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp); extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, void *resp); +extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus); #endif diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h index 38cdffbc4c7..eac3561574b 100644 --- a/include/asm-ia64/sn/pcidev.h +++ b/include/asm-ia64/sn/pcidev.h @@ -76,6 +76,7 @@ extern void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus); extern void sn_bus_store_sysdata(struct pci_dev *dev); extern void sn_bus_free_sysdata(void); +extern void sn_generate_path(struct pci_bus *pci_bus, char *address); extern void sn_pci_fixup_slot(struct pci_dev *dev); extern void sn_pci_unfixup_slot(struct pci_dev *dev); extern void sn_irq_lh_init(void); diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h index f40fd1a5510..2d78f4c5a45 100644 --- a/include/asm-ia64/sn/rw_mmr.h +++ b/include/asm-ia64/sn/rw_mmr.h @@ -3,15 +3,14 @@ * License. 
See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved. */ #ifndef _ASM_IA64_SN_RW_MMR_H #define _ASM_IA64_SN_RW_MMR_H /* - * This file contains macros used to access MMR registers via - * uncached physical addresses. + * This file that access MMRs via uncached physical addresses. * pio_phys_read_mmr - read an MMR * pio_phys_write_mmr - write an MMR * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 @@ -22,53 +21,8 @@ */ -extern inline long -pio_phys_read_mmr(volatile long *mmr) -{ - long val; - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt;;" - "srlz.i;;" - "ld8.acq %0=[%1];;" - "mov psr.l=r2;;" - "srlz.i;;" - : "=r"(val) - : "r"(mmr) - : "r2"); - return val; -} - - - -extern inline void -pio_phys_write_mmr(volatile long *mmr, long val) -{ - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt;;" - "srlz.i;;" - "st8.rel [%0]=%1;;" - "mov psr.l=r2;;" - "srlz.i;;" - :: "r"(mmr), "r"(val) - : "r2", "memory"); -} - -extern inline void -pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2) -{ - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt | psr.ic;;" - "cmp.ne p9,p0=%2,r0;" - "srlz.i;;" - "st8.rel [%0]=%1;" - "(p9) st8.rel [%2]=%3;;" - "mov psr.l=r2;;" - "srlz.i;;" - :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2) - : "p9", "r2", "memory"); -} +extern long pio_phys_read_mmr(volatile long *mmr); +extern void pio_phys_write_mmr(volatile long *mmr, long val); +extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2); #endif /* _ASM_IA64_SN_RW_MMR_H */ diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h index ff33e3bd3f8..30dcfa442e5 100644 --- a/include/asm-ia64/sn/sn_feature_sets.h +++ b/include/asm-ia64/sn/sn_feature_sets.h @@ -30,8 +30,7 @@ extern int sn_prom_feature_available(int id); #define PRF_PAL_CACHE_FLUSH_SAFE 0 #define PRF_DEVICE_FLUSH_LIST 1 - - +#define PRF_HOTPLUG_SUPPORT 2 /* --------------------- OS Features -------------------------------*/ diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index e77f0c9b7d3..bf4cc867a69 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -159,7 +159,7 @@ static inline u32 sn_sal_rev(void) { - struct ia64_sal_systab *systab = efi.sal_systab; + struct ia64_sal_systab *systab = __va(efi.sal_systab); return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); } @@ -907,18 +907,22 @@ ia64_sn_sysctl_tio_clock_reset(nasid_t nasid) /* * Get the associated ioboard type for a given nasid. 
*/ -static inline int -ia64_sn_sysctl_ioboard_get(nasid_t nasid) +static inline s64 +ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard) { - struct ia64_sal_retval rv; - SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, - nasid, 0, 0, 0, 0, 0); - if (rv.v0 != 0) - return (int)rv.v0; - if (rv.v1 != 0) - return (int)rv.v1; - - return 0; + struct ia64_sal_retval isrv; + SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, + nasid, 0, 0, 0, 0, 0); + if (isrv.v0 != 0) { + *ioboard = isrv.v0; + return isrv.status; + } + if (isrv.v1 != 0) { + *ioboard = isrv.v1; + return isrv.status; + } + + return isrv.status; } /** @@ -1037,7 +1041,7 @@ ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, /***** BEGIN HACK - temp til old proms no longer supported ********/ if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { - int nasid = get_sapicid() & 0xfff;; + int nasid = get_sapicid() & 0xfff; #define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL #define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 if (shubtype) *shubtype = 0; diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h index d4c990712ea..893468e1b41 100644 --- a/include/asm-ia64/sn/tioce.h +++ b/include/asm-ia64/sn/tioce.h @@ -11,7 +11,7 @@ /* CE ASIC part & mfgr information */ #define TIOCE_PART_NUM 0xCE00 -#define TIOCE_MFGR_NUM 0x36 +#define TIOCE_SRC_ID 0x01 #define TIOCE_REV_A 0x1 /* CE Virtual PPB Vendor/Device IDs */ @@ -20,7 +20,7 @@ /* CE Host Bridge Vendor/Device IDs */ #define CE_HOST_BRIDGE_VENDOR_ID 0x10a9 -#define CE_HOST_BRIDGE_DEVICE_ID 0x4003 +#define CE_HOST_BRIDGE_DEVICE_ID 0x4001 #define TIOCE_NUM_M40_ATES 4096 @@ -463,6 +463,25 @@ typedef volatile struct tioce { u64 ce_end_of_struct; /* 0x044400 */ } tioce_t; +/* ce_lsiX_gb_cfg1 register bit masks & shifts */ +#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0 +#define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0) +#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8 +#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8); +#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12 +#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12) +#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15 +#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15) +#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16 +#define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16) +#define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18 +#define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18) +#define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19 +#define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19) +#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20 +#define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20) +#define CE_LSI_GB_CFG1_SLF_TS_SHFT 24 +#define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24) /* ce_adm_int_mask/ce_adm_int_status register bit defines */ #define CE_ADM_INT_CE_ERROR_SHFT 0 @@ -592,6 +611,11 @@ typedef volatile struct tioce { #define CE_URE_RD_MRG_ENABLE (0x1ULL << 0) #define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4) #define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5) +#define CE_URE_WRT_MRG_TIMER_SHFT 12 +#define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT) +#define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \ + CE_URE_WRT_MRG_TIMER_SHFT) & \ + CE_URE_WRT_MRG_TIMER_MASK) #define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24) #define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32) #define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33) @@ -653,8 +677,12 @@ typedef volatile struct tioce { #define CE_URE_SI (0x1ULL << 0) #define CE_URE_ELAL_SHFT 4 #define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT) +#define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \ + 
CE_URE_ELAL_MASK) #define CE_URE_ELAL1_SHFT 8 #define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT) +#define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \ + CE_URE_ELAL1_MASK) #define CE_URE_SCC (0x1ULL << 12) #define CE_URE_PN1_SHFT 16 #define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT) @@ -675,8 +703,12 @@ typedef volatile struct tioce { #define CE_URE_HPC (0x1ULL << 6) #define CE_URE_SPLV_SHFT 7 #define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT) +#define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \ + CE_URE_SPLV_MASK) #define CE_URE_SPLS_SHFT 15 #define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT) +#define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \ + CE_URE_SPLS_MASK) #define CE_URE_PSN1_SHFT 19 #define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT) #define CE_URE_PSN2_SHFT 32 diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index df7f5f4f3cd..aa3b8ace903 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h @@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error) -static inline void * -xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) -{ - /* see if kmalloc will give us cachline aligned memory by default */ - *base = kmalloc(size, flags); - if (*base == NULL) { - return NULL; - } - if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { - return *base; - } - kfree(*base); - - /* nope, we'll have to do it ourselves */ - *base = kmalloc(size + L1_CACHE_BYTES, flags); - if (*base == NULL) { - return NULL; - } - return (void *) L1_CACHE_ALIGN((u64) *base); -} - - /* * Check to see if there is any channel activity to/from the specified * partition. diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 06253871562..2f362059368 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -244,6 +244,13 @@ extern void ia64_load_extra (struct task_struct *task); __ia64_save_fpu((prev)->thread.fph); \ } \ __switch_to(prev, next, last); \ + /* "next" in old context is "current" in new context */ \ + if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ + (task_cpu(current) != \ + task_thread_info(current)->last_cpu))) { \ + platform_migrate(current); \ + task_thread_info(current)->last_cpu = task_cpu(current); \ + } \ } while (0) #else # define switch_to(prev,next,last) __switch_to(prev, next, last) @@ -258,6 +265,8 @@ void sched_cacheflush(void); #define arch_align_stack(x) (x) +void default_idle(void); + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 1d6518fe1f0..56394a2c705 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -26,16 +26,10 @@ struct thread_info { struct exec_domain *exec_domain;/* execution domain */ __u32 flags; /* thread_info flags (see TIF_*) */ __u32 cpu; /* current CPU */ + __u32 last_cpu; /* Last CPU thread ran on */ mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ struct restart_block restart_block; - struct { - int signo; - int code; - void __user *addr; - unsigned long start_time; - pid_t pid; - } sigdelayed; /* Saved information for TIF_SIGDELAYED */ }; #define THREAD_SIZE KERNEL_STACK_SIZE @@ -89,7 +83,6 @@ struct thread_info { #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ -#define TIF_SIGDELAYED 5 /* 
signal delayed from MCA/INIT/NMI/PMI context */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 17 #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ @@ -101,13 +94,12 @@ struct thread_info { #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) /* "work to do on user-return" bits */ -#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) +#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 019956c613e..36070c1014d 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -285,12 +285,13 @@ #define __NR_faccessat 1293 /* 1294, 1295 reserved for pselect/ppoll */ #define __NR_unshare 1296 +#define __NR_splice 1297 #ifdef __KERNEL__ #include <linux/config.h> -#define NR_syscalls 273 /* length of syscall table */ +#define NR_syscalls 274 /* length of syscall table */ #define __ARCH_WANT_SYS_RT_SIGACTION |
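
The atomic.h hunk above unrolls the while-loop in atomic_add_unless() into an explicit for(;;) with likely()/unlikely() branch hints. A compilable userspace sketch of the same cmpxchg-retry protocol, using C11 atomics in place of the kernel's atomic_cmpxchg() (names here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    static int atomic_add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);
            for (;;) {
                    if (c == u)             /* excluded value: do nothing */
                            break;
                    /* On failure, compare_exchange stores the freshly read
                     * value back into c -- the "c = old" step of the macro. */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            break;
            }
            return c != u;                  /* true if the add happened */
    }

    int main(void)
    {
            atomic_int v = 1;
            printf("%d %d\n", atomic_add_unless(&v, 1, 0), atomic_load(&v)); /* 1 2 */
            atomic_store(&v, 0);
            printf("%d %d\n", atomic_add_unless(&v, 1, 0), atomic_load(&v)); /* 0 0 */
            return 0;
    }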
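
For the ffz() documented in the bitops.h hunk: finding the first zero bit is the same as counting trailing one bits, so a two's-complement shortcut is __builtin_ctzl(~x). A quick standalone check (hypothetical rewrite, not the header's implementation; behaviour is undefined for x == ~0UL, exactly as the comment warns):

    #include <stdio.h>

    static unsigned long ffz(unsigned long x)
    {
            return __builtin_ctzl(~x);      /* first zero of x = first one of ~x */
    }

    int main(void)
    {
            printf("%lu\n", ffz(0x0UL));    /* 0: bit 0 is already clear */
            printf("%lu\n", ffz(0x7UL));    /* 3: bits 0-2 set, bit 3 clear */
            return 0;
    }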
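
The new asm-ia64/mutex.h replaces the generic mutex-dec fastpath with fetchadd-based primitives. The count protocol is: 1 = unlocked, 0 = locked, negative = locked with possible waiters. Note that the hunk's trylock has a misplaced parenthesis -- the "== 1" compares against the result of likely() rather than of cmpxchg_acq() -- so the sketch below uses the intended cmpxchg_acq(count, 1, 0) == 1 test. Minimal userspace model of the protocol, with atomic_fetch_sub/_add standing in for ia64_fetchadd4_acq/_rel:

    #include <stdatomic.h>

    /* 1 = unlocked, 0 = locked, <0 = locked, waiters may exist */
    static void fail_fn(atomic_int *count) { (void)count; /* slow path stubbed */ }

    static void fastpath_lock(atomic_int *count)
    {
            if (atomic_fetch_sub(count, 1) != 1)    /* old value wasn't 1 */
                    fail_fn(count);
    }

    static int fastpath_trylock(atomic_int *count)
    {
            int old = 1;
            /* cmpxchg_acq(count, 1, 0): succeed only on the 1 -> 0 transition */
            return atomic_compare_exchange_strong(count, &old, 0);
    }

    static void fastpath_unlock(atomic_int *count)
    {
            if (atomic_fetch_add(count, 1) < 0)     /* old value < 0: waiters */
                    fail_fn(count);                 /* would wake one up */
    }

    int main(void)
    {
            atomic_int count = 1;
            fastpath_lock(&count);          /* 1 -> 0, uncontended */
            fastpath_unlock(&count);        /* 0 -> 1 */
            return fastpath_trylock(&count) ? 0 : 1;
    }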
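
The page.h hunk keeps the DISCONTIGMEM page <-> pfn conversions as plain pointer arithmetic against vmem_map while handing FLATMEM/SPARSEMEM over to asm-generic/memory_model.h. The arithmetic in isolation, with a local array standing in for vmem_map (array size made up for the sketch):

    #include <stdio.h>

    struct page { unsigned long flags; };
    static struct page vmem_map[64];        /* stand-in for the real map */

    #define page_to_pfn(page)  ((unsigned long)((page) - vmem_map))
    #define pfn_to_page(pfn)   (vmem_map + (pfn))

    int main(void)
    {
            struct page *p = pfn_to_page(5);
            printf("%lu\n", page_to_pfn(p));        /* prints 5 */
            return 0;
    }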
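
ia64_sn_sysctl_ioboard_get() changes calling convention in the sn_sal.h hunk: the ioboard type now comes back through a u16 out-parameter and the SAL status is returned separately, so a zero status can no longer be confused with a zero board type. A mocked-up caller showing the shape of the new interface (the SAL call itself is faked; only the signature matches the patch):

    #include <stdio.h>
    typedef short nasid_t;
    typedef unsigned short u16;
    typedef long long s64;

    static s64 fake_status = 0;             /* pretend SAL results */
    static u16 fake_board  = 0x0100;        /* e.g. L1_BOARDTYPE_IP69 */

    static s64 ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
    {
            (void)nasid;
            *ioboard = fake_board;          /* real code picks isrv.v0 or v1 */
            return fake_status;             /* isrv.status */
    }

    int main(void)
    {
            u16 ioboard;
            if (ia64_sn_sysctl_ioboard_get(0, &ioboard) == 0)
                    printf("ioboard type 0x%x\n", ioboard);
            return 0;
    }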
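
The CE_URE_*_SET() and CE_URE_WRT_MRG_TIMER() helpers added in the tioce.h hunk are standard shift-and-mask field encoders. A generic, compilable rendition of the pattern (field width and position chosen arbitrarily):

    #include <stdio.h>
    #include <stdint.h>

    #define FIELD_SHFT      4
    #define FIELD_MASK      (0x7ULL << FIELD_SHFT)
    #define FIELD_SET(n)    (((uint64_t)(n) << FIELD_SHFT) & FIELD_MASK)

    int main(void)
    {
            uint64_t reg = 0;
            reg = (reg & ~FIELD_MASK) | FIELD_SET(5);       /* program the field */
            printf("0x%llx\n", (unsigned long long)reg);    /* 0x50 */
            return 0;
    }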
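
The system.h hunk wires the new platform_migrate hook into switch_to(): once __switch_to() returns in the new context, a task flagged IA64_THREAD_MIGRATION that finds itself on a different CPU than thread_info->last_cpu calls the machine-vector hook (sn_migrate on SN2) and records the new CPU. The check in isolation, with stand-in types -- nothing below is the kernel's:

    struct task {
            unsigned int flags;
            unsigned int cpu;       /* where the scheduler placed us */
            unsigned int last_cpu;  /* where we last actually ran */
    };

    #define THREAD_MIGRATION (1u << 5)      /* mirrors IA64_THREAD_MIGRATION */

    static void platform_migrate(struct task *t) { (void)t; /* e.g. sn_migrate() */ }

    static void after_switch(struct task *current)
    {
            if ((current->flags & THREAD_MIGRATION) &&
                current->cpu != current->last_cpu) {
                    platform_migrate(current);
                    current->last_cpu = current->cpu;
            }
    }

    int main(void)
    {
            struct task t = { THREAD_MIGRATION, 1, 0 };
            after_switch(&t);       /* moved from CPU 0 to 1: hook fires */
            return t.last_cpu == 1 ? 0 : 1;
    }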
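
On the unistd.h hunk: ia64 syscall numbers start at 1024, so adding __NR_splice at 1297 makes the table length 1297 - 1024 + 1 = 274, which is exactly the NR_syscalls bump from 273 to 274.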