author     Tony Luck <tony.luck@intel.com>   2008-07-17 10:53:37 -0700
committer  Tony Luck <tony.luck@intel.com>   2008-07-17 10:53:37 -0700
commit     fca515fbfa5ecd9f7b54db311317e2c877d7831a (patch)
tree       66b44028b3ab5be068be78650932812520d78865 /include/asm-ia64
parent     2b04be7e8ab5756ea36e137dd03c8773d184e67e (diff)
parent     4d58bbcc89e267d52b4df572acbf209a60a8a497 (diff)
Pull pvops into release branch
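This merge brings the ia64 paravirt_ops ("pvops") infrastructure into the release branch: privileged operations are routed through per-domain function tables (pv_init_ops, pv_cpu_ops, pv_iosapic_ops, pv_irq_ops, pv_time_ops) so that a hypervisor port such as Xen can supply its own implementations, while a native kernel keeps the raw hardware instructions. As a minimal, self-contained sketch of that indirection pattern (illustrative names only -- demo_cpu_ops, native_set_rr and xen_like_set_rr are not symbols from this series):

/*
 * Illustrative sketch only -- not kernel code.  It mimics the pvops
 * pattern: a table of function pointers (a stand-in for pv_cpu_ops)
 * holds the current implementation of each privileged operation,
 * native versions are installed by default, and a hypervisor port
 * can overwrite the pointers before the rest of the system calls them.
 */
#include <stdio.h>

struct demo_cpu_ops {
	unsigned long (*get_psr)(void);            /* stand-in for getreg(_IA64_REG_PSR) */
	void          (*set_rr)(unsigned long index, unsigned long val);
};

/* "Native" implementations: on real hardware these would be inline asm. */
static unsigned long native_get_psr(void)
{
	return 0x1UL;                              /* pretend PSR value */
}

static void native_set_rr(unsigned long index, unsigned long val)
{
	printf("native: rr[%#lx] = %#lx\n", index, val);
}

/* The table every caller goes through; defaults to the native ops. */
static struct demo_cpu_ops demo_cpu_ops = {
	.get_psr = native_get_psr,
	.set_rr  = native_set_rr,
};

/* A hypothetical hypervisor port would swap the pointers at early boot. */
static void xen_like_set_rr(unsigned long index, unsigned long val)
{
	printf("hypercall: rr[%#lx] = %#lx\n", index, val);
}

int main(void)
{
	demo_cpu_ops.set_rr(0, 0x123);             /* runs the native version   */
	demo_cpu_ops.set_rr = xen_like_set_rr;     /* "paravirtualize" one op   */
	demo_cpu_ops.set_rr(0, 0x123);             /* now routed to the "guest" */
	printf("psr = %#lx\n", demo_cpu_ops.get_psr());
	return 0;
}

A real port would fill in the table once during early boot, before the rest of the kernel starts using the operations.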
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--   include/asm-ia64/Kbuild               |   2
-rw-r--r--   include/asm-ia64/gcc_intrin.h         |  24
-rw-r--r--   include/asm-ia64/hw_irq.h             |  23
-rw-r--r--   include/asm-ia64/intel_intrin.h       |  41
-rw-r--r--   include/asm-ia64/intrinsics.h         |  55
-rw-r--r--   include/asm-ia64/iosapic.h            |  18
-rw-r--r--   include/asm-ia64/irq.h                |   9
-rw-r--r--   include/asm-ia64/mmu_context.h        |   6
-rw-r--r--   include/asm-ia64/native/inst.h        | 175
-rw-r--r--   include/asm-ia64/native/irq.h         |  35
-rw-r--r--   include/asm-ia64/paravirt.h           | 255
-rw-r--r--   include/asm-ia64/paravirt_privop.h    | 114
-rw-r--r--   include/asm-ia64/smp.h                |   2
-rw-r--r--   include/asm-ia64/system.h             |  11
14 files changed, 716 insertions, 54 deletions
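In the intrinsics changes below, the raw instruction wrappers keep working under new ia64_native_* names, and intrinsics.h re-points the old unprefixed names either at pv_cpu_ops (when CONFIG_PARAVIRT is set) or straight back at the native macros, using the IA64_INTRINSIC_API / IA64_INTRINSIC_MACRO token-pasting helpers; ia64_ssm/ia64_rsm go through the macro variant because their mask has to stay a compile-time immediate for the asm "i" constraint. A small stand-alone sketch of that selection, with made-up DEMO_* names rather than the kernel's:

/*
 * Sketch of the compile-time name redirection used in intrinsics.h
 * (illustrative names, not the real kernel symbols).  With paravirt
 * enabled the public name expands to a call through the ops table;
 * without it the name pastes straight to the native implementation,
 * so the native build pays no indirection cost.
 */
#include <stdio.h>

#define DEMO_PARAVIRT 1			/* flip to 0 for the "native" build */

static void native_fc(unsigned long addr)
{
	printf("native fc %#lx\n", addr);
}

#if DEMO_PARAVIRT
struct demo_ops { void (*fc)(unsigned long addr); };
static struct demo_ops demo_ops = { .fc = native_fc };
# define DEMO_INTRINSIC_API(name)	demo_ops.name	/* like pv_cpu_ops.name   */
#else
# define DEMO_INTRINSIC_API(name)	native_ ## name	/* like ia64_native_##name */
#endif

#define demo_fc	DEMO_INTRINSIC_API(fc)	/* mirrors "#define ia64_fc IA64_INTRINSIC_API(fc)" */

int main(void)
{
	demo_fc(0x2000);		/* same call site either way */
	return 0;
}

Either expansion leaves the call sites unchanged, which is why the rest of arch/ia64 needs no editing when the configuration switch is flipped.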
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild index eb24a3f47ca..ccbe8ae47a6 100644 --- a/include/asm-ia64/Kbuild +++ b/include/asm-ia64/Kbuild @@ -5,12 +5,12 @@ header-y += fpu.h header-y += fpswa.h header-y += ia64regs.h header-y += intel_intrin.h -header-y += intrinsics.h header-y += perfmon_default_smpl.h header-y += ptrace_offsets.h header-y += rse.h header-y += ucontext.h unifdef-y += gcc_intrin.h +unifdef-y += intrinsics.h unifdef-y += perfmon.h unifdef-y += ustack.h diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index 2fe292c275f..0f5b5592175 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h @@ -32,7 +32,7 @@ extern void ia64_bad_param_for_getreg (void); register unsigned long ia64_r13 asm ("r13") __used; #endif -#define ia64_setreg(regnum, val) \ +#define ia64_native_setreg(regnum, val) \ ({ \ switch (regnum) { \ case _IA64_REG_PSR_L: \ @@ -61,7 +61,7 @@ register unsigned long ia64_r13 asm ("r13") __used; } \ }) -#define ia64_getreg(regnum) \ +#define ia64_native_getreg(regnum) \ ({ \ __u64 ia64_intri_res; \ \ @@ -385,7 +385,7 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_invala() asm volatile ("invala" ::: "memory") -#define ia64_thash(addr) \ +#define ia64_native_thash(addr) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ @@ -438,10 +438,10 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_set_pmd(index, val) \ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_rr(index, val) \ +#define ia64_native_set_rr(index, val) \ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); -#define ia64_get_cpuid(index) \ +#define ia64_native_get_cpuid(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ @@ -477,33 +477,33 @@ register unsigned long ia64_r13 asm ("r13") __used; }) -#define ia64_get_pmd(index) \ +#define ia64_native_get_pmd(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) -#define ia64_get_rr(index) \ +#define ia64_native_get_rr(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ ia64_intri_res; \ }) -#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") +#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") -#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") -#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") +#define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") +#define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") #define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) -#define ia64_ptcga(addr, size) \ +#define ia64_native_ptcga(addr, size) \ do { \ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ ia64_dv_serialize_data(); \ @@ -608,7 +608,7 @@ do { \ } \ }) -#define ia64_intrin_local_irq_restore(x) \ +#define ia64_native_intrin_local_irq_restore(x) \ do { \ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ "(p6) ssm psr.i;" \ diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index 76366dc9c1a..5c99cbcb8a0 100644 
--- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h @@ -15,7 +15,11 @@ #include <asm/ptrace.h> #include <asm/smp.h> +#ifndef CONFIG_PARAVIRT typedef u8 ia64_vector; +#else +typedef u16 ia64_vector; +#endif /* * 0 special @@ -104,13 +108,24 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq); extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ +#ifdef CONFIG_PARAVIRT_GUEST +#include <asm/paravirt.h> +#else +#define ia64_register_ipi ia64_native_register_ipi +#define assign_irq_vector ia64_native_assign_irq_vector +#define free_irq_vector ia64_native_free_irq_vector +#define register_percpu_irq ia64_native_register_percpu_irq +#define ia64_resend_irq ia64_native_resend_irq +#endif + +extern void ia64_native_register_ipi(void); extern int bind_irq_vector(int irq, int vector, cpumask_t domain); -extern int assign_irq_vector (int irq); /* allocate a free vector */ -extern void free_irq_vector (int vector); +extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */ +extern void ia64_native_free_irq_vector (int vector); extern int reserve_irq_vector (int vector); extern void __setup_vector_irq(int cpu); extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); -extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); +extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action); extern int check_irq_used (int irq); extern void destroy_and_reserve_irq (unsigned int irq); @@ -122,7 +137,7 @@ static inline int irq_prepare_move(int irq, int cpu) { return 0; } static inline void irq_complete_move(unsigned int irq) {} #endif -static inline void ia64_resend_irq(unsigned int vector) +static inline void ia64_native_resend_irq(unsigned int vector) { platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); } diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h index a520d103d80..53cec577558 100644 --- a/include/asm-ia64/intel_intrin.h +++ b/include/asm-ia64/intel_intrin.h @@ -16,8 +16,8 @@ * intrinsic */ -#define ia64_getreg __getReg -#define ia64_setreg __setReg +#define ia64_native_getreg __getReg +#define ia64_native_setreg __setReg #define ia64_hint __hint #define ia64_hint_pause __hint_pause @@ -39,10 +39,10 @@ #define ia64_invala_fr __invala_fr #define ia64_nop __nop #define ia64_sum __sum -#define ia64_ssm __ssm +#define ia64_native_ssm __ssm #define ia64_rum __rum -#define ia64_rsm __rsm -#define ia64_fc __fc +#define ia64_native_rsm __rsm +#define ia64_native_fc __fc #define ia64_ldfs __ldfs #define ia64_ldfd __ldfd @@ -88,16 +88,17 @@ __setIndReg(_IA64_REG_INDR_PMC, index, val) #define ia64_set_pmd(index, val) \ __setIndReg(_IA64_REG_INDR_PMD, index, val) -#define ia64_set_rr(index, val) \ +#define ia64_native_set_rr(index, val) \ __setIndReg(_IA64_REG_INDR_RR, index, val) -#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) -#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) -#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) -#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) -#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) -#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) -#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) +#define ia64_native_get_cpuid(index) \ + __getIndReg(_IA64_REG_INDR_CPUID, index) +#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) +#define 
ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) +#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) +#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) +#define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) +#define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) #define ia64_srlz_d __dsrlz #define ia64_srlz_i __isrlz @@ -119,16 +120,16 @@ #define ia64_ld8_acq __ld8_acq #define ia64_sync_i __synci -#define ia64_thash __thash -#define ia64_ttag __ttag +#define ia64_native_thash __thash +#define ia64_native_ttag __ttag #define ia64_itcd __itcd #define ia64_itci __itci #define ia64_itrd __itrd #define ia64_itri __itri #define ia64_ptce __ptce #define ia64_ptcl __ptcl -#define ia64_ptcg __ptcg -#define ia64_ptcga __ptcga +#define ia64_native_ptcg __ptcg +#define ia64_native_ptcga __ptcga #define ia64_ptri __ptri #define ia64_ptrd __ptrd #define ia64_dep_mi _m64_dep_mi @@ -145,13 +146,13 @@ #define ia64_lfetch_fault __lfetch_fault #define ia64_lfetch_fault_excl __lfetch_fault_excl -#define ia64_intrin_local_irq_restore(x) \ +#define ia64_native_intrin_local_irq_restore(x) \ do { \ if ((x) != 0) { \ - ia64_ssm(IA64_PSR_I); \ + ia64_native_ssm(IA64_PSR_I); \ ia64_srlz_d(); \ } else { \ - ia64_rsm(IA64_PSR_I); \ + ia64_native_rsm(IA64_PSR_I); \ } \ } while (0) diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index f1135b5b94c..47d686dba1e 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -18,6 +18,17 @@ # include <asm/gcc_intrin.h> #endif +#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I) + +#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ +do { \ + ia64_native_set_rr(0x0000000000000000UL, (val0)); \ + ia64_native_set_rr(0x2000000000000000UL, (val1)); \ + ia64_native_set_rr(0x4000000000000000UL, (val2)); \ + ia64_native_set_rr(0x6000000000000000UL, (val3)); \ + ia64_native_set_rr(0x8000000000000000UL, (val4)); \ +} while (0) + /* * Force an unresolved reference if someone tries to use * ia64_fetch_and_add() with a bad value. @@ -183,4 +194,48 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void); #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ #endif + +#ifdef __KERNEL__ +#include <asm/paravirt_privop.h> +#endif + +#ifndef __ASSEMBLY__ +#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) +#define IA64_INTRINSIC_API(name) pv_cpu_ops.name +#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name +#else +#define IA64_INTRINSIC_API(name) ia64_native_ ## name +#define IA64_INTRINSIC_MACRO(name) ia64_native_ ## name +#endif + +/************************************************/ +/* Instructions paravirtualized for correctness */ +/************************************************/ +/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */ +/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" + * is not currently used (though it may be in a long-format VHPT system!) 
+ */ +#define ia64_fc IA64_INTRINSIC_API(fc) +#define ia64_thash IA64_INTRINSIC_API(thash) +#define ia64_get_cpuid IA64_INTRINSIC_API(get_cpuid) +#define ia64_get_pmd IA64_INTRINSIC_API(get_pmd) + + +/************************************************/ +/* Instructions paravirtualized for performance */ +/************************************************/ +#define ia64_ssm IA64_INTRINSIC_MACRO(ssm) +#define ia64_rsm IA64_INTRINSIC_MACRO(rsm) +#define ia64_getreg IA64_INTRINSIC_API(getreg) +#define ia64_setreg IA64_INTRINSIC_API(setreg) +#define ia64_set_rr IA64_INTRINSIC_API(set_rr) +#define ia64_get_rr IA64_INTRINSIC_API(get_rr) +#define ia64_ptcga IA64_INTRINSIC_API(ptcga) +#define ia64_get_psr_i IA64_INTRINSIC_API(get_psr_i) +#define ia64_intrin_local_irq_restore \ + IA64_INTRINSIC_API(intrin_local_irq_restore) +#define ia64_set_rr0_to_rr4 IA64_INTRINSIC_API(set_rr0_to_rr4) + +#endif /* !__ASSEMBLY__ */ + #endif /* _ASM_IA64_INTRINSICS_H */ diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index a3a4288daae..b9c102e15f2 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -55,13 +55,27 @@ #define NR_IOSAPICS 256 -static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg) +#ifdef CONFIG_PARAVIRT_GUEST +#include <asm/paravirt.h> +#else +#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init +#define __iosapic_read __ia64_native_iosapic_read +#define __iosapic_write __ia64_native_iosapic_write +#define iosapic_get_irq_chip ia64_native_iosapic_get_irq_chip +#endif + +extern void __init ia64_native_iosapic_pcat_compat_init(void); +extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger); + +static inline unsigned int +__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg) { writel(reg, iosapic + IOSAPIC_REG_SELECT); return readl(iosapic + IOSAPIC_WINDOW); } -static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +static inline void +__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) { writel(reg, iosapic + IOSAPIC_REG_SELECT); writel(val, iosapic + IOSAPIC_WINDOW); diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h index a66d26827cb..3627116fb0e 100644 --- a/include/asm-ia64/irq.h +++ b/include/asm-ia64/irq.h @@ -13,14 +13,7 @@ #include <linux/types.h> #include <linux/cpumask.h> - -#define NR_VECTORS 256 - -#if (NR_VECTORS + 32 * NR_CPUS) < 1024 -#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS) -#else -#define NR_IRQS 1024 -#endif +#include <asm-ia64/nr-irqs.h> static __inline__ int irq_canonicalize (int irq) diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h index cef2400983f..040bc87db93 100644 --- a/include/asm-ia64/mmu_context.h +++ b/include/asm-ia64/mmu_context.h @@ -152,11 +152,7 @@ reload_context (nv_mm_context_t context) # endif #endif - ia64_set_rr(0x0000000000000000UL, rr0); - ia64_set_rr(0x2000000000000000UL, rr1); - ia64_set_rr(0x4000000000000000UL, rr2); - ia64_set_rr(0x6000000000000000UL, rr3); - ia64_set_rr(0x8000000000000000UL, rr4); + ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4); ia64_srlz_i(); /* srlz.i implies srlz.d */ } diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h new file mode 100644 index 00000000000..c953a2ca4fc --- /dev/null +++ b/include/asm-ia64/native/inst.h @@ -0,0 +1,175 @@ +/****************************************************************************** + * include/asm-ia64/native/inst.h + * + * Copyright (c) 2008 Isaku Yamahata 
<yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN + +#define __paravirt_switch_to ia64_native_switch_to +#define __paravirt_leave_syscall ia64_native_leave_syscall +#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall +#define __paravirt_leave_kernel ia64_native_leave_kernel +#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end +#define __paravirt_work_processed_syscall_target \ + ia64_work_processed_syscall + +#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK +# define PARAVIRT_POISON 0xdeadbeefbaadf00d +# define CLOBBER(clob) \ + ;; \ + movl clob = PARAVIRT_POISON; \ + ;; +#else +# define CLOBBER(clob) /* nothing */ +#endif + +#define MOV_FROM_IFA(reg) \ + mov reg = cr.ifa + +#define MOV_FROM_ITIR(reg) \ + mov reg = cr.itir + +#define MOV_FROM_ISR(reg) \ + mov reg = cr.isr + +#define MOV_FROM_IHA(reg) \ + mov reg = cr.iha + +#define MOV_FROM_IPSR(pred, reg) \ +(pred) mov reg = cr.ipsr + +#define MOV_FROM_IIM(reg) \ + mov reg = cr.iim + +#define MOV_FROM_IIP(reg) \ + mov reg = cr.iip + +#define MOV_FROM_IVR(reg, clob) \ + mov reg = cr.ivr \ + CLOBBER(clob) + +#define MOV_FROM_PSR(pred, reg, clob) \ +(pred) mov reg = psr \ + CLOBBER(clob) + +#define MOV_TO_IFA(reg, clob) \ + mov cr.ifa = reg \ + CLOBBER(clob) + +#define MOV_TO_ITIR(pred, reg, clob) \ +(pred) mov cr.itir = reg \ + CLOBBER(clob) + +#define MOV_TO_IHA(pred, reg, clob) \ +(pred) mov cr.iha = reg \ + CLOBBER(clob) + +#define MOV_TO_IPSR(pred, reg, clob) \ +(pred) mov cr.ipsr = reg \ + CLOBBER(clob) + +#define MOV_TO_IFS(pred, reg, clob) \ +(pred) mov cr.ifs = reg \ + CLOBBER(clob) + +#define MOV_TO_IIP(reg, clob) \ + mov cr.iip = reg \ + CLOBBER(clob) + +#define MOV_TO_KR(kr, reg, clob0, clob1) \ + mov IA64_KR(kr) = reg \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define ITC_I(pred, reg, clob) \ +(pred) itc.i reg \ + CLOBBER(clob) + +#define ITC_D(pred, reg, clob) \ +(pred) itc.d reg \ + CLOBBER(clob) + +#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ +(pred_i) itc.i reg; \ +(pred_d) itc.d reg \ + CLOBBER(clob) + +#define THASH(pred, reg0, reg1, clob) \ +(pred) thash reg0 = reg1 \ + CLOBBER(clob) + +#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ + ssm psr.ic | PSR_DEFAULT_BITS \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + ;; \ + srlz.i /* guarantee that interruption collectin is on */ \ + ;; + +#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ + ssm psr.ic \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + ;; \ + srlz.d + +#define RSM_PSR_IC(clob) \ + rsm psr.ic \ + CLOBBER(clob) + +#define SSM_PSR_I(pred, pred_clob, clob) \ +(pred) ssm psr.i \ + CLOBBER(clob) + +#define RSM_PSR_I(pred, clob0, clob1) \ +(pred) rsm psr.i \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define RSM_PSR_I_IC(clob0, clob1, clob2) \ + rsm psr.i | 
psr.ic \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + CLOBBER(clob2) + +#define RSM_PSR_DT \ + rsm psr.dt + +#define SSM_PSR_DT_AND_SRLZ_I \ + ssm psr.dt \ + ;; \ + srlz.i + +#define BSW_0(clob0, clob1, clob2) \ + bsw.0 \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + CLOBBER(clob2) + +#define BSW_1(clob0, clob1) \ + bsw.1 \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define COVER \ + cover + +#define RFI \ + rfi diff --git a/include/asm-ia64/native/irq.h b/include/asm-ia64/native/irq.h new file mode 100644 index 00000000000..efe9ff74a3c --- /dev/null +++ b/include/asm-ia64/native/irq.h @@ -0,0 +1,35 @@ +/****************************************************************************** + * include/asm-ia64/native/irq.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * moved from linux/include/asm-ia64/irq.h. + */ + +#ifndef _ASM_IA64_NATIVE_IRQ_H +#define _ASM_IA64_NATIVE_IRQ_H + +#define NR_VECTORS 256 + +#if (NR_VECTORS + 32 * NR_CPUS) < 1024 +#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS) +#else +#define IA64_NATIVE_NR_IRQS 1024 +#endif + +#endif /* _ASM_IA64_NATIVE_IRQ_H */ diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h new file mode 100644 index 00000000000..1b4df129f57 --- /dev/null +++ b/include/asm-ia64/paravirt.h @@ -0,0 +1,255 @@ +/****************************************************************************** + * include/asm-ia64/paravirt.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_PARAVIRT_H +#define __ASM_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT_GUEST + +#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 +#define PARAVIRT_HYPERVISOR_TYPE_XEN 1 + +#ifndef __ASSEMBLY__ + +#include <asm/hw_irq.h> +#include <asm/meminit.h> + +/****************************************************************************** + * general info + */ +struct pv_info { + unsigned int kernel_rpl; + int paravirt_enabled; + const char *name; +}; + +extern struct pv_info pv_info; + +static inline int paravirt_enabled(void) +{ + return pv_info.paravirt_enabled; +} + +static inline unsigned int get_kernel_rpl(void) +{ + return pv_info.kernel_rpl; +} + +/****************************************************************************** + * initialization hooks. + */ +struct rsvd_region; + +struct pv_init_ops { + void (*banner)(void); + + int (*reserve_memory)(struct rsvd_region *region); + + void (*arch_setup_early)(void); + void (*arch_setup_console)(char **cmdline_p); + int (*arch_setup_nomca)(void); + + void (*post_smp_prepare_boot_cpu)(void); +}; + +extern struct pv_init_ops pv_init_ops; + +static inline void paravirt_banner(void) +{ + if (pv_init_ops.banner) + pv_init_ops.banner(); +} + +static inline int paravirt_reserve_memory(struct rsvd_region *region) +{ + if (pv_init_ops.reserve_memory) + return pv_init_ops.reserve_memory(region); + return 0; +} + +static inline void paravirt_arch_setup_early(void) +{ + if (pv_init_ops.arch_setup_early) + pv_init_ops.arch_setup_early(); +} + +static inline void paravirt_arch_setup_console(char **cmdline_p) +{ + if (pv_init_ops.arch_setup_console) + pv_init_ops.arch_setup_console(cmdline_p); +} + +static inline int paravirt_arch_setup_nomca(void) +{ + if (pv_init_ops.arch_setup_nomca) + return pv_init_ops.arch_setup_nomca(); + return 0; +} + +static inline void paravirt_post_smp_prepare_boot_cpu(void) +{ + if (pv_init_ops.post_smp_prepare_boot_cpu) + pv_init_ops.post_smp_prepare_boot_cpu(); +} + +/****************************************************************************** + * replacement of iosapic operations. + */ + +struct pv_iosapic_ops { + void (*pcat_compat_init)(void); + + struct irq_chip *(*get_irq_chip)(unsigned long trigger); + + unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); + void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); +}; + +extern struct pv_iosapic_ops pv_iosapic_ops; + +static inline void +iosapic_pcat_compat_init(void) +{ + if (pv_iosapic_ops.pcat_compat_init) + pv_iosapic_ops.pcat_compat_init(); +} + +static inline struct irq_chip* +iosapic_get_irq_chip(unsigned long trigger) +{ + return pv_iosapic_ops.get_irq_chip(trigger); +} + +static inline unsigned int +__iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + return pv_iosapic_ops.__read(iosapic, reg); +} + +static inline void +__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + return pv_iosapic_ops.__write(iosapic, reg, val); +} + +/****************************************************************************** + * replacement of irq operations. 
+ */ + +struct pv_irq_ops { + void (*register_ipi)(void); + + int (*assign_irq_vector)(int irq); + void (*free_irq_vector)(int vector); + + void (*register_percpu_irq)(ia64_vector vec, + struct irqaction *action); + + void (*resend_irq)(unsigned int vector); +}; + +extern struct pv_irq_ops pv_irq_ops; + +static inline void +ia64_register_ipi(void) +{ + pv_irq_ops.register_ipi(); +} + +static inline int +assign_irq_vector(int irq) +{ + return pv_irq_ops.assign_irq_vector(irq); +} + +static inline void +free_irq_vector(int vector) +{ + return pv_irq_ops.free_irq_vector(vector); +} + +static inline void +register_percpu_irq(ia64_vector vec, struct irqaction *action) +{ + pv_irq_ops.register_percpu_irq(vec, action); +} + +static inline void +ia64_resend_irq(unsigned int vector) +{ + pv_irq_ops.resend_irq(vector); +} + +/****************************************************************************** + * replacement of time operations. + */ + +extern struct itc_jitter_data_t itc_jitter_data; +extern volatile int time_keeper_id; + +struct pv_time_ops { + void (*init_missing_ticks_accounting)(int cpu); + int (*do_steal_accounting)(unsigned long *new_itm); + + void (*clocksource_resume)(void); +}; + +extern struct pv_time_ops pv_time_ops; + +static inline void +paravirt_init_missing_ticks_accounting(int cpu) +{ + if (pv_time_ops.init_missing_ticks_accounting) + pv_time_ops.init_missing_ticks_accounting(cpu); +} + +static inline int +paravirt_do_steal_accounting(unsigned long *new_itm) +{ + return pv_time_ops.do_steal_accounting(new_itm); +} + +#endif /* !__ASSEMBLY__ */ + +#else +/* fallback for native case */ + +#ifndef __ASSEMBLY__ + +#define paravirt_banner() do { } while (0) +#define paravirt_reserve_memory(region) 0 + +#define paravirt_arch_setup_early() do { } while (0) +#define paravirt_arch_setup_console(cmdline_p) do { } while (0) +#define paravirt_arch_setup_nomca() 0 +#define paravirt_post_smp_prepare_boot_cpu() do { } while (0) + +#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0) +#define paravirt_do_steal_accounting(new_itm) 0 + +#endif /* __ASSEMBLY__ */ + + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_PARAVIRT_H */ diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h new file mode 100644 index 00000000000..52482e6940a --- /dev/null +++ b/include/asm-ia64/paravirt_privop.h @@ -0,0 +1,114 @@ +/****************************************************************************** + * include/asm-ia64/paravirt_privops.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H +#define _ASM_IA64_PARAVIRT_PRIVOP_H + +#ifdef CONFIG_PARAVIRT + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/kregs.h> /* for IA64_PSR_I */ + +/****************************************************************************** + * replacement of intrinsics operations. + */ + +struct pv_cpu_ops { + void (*fc)(unsigned long addr); + unsigned long (*thash)(unsigned long addr); + unsigned long (*get_cpuid)(int index); + unsigned long (*get_pmd)(int index); + unsigned long (*getreg)(int reg); + void (*setreg)(int reg, unsigned long val); + void (*ptcga)(unsigned long addr, unsigned long size); + unsigned long (*get_rr)(unsigned long index); + void (*set_rr)(unsigned long index, unsigned long val); + void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4); + void (*ssm_i)(void); + void (*rsm_i)(void); + unsigned long (*get_psr_i)(void); + void (*intrin_local_irq_restore)(unsigned long flags); +}; + +extern struct pv_cpu_ops pv_cpu_ops; + +extern void ia64_native_setreg_func(int regnum, unsigned long val); +extern unsigned long ia64_native_getreg_func(int regnum); + +/************************************************/ +/* Instructions paravirtualized for performance */ +/************************************************/ + +/* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). + * static inline function doesn't satisfy it. */ +#define paravirt_ssm(mask) \ + do { \ + if ((mask) == IA64_PSR_I) \ + pv_cpu_ops.ssm_i(); \ + else \ + ia64_native_ssm(mask); \ + } while (0) + +#define paravirt_rsm(mask) \ + do { \ + if ((mask) == IA64_PSR_I) \ + pv_cpu_ops.rsm_i(); \ + else \ + ia64_native_rsm(mask); \ + } while (0) + +/****************************************************************************** + * replacement of hand written assembly codes. 
+ */ +struct pv_cpu_asm_switch { + unsigned long switch_to; + unsigned long leave_syscall; + unsigned long work_processed_syscall; + unsigned long leave_kernel; +}; +void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); + +#endif /* __ASSEMBLY__ */ + +#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name + +#else + +/* fallback for native case */ +#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name + +#endif /* CONFIG_PARAVIRT */ + +/* these routines utilize privilege-sensitive or performance-sensitive + * privileged instructions so the code must be replaced with + * paravirtualized versions */ +#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to) +#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall) +#define ia64_work_processed_syscall \ + IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) +#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) + +#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 27731e032ee..12d96e0cd51 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/cpumask.h> #include <linux/bitops.h> +#include <linux/irqreturn.h> #include <asm/io.h> #include <asm/param.h> @@ -120,6 +121,7 @@ extern void __init smp_build_cpu_map(void); extern void __init init_smp_config (void); extern void smp_do_timer (struct pt_regs *regs); +extern irqreturn_t handle_IPI(int irq, void *dev_id); extern void smp_send_reschedule (int cpu); extern void identify_siblings (struct cpuinfo_ia64 *); extern int is_multithreading_enabled(void); diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 26e250bfb91..927a381c20c 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -26,6 +26,7 @@ */ #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) +#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) #ifndef __ASSEMBLY__ @@ -122,10 +123,16 @@ extern struct ia64_boot_param { * write a floating-point register right before reading the PSR * and that writes to PSR.mfl */ +#ifdef CONFIG_PARAVIRT +#define __local_save_flags() ia64_get_psr_i() +#else +#define __local_save_flags() ia64_getreg(_IA64_REG_PSR) +#endif + #define __local_irq_save(x) \ do { \ ia64_stop(); \ - (x) = ia64_getreg(_IA64_REG_PSR); \ + (x) = __local_save_flags(); \ ia64_stop(); \ ia64_rsm(IA64_PSR_I); \ } while (0) @@ -173,7 +180,7 @@ do { \ #endif /* !CONFIG_IA64_DEBUG_IRQ */ #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) -#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) +#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); }) #define irqs_disabled() \ ({ \ |
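The system.h hunk routes __local_save_flags() through ia64_get_psr_i() when CONFIG_PARAVIRT is enabled, so local_save_flags() and local_irq_save() read only the interrupt-enable bit -- which a guest may keep somewhere cheaper than the real PSR -- instead of the whole register. A rough, self-contained sketch of that shape, again with made-up demo_* names and a fake PSR word standing in for the hardware state:

/*
 * Illustrative sketch only: irq flag save/restore built on pluggable
 * hooks.  A hypervisor port could replace demo_get_psr_i() and
 * demo_irq_restore() with cheap accesses to a virtual interrupt flag,
 * and the save/restore macros below would pick that up automatically.
 */
#include <stdio.h>

#define DEMO_PSR_I	(1UL << 14)	/* stand-in for IA64_PSR_I */

static unsigned long demo_virtual_psr = DEMO_PSR_I;	/* "interrupts enabled" */

/* Hooks with native defaults assigned in main(). */
static unsigned long (*demo_get_psr_i)(void);
static void          (*demo_irq_restore)(unsigned long flags);

static unsigned long native_get_psr_i(void)
{
	return demo_virtual_psr & DEMO_PSR_I;
}

static void native_irq_restore(unsigned long flags)
{
	if (flags & DEMO_PSR_I)
		demo_virtual_psr |= DEMO_PSR_I;
	else
		demo_virtual_psr &= ~DEMO_PSR_I;
}

#define demo_local_save_flags(flags)	((flags) = demo_get_psr_i())
#define demo_local_irq_save(flags)				\
	do {							\
		demo_local_save_flags(flags);			\
		/* like "rsm psr.i": disable interrupts */	\
		demo_virtual_psr &= ~DEMO_PSR_I;		\
	} while (0)
#define demo_local_irq_restore(flags)	demo_irq_restore(flags)

int main(void)
{
	unsigned long flags;

	demo_get_psr_i   = native_get_psr_i;
	demo_irq_restore = native_irq_restore;

	demo_local_irq_save(flags);
	printf("in critical section, psr.i=%lu\n",
	       demo_virtual_psr & DEMO_PSR_I ? 1UL : 0UL);
	demo_local_irq_restore(flags);
	printf("restored, psr.i=%lu\n",
	       demo_virtual_psr & DEMO_PSR_I ? 1UL : 0UL);
	return 0;
}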