Diffstat (limited to 'arch/powerpc/include')
306 files changed, 47700 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h new file mode 100644 index 00000000000..bdf0563ba42 --- /dev/null +++ b/arch/powerpc/include/asm/8xx_immap.h @@ -0,0 +1,564 @@ +/* + * MPC8xx Internal Memory Map + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * The I/O on the MPC860 is comprised of blocks of special registers + * and the dual port ram for the Communication Processor Module. + * Within this space are functional units such as the SIU, memory + * controller, system timers, and other control functions. It is + * a combination that I found difficult to separate into logical + * functional files.....but anyone else is welcome to try. -- Dan + */ +#ifdef __KERNEL__ +#ifndef __IMMAP_8XX__ +#define __IMMAP_8XX__ + +/* System configuration registers. +*/ +typedef struct sys_conf { + uint sc_siumcr; + uint sc_sypcr; + uint sc_swt; + char res1[2]; + ushort sc_swsr; + uint sc_sipend; + uint sc_simask; + uint sc_siel; + uint sc_sivec; + uint sc_tesr; + char res2[0xc]; + uint sc_sdcr; + char res3[0x4c]; +} sysconf8xx_t; + +/* PCMCIA configuration registers. +*/ +typedef struct pcmcia_conf { + uint pcmc_pbr0; + uint pcmc_por0; + uint pcmc_pbr1; + uint pcmc_por1; + uint pcmc_pbr2; + uint pcmc_por2; + uint pcmc_pbr3; + uint pcmc_por3; + uint pcmc_pbr4; + uint pcmc_por4; + uint pcmc_pbr5; + uint pcmc_por5; + uint pcmc_pbr6; + uint pcmc_por6; + uint pcmc_pbr7; + uint pcmc_por7; + char res1[0x20]; + uint pcmc_pgcra; + uint pcmc_pgcrb; + uint pcmc_pscr; + char res2[4]; + uint pcmc_pipr; + char res3[4]; + uint pcmc_per; + char res4[4]; +} pcmconf8xx_t; + +/* Memory controller registers. +*/ +typedef struct mem_ctlr { + uint memc_br0; + uint memc_or0; + uint memc_br1; + uint memc_or1; + uint memc_br2; + uint memc_or2; + uint memc_br3; + uint memc_or3; + uint memc_br4; + uint memc_or4; + uint memc_br5; + uint memc_or5; + uint memc_br6; + uint memc_or6; + uint memc_br7; + uint memc_or7; + char res1[0x24]; + uint memc_mar; + uint memc_mcr; + char res2[4]; + uint memc_mamr; + uint memc_mbmr; + ushort memc_mstat; + ushort memc_mptpr; + uint memc_mdr; + char res3[0x80]; +} memctl8xx_t; + +/*----------------------------------------------------------------------- + * BR - Memory Controller: Base Register 16-9 + */ +#define BR_BA_MSK 0xffff8000 /* Base Address Mask */ +#define BR_AT_MSK 0x00007000 /* Address Type Mask */ +#define BR_PS_MSK 0x00000c00 /* Port Size Mask */ +#define BR_PS_32 0x00000000 /* 32 bit port size */ +#define BR_PS_16 0x00000800 /* 16 bit port size */ +#define BR_PS_8 0x00000400 /* 8 bit port size */ +#define BR_PARE 0x00000200 /* Parity Enable */ +#define BR_WP 0x00000100 /* Write Protect */ +#define BR_MS_MSK 0x000000c0 /* Machine Select Mask */ +#define BR_MS_GPCM 0x00000000 /* G.P.C.M. 
Machine Select */ +#define BR_MS_UPMA 0x00000080 /* U.P.M.A Machine Select */ +#define BR_MS_UPMB 0x000000c0 /* U.P.M.B Machine Select */ +#define BR_V 0x00000001 /* Bank Valid */ + +/*----------------------------------------------------------------------- + * OR - Memory Controller: Option Register 16-11 + */ +#define OR_AM_MSK 0xffff8000 /* Address Mask Mask */ +#define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */ +#define OR_CSNT_SAM 0x00000800 /* Chip Select Negation Time/ Start */ + /* Address Multiplex */ +#define OR_ACS_MSK 0x00000600 /* Address to Chip Select Setup mask */ +#define OR_ACS_DIV1 0x00000000 /* CS is output at the same time */ +#define OR_ACS_DIV4 0x00000400 /* CS is output 1/4 a clock later */ +#define OR_ACS_DIV2 0x00000600 /* CS is output 1/2 a clock later */ +#define OR_G5LA 0x00000400 /* Output #GPL5 on #GPL_A5 */ +#define OR_G5LS 0x00000200 /* Drive #GPL high on falling edge of...*/ +#define OR_BI 0x00000100 /* Burst inhibit */ +#define OR_SCY_MSK 0x000000f0 /* Cycle Length in Clocks */ +#define OR_SCY_0_CLK 0x00000000 /* 0 clock cycles wait states */ +#define OR_SCY_1_CLK 0x00000010 /* 1 clock cycles wait states */ +#define OR_SCY_2_CLK 0x00000020 /* 2 clock cycles wait states */ +#define OR_SCY_3_CLK 0x00000030 /* 3 clock cycles wait states */ +#define OR_SCY_4_CLK 0x00000040 /* 4 clock cycles wait states */ +#define OR_SCY_5_CLK 0x00000050 /* 5 clock cycles wait states */ +#define OR_SCY_6_CLK 0x00000060 /* 6 clock cycles wait states */ +#define OR_SCY_7_CLK 0x00000070 /* 7 clock cycles wait states */ +#define OR_SCY_8_CLK 0x00000080 /* 8 clock cycles wait states */ +#define OR_SCY_9_CLK 0x00000090 /* 9 clock cycles wait states */ +#define OR_SCY_10_CLK 0x000000a0 /* 10 clock cycles wait states */ +#define OR_SCY_11_CLK 0x000000b0 /* 11 clock cycles wait states */ +#define OR_SCY_12_CLK 0x000000c0 /* 12 clock cycles wait states */ +#define OR_SCY_13_CLK 0x000000d0 /* 13 clock cycles wait states */ +#define OR_SCY_14_CLK 0x000000e0 /* 14 clock cycles wait states */ +#define OR_SCY_15_CLK 0x000000f0 /* 15 clock cycles wait states */ +#define OR_SETA 0x00000008 /* External Transfer Acknowledge */ +#define OR_TRLX 0x00000004 /* Timing Relaxed */ +#define OR_EHTR 0x00000002 /* Extended Hold Time on Read */ + +/* System Integration Timers. +*/ +typedef struct sys_int_timers { + ushort sit_tbscr; + char res0[0x02]; + uint sit_tbreff0; + uint sit_tbreff1; + char res1[0x14]; + ushort sit_rtcsc; + char res2[0x02]; + uint sit_rtc; + uint sit_rtsec; + uint sit_rtcal; + char res3[0x10]; + ushort sit_piscr; + char res4[2]; + uint sit_pitc; + uint sit_pitr; + char res5[0x34]; +} sit8xx_t; + +#define TBSCR_TBIRQ_MASK ((ushort)0xff00) +#define TBSCR_REFA ((ushort)0x0080) +#define TBSCR_REFB ((ushort)0x0040) +#define TBSCR_REFAE ((ushort)0x0008) +#define TBSCR_REFBE ((ushort)0x0004) +#define TBSCR_TBF ((ushort)0x0002) +#define TBSCR_TBE ((ushort)0x0001) + +#define RTCSC_RTCIRQ_MASK ((ushort)0xff00) +#define RTCSC_SEC ((ushort)0x0080) +#define RTCSC_ALR ((ushort)0x0040) +#define RTCSC_38K ((ushort)0x0010) +#define RTCSC_SIE ((ushort)0x0008) +#define RTCSC_ALE ((ushort)0x0004) +#define RTCSC_RTF ((ushort)0x0002) +#define RTCSC_RTE ((ushort)0x0001) + +#define PISCR_PIRQ_MASK ((ushort)0xff00) +#define PISCR_PS ((ushort)0x0080) +#define PISCR_PIE ((ushort)0x0004) +#define PISCR_PTF ((ushort)0x0002) +#define PISCR_PTE ((ushort)0x0001) + +/* Clocks and Reset. 
+*/ +typedef struct clk_and_reset { + uint car_sccr; + uint car_plprcr; + uint car_rsr; + char res[0x74]; /* Reserved area */ +} car8xx_t; + +/* System Integration Timers keys. +*/ +typedef struct sitk { + uint sitk_tbscrk; + uint sitk_tbreff0k; + uint sitk_tbreff1k; + uint sitk_tbk; + char res1[0x10]; + uint sitk_rtcsck; + uint sitk_rtck; + uint sitk_rtseck; + uint sitk_rtcalk; + char res2[0x10]; + uint sitk_piscrk; + uint sitk_pitck; + char res3[0x38]; +} sitk8xx_t; + +/* Clocks and reset keys. +*/ +typedef struct cark { + uint cark_sccrk; + uint cark_plprcrk; + uint cark_rsrk; + char res[0x474]; +} cark8xx_t; + +/* The key to unlock registers maintained by keep-alive power. +*/ +#define KAPWR_KEY ((unsigned int)0x55ccaa33) + +/* Video interface. MPC823 Only. +*/ +typedef struct vid823 { + ushort vid_vccr; + ushort res1; + u_char vid_vsr; + u_char res2; + u_char vid_vcmr; + u_char res3; + uint vid_vbcb; + uint res4; + uint vid_vfcr0; + uint vid_vfaa0; + uint vid_vfba0; + uint vid_vfcr1; + uint vid_vfaa1; + uint vid_vfba1; + u_char res5[0x18]; +} vid823_t; + +/* LCD interface. 823 Only. +*/ +typedef struct lcd { + uint lcd_lccr; + uint lcd_lchcr; + uint lcd_lcvcr; + char res1[4]; + uint lcd_lcfaa; + uint lcd_lcfba; + char lcd_lcsr; + char res2[0x7]; +} lcd823_t; + +/* I2C +*/ +typedef struct i2c { + u_char i2c_i2mod; + char res1[3]; + u_char i2c_i2add; + char res2[3]; + u_char i2c_i2brg; + char res3[3]; + u_char i2c_i2com; + char res4[3]; + u_char i2c_i2cer; + char res5[3]; + u_char i2c_i2cmr; + char res6[0x8b]; +} i2c8xx_t; + +/* DMA control/status registers. +*/ +typedef struct sdma_csr { + char res1[4]; + uint sdma_sdar; + u_char sdma_sdsr; + char res3[3]; + u_char sdma_sdmr; + char res4[3]; + u_char sdma_idsr1; + char res5[3]; + u_char sdma_idmr1; + char res6[3]; + u_char sdma_idsr2; + char res7[3]; + u_char sdma_idmr2; + char res8[0x13]; +} sdma8xx_t; + +/* Communication Processor Module Interrupt Controller. +*/ +typedef struct cpm_ic { + ushort cpic_civr; + char res[0xe]; + uint cpic_cicr; + uint cpic_cipr; + uint cpic_cimr; + uint cpic_cisr; +} cpic8xx_t; + +/* Input/Output Port control/status registers. +*/ +typedef struct io_port { + ushort iop_padir; + ushort iop_papar; + ushort iop_paodr; + ushort iop_padat; + char res1[8]; + ushort iop_pcdir; + ushort iop_pcpar; + ushort iop_pcso; + ushort iop_pcdat; + ushort iop_pcint; + char res2[6]; + ushort iop_pddir; + ushort iop_pdpar; + char res3[2]; + ushort iop_pddat; + uint utmode; + char res4[4]; +} iop8xx_t; + +/* Communication Processor Module Timers +*/ +typedef struct cpm_timers { + ushort cpmt_tgcr; + char res1[0xe]; + ushort cpmt_tmr1; + ushort cpmt_tmr2; + ushort cpmt_trr1; + ushort cpmt_trr2; + ushort cpmt_tcr1; + ushort cpmt_tcr2; + ushort cpmt_tcn1; + ushort cpmt_tcn2; + ushort cpmt_tmr3; + ushort cpmt_tmr4; + ushort cpmt_trr3; + ushort cpmt_trr4; + ushort cpmt_tcr3; + ushort cpmt_tcr4; + ushort cpmt_tcn3; + ushort cpmt_tcn4; + ushort cpmt_ter1; + ushort cpmt_ter2; + ushort cpmt_ter3; + ushort cpmt_ter4; + char res2[8]; +} cpmtimer8xx_t; + +/* Finally, the Communication Processor stuff..... 
+*/ +typedef struct scc { /* Serial communication channels */ + uint scc_gsmrl; + uint scc_gsmrh; + ushort scc_psmr; + char res1[2]; + ushort scc_todr; + ushort scc_dsr; + ushort scc_scce; + char res2[2]; + ushort scc_sccm; + char res3; + u_char scc_sccs; + char res4[8]; +} scc_t; + +typedef struct smc { /* Serial management channels */ + char res1[2]; + ushort smc_smcmr; + char res2[2]; + u_char smc_smce; + char res3[3]; + u_char smc_smcm; + char res4[5]; +} smc_t; + +/* MPC860T Fast Ethernet Controller. It isn't part of the CPM, but + * it fits within the address space. + */ + +typedef struct fec { + uint fec_addr_low; /* lower 32 bits of station address */ + ushort fec_addr_high; /* upper 16 bits of station address */ + ushort res1; /* reserved */ + uint fec_grp_hash_table_high; /* upper 32-bits of hash table */ + uint fec_grp_hash_table_low; /* lower 32-bits of hash table */ + uint fec_r_des_start; /* beginning of Rx descriptor ring */ + uint fec_x_des_start; /* beginning of Tx descriptor ring */ + uint fec_r_buff_size; /* Rx buffer size */ + uint res2[9]; /* reserved */ + uint fec_ecntrl; /* ethernet control register */ + uint fec_ievent; /* interrupt event register */ + uint fec_imask; /* interrupt mask register */ + uint fec_ivec; /* interrupt level and vector status */ + uint fec_r_des_active; /* Rx ring updated flag */ + uint fec_x_des_active; /* Tx ring updated flag */ + uint res3[10]; /* reserved */ + uint fec_mii_data; /* MII data register */ + uint fec_mii_speed; /* MII speed control register */ + uint res4[17]; /* reserved */ + uint fec_r_bound; /* end of RAM (read-only) */ + uint fec_r_fstart; /* Rx FIFO start address */ + uint res5[6]; /* reserved */ + uint fec_x_fstart; /* Tx FIFO start address */ + uint res6[17]; /* reserved */ + uint fec_fun_code; /* fec SDMA function code */ + uint res7[3]; /* reserved */ + uint fec_r_cntrl; /* Rx control register */ + uint fec_r_hash; /* Rx hash register */ + uint res8[14]; /* reserved */ + uint fec_x_cntrl; /* Tx control register */ + uint res9[0x1e]; /* reserved */ +} fec_t; + +/* The FEC and LCD color map share the same address space.... + * I guess we will never see an 823T :-). + */ +union fec_lcd { + fec_t fl_un_fec; + u_char fl_un_cmap[0x200]; +}; + +typedef struct comm_proc { + /* General control and status registers. + */ + ushort cp_cpcr; + u_char res1[2]; + ushort cp_rccr; + u_char res2; + u_char cp_rmds; + u_char res3[4]; + ushort cp_cpmcr1; + ushort cp_cpmcr2; + ushort cp_cpmcr3; + ushort cp_cpmcr4; + u_char res4[2]; + ushort cp_rter; + u_char res5[2]; + ushort cp_rtmr; + u_char res6[0x14]; + + /* Baud rate generators. + */ + uint cp_brgc1; + uint cp_brgc2; + uint cp_brgc3; + uint cp_brgc4; + + /* Serial Communication Channels. + */ + scc_t cp_scc[4]; + + /* Serial Management Channels. + */ + smc_t cp_smc[2]; + + /* Serial Peripheral Interface. + */ + ushort cp_spmode; + u_char res7[4]; + u_char cp_spie; + u_char res8[3]; + u_char cp_spim; + u_char res9[2]; + u_char cp_spcom; + u_char res10[2]; + + /* Parallel Interface Port. + */ + u_char res11[2]; + ushort cp_pipc; + u_char res12[2]; + ushort cp_ptpr; + uint cp_pbdir; + uint cp_pbpar; + u_char res13[2]; + ushort cp_pbodr; + uint cp_pbdat; + + /* Port E - MPC87x/88x only. + */ + uint cp_pedir; + uint cp_pepar; + uint cp_peso; + uint cp_peodr; + uint cp_pedat; + + /* Communications Processor Timing Register - + Contains RMII Timing for the FECs on MPC87x/88x only. + */ + uint cp_cptr; + + /* Serial Interface and Time Slot Assignment. 
+ */ + uint cp_simode; + u_char cp_sigmr; + u_char res15; + u_char cp_sistr; + u_char cp_sicmr; + u_char res16[4]; + uint cp_sicr; + uint cp_sirp; + u_char res17[0xc]; + + /* 256 bytes of MPC823 video controller RAM array. + */ + u_char cp_vcram[0x100]; + u_char cp_siram[0x200]; + + /* The fast ethernet controller is not really part of the CPM, + * but it resides in the address space. + * The LCD color map is also here. + */ + union fec_lcd fl_un; +#define cp_fec fl_un.fl_un_fec +#define lcd_cmap fl_un.fl_un_cmap + char res18[0xE00]; + + /* The DUET family has a second FEC here */ + fec_t cp_fec2; +#define cp_fec1 cp_fec /* consistency macro */ + + /* Dual Ported RAM follows. + * There are many different formats for this memory area + * depending upon the devices used and options chosen. + * Some processors don't have all of it populated. + */ + u_char cp_dpmem[0x1C00]; /* BD / Data / ucode */ + u_char cp_dparam[0x400]; /* Parameter RAM */ +} cpm8xx_t; + +/* Internal memory map. +*/ +typedef struct immap { + sysconf8xx_t im_siu_conf; /* SIU Configuration */ + pcmconf8xx_t im_pcmcia; /* PCMCIA Configuration */ + memctl8xx_t im_memctl; /* Memory Controller */ + sit8xx_t im_sit; /* System integration timers */ + car8xx_t im_clkrst; /* Clocks and reset */ + sitk8xx_t im_sitk; /* Sys int timer keys */ + cark8xx_t im_clkrstk; /* Clocks and reset keys */ + vid823_t im_vid; /* Video (823 only) */ + lcd823_t im_lcd; /* LCD (823 only) */ + i2c8xx_t im_i2c; /* I2C control/status */ + sdma8xx_t im_sdma; /* SDMA control/status */ + cpic8xx_t im_cpic; /* CPM Interrupt Controller */ + iop8xx_t im_ioport; /* IO Port control/status */ + cpmtimer8xx_t im_cpmtimer; /* CPM timers */ + cpm8xx_t im_cpm; /* Communication processor */ +} immap_t; + +#endif /* __IMMAP_8XX__ */ +#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild new file mode 100644 index 00000000000..7e313f1ed18 --- /dev/null +++ b/arch/powerpc/include/asm/Kbuild @@ -0,0 +1,38 @@ +include include/asm-generic/Kbuild.asm + +header-y += auxvec.h +header-y += bootx.h +header-y += byteorder.h +header-y += cputable.h +header-y += elf.h +header-y += errno.h +header-y += fcntl.h +header-y += ioctl.h +header-y += ioctls.h +header-y += ipcbuf.h +header-y += linkage.h +header-y += msgbuf.h +header-y += nvram.h +header-y += param.h +header-y += poll.h +header-y += posix_types.h +header-y += ps3fb.h +header-y += resource.h +header-y += seccomp.h +header-y += sembuf.h +header-y += shmbuf.h +header-y += sigcontext.h +header-y += siginfo.h +header-y += signal.h +header-y += socket.h +header-y += sockios.h +header-y += spu_info.h +header-y += stat.h +header-y += statfs.h +header-y += termbits.h +header-y += termios.h +header-y += types.h +header-y += ucontext.h +header-y += unistd.h + +generic-y += rwsem.h diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h new file mode 100644 index 00000000000..5ab0b71531b --- /dev/null +++ b/arch/powerpc/include/asm/abs_addr.h @@ -0,0 +1,75 @@ +#ifndef _ASM_POWERPC_ABS_ADDR_H +#define _ASM_POWERPC_ABS_ADDR_H +#ifdef __KERNEL__ + + +/* + * c 2001 PPC 64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/memblock.h> + +#include <asm/types.h> +#include <asm/page.h> +#include <asm/prom.h> +#include <asm/firmware.h> + +struct mschunks_map { + unsigned long num_chunks; + unsigned long chunk_size; + unsigned long chunk_shift; + unsigned long chunk_mask; + u32 *mapping; +}; + +extern struct mschunks_map mschunks_map; + +/* Chunks are 256 KB */ +#define MSCHUNKS_CHUNK_SHIFT (18) +#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT) +#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1) + +static inline unsigned long chunk_to_addr(unsigned long chunk) +{ + return chunk << MSCHUNKS_CHUNK_SHIFT; +} + +static inline unsigned long addr_to_chunk(unsigned long addr) +{ + return addr >> MSCHUNKS_CHUNK_SHIFT; +} + +static inline unsigned long phys_to_abs(unsigned long pa) +{ + unsigned long chunk; + + /* This is a no-op on non-iSeries */ + if (!firmware_has_feature(FW_FEATURE_ISERIES)) + return pa; + + chunk = addr_to_chunk(pa); + + if (chunk < mschunks_map.num_chunks) + chunk = mschunks_map.mapping[chunk]; + + return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK); +} + +/* Convenience macros */ +#define virt_to_abs(va) phys_to_abs(__pa(va)) +#define abs_to_virt(aa) __va(aa) + +/* + * Converts Virtual Address to Real Address for + * Legacy iSeries Hypervisor calls + */ +#define iseries_hv_addr(virtaddr) \ + (0x8000000000000000UL | virt_to_abs(virtaddr)) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_ABS_ADDR_H */ diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h new file mode 100644 index 00000000000..416e12c2d50 --- /dev/null +++ b/arch/powerpc/include/asm/agp.h @@ -0,0 +1,18 @@ +#ifndef _ASM_POWERPC_AGP_H +#define _ASM_POWERPC_AGP_H +#ifdef __KERNEL__ + +#include <asm/io.h> + +#define map_page_into_agp(page) +#define unmap_page_from_agp(page) +#define flush_agp_cache() mb() + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_AGP_H */ diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h new file mode 100644 index 00000000000..decad950f11 --- /dev/null +++ b/arch/powerpc/include/asm/asm-compat.h @@ -0,0 +1,76 @@ +#ifndef _ASM_POWERPC_ASM_COMPAT_H +#define _ASM_POWERPC_ASM_COMPAT_H + +#include <asm/types.h> +#include <asm/ppc-opcode.h> + +#ifdef __ASSEMBLY__ +# define stringify_in_c(...) __VA_ARGS__ +# define ASM_CONST(x) x +#else +/* This version of stringify will deal with commas... */ +# define __stringify_in_c(...) #__VA_ARGS__ +# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " +# define __ASM_CONST(x) x##UL +# define ASM_CONST(x) __ASM_CONST(x) +#endif + + +#ifdef __powerpc64__ + +/* operations for longs and pointers */ +#define PPC_LL stringify_in_c(ld) +#define PPC_STL stringify_in_c(std) +#define PPC_STLU stringify_in_c(stdu) +#define PPC_LCMPI stringify_in_c(cmpdi) +#define PPC_LONG stringify_in_c(.llong) +#define PPC_LONG_ALIGN stringify_in_c(.balign 8) +#define PPC_TLNEI stringify_in_c(tdnei) +#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) +#define PPC_STLCX stringify_in_c(stdcx.) +#define PPC_CNTLZL stringify_in_c(cntlzd) +#define PPC_LR_STKOFF 16 +#define PPC_MIN_STKFRM 112 + +/* Move to CR, single-entry optimized version. Only available + * on POWER4 and later. 
+ */ +#ifdef CONFIG_POWER4_ONLY +#define PPC_MTOCRF stringify_in_c(mtocrf) +#else +#define PPC_MTOCRF stringify_in_c(mtcrf) +#endif + +#else /* 32-bit */ + +/* operations for longs and pointers */ +#define PPC_LL stringify_in_c(lwz) +#define PPC_STL stringify_in_c(stw) +#define PPC_STLU stringify_in_c(stwu) +#define PPC_LCMPI stringify_in_c(cmpwi) +#define PPC_LONG stringify_in_c(.long) +#define PPC_LONG_ALIGN stringify_in_c(.balign 4) +#define PPC_TLNEI stringify_in_c(twnei) +#define PPC_LLARX(t, a, b, eh) PPC_LWARX(t, a, b, eh) +#define PPC_STLCX stringify_in_c(stwcx.) +#define PPC_CNTLZL stringify_in_c(cntlzw) +#define PPC_MTOCRF stringify_in_c(mtcrf) +#define PPC_LR_STKOFF 4 +#define PPC_MIN_STKFRM 16 + +#endif + +#ifdef __KERNEL__ +#ifdef CONFIG_IBM405_ERR77 +/* Erratum #77 on the 405 means we need a sync or dcbt before every + * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this. + */ +#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;) +#define PPC405_ERR77_SYNC stringify_in_c(sync;) +#else +#define PPC405_ERR77(ra,rb) +#define PPC405_ERR77_SYNC +#endif +#endif + +#endif /* _ASM_POWERPC_ASM_COMPAT_H */ diff --git a/arch/powerpc/include/asm/asm-offsets.h b/arch/powerpc/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/powerpc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/powerpc/include/asm/async_tx.h b/arch/powerpc/include/asm/async_tx.h new file mode 100644 index 00000000000..8b2dc55d01a --- /dev/null +++ b/arch/powerpc/include/asm/async_tx.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2008-2009 DENX Software Engineering. + * + * Author: Yuri Tikhonov <yur@emcraft.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ +#ifndef _ASM_POWERPC_ASYNC_TX_H_ +#define _ASM_POWERPC_ASYNC_TX_H_ + +#if defined(CONFIG_440SPe) || defined(CONFIG_440SP) +extern struct dma_chan * +ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap, + struct page **dst_lst, int dst_cnt, struct page **src_lst, + int src_cnt, size_t src_sz); + +#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \ + src_cnt, src_sz) \ + ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \ + src_cnt, src_sz) +#else + +#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ + __async_tx_find_channel(dep, type) + +struct dma_chan * +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); + +#endif + +#endif diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h new file mode 100644 index 00000000000..02e41b53488 --- /dev/null +++ b/arch/powerpc/include/asm/atomic.h @@ -0,0 +1,475 @@ +#ifndef _ASM_POWERPC_ATOMIC_H_ +#define _ASM_POWERPC_ATOMIC_H_ + +/* + * PowerPC atomic operations + */ + +#include <linux/types.h> + +#ifdef __KERNEL__ +#include <linux/compiler.h> +#include <asm/synch.h> +#include <asm/asm-compat.h> +#include <asm/system.h> + +#define ATOMIC_INIT(i) { (i) } + +static __inline__ int atomic_read(const atomic_t *v) +{ + int t; + + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + + return t; +} + +static __inline__ void atomic_set(atomic_t *v, int i) +{ + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); +} + +static __inline__ void atomic_add(int a, atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%3 # atomic_add\n\ + add %0,%2,%0\n" + PPC405_ERR77(0,%3) +" stwcx. %0,0,%3 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (a), "r" (&v->counter) + : "cc"); +} + +static __inline__ int atomic_add_return(int a, atomic_t *v) +{ + int t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%2 # atomic_add_return\n\ + add %0,%1,%0\n" + PPC405_ERR77(0,%2) +" stwcx. %0,0,%2 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (a), "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) + +static __inline__ void atomic_sub(int a, atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%3 # atomic_sub\n\ + subf %0,%2,%0\n" + PPC405_ERR77(0,%3) +" stwcx. %0,0,%3 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (a), "r" (&v->counter) + : "cc"); +} + +static __inline__ int atomic_sub_return(int a, atomic_t *v) +{ + int t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%2 # atomic_sub_return\n\ + subf %0,%1,%0\n" + PPC405_ERR77(0,%2) +" stwcx. %0,0,%2 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (a), "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +static __inline__ void atomic_inc(atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%2 # atomic_inc\n\ + addic %0,%0,1\n" + PPC405_ERR77(0,%2) +" stwcx. %0,0,%2 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (&v->counter) + : "cc", "xer"); +} + +static __inline__ int atomic_inc_return(atomic_t *v) +{ + int t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%1 # atomic_inc_return\n\ + addic %0,%0,1\n" + PPC405_ERR77(0,%1) +" stwcx. 
%0,0,%1 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (&v->counter) + : "cc", "xer", "memory"); + + return t; +} + +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +static __inline__ void atomic_dec(atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%2 # atomic_dec\n\ + addic %0,%0,-1\n" + PPC405_ERR77(0,%2)\ +" stwcx. %0,0,%2\n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (&v->counter) + : "cc", "xer"); +} + +static __inline__ int atomic_dec_return(atomic_t *v) +{ + int t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%1 # atomic_dec_return\n\ + addic %0,%0,-1\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1\n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (&v->counter) + : "cc", "xer", "memory"); + + return t; +} + +#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +/** + * __atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int t; + + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%1 # __atomic_add_unless\n\ + cmpw 0,%0,%3 \n\ + beq- 2f \n\ + add %0,%2,%0 \n" + PPC405_ERR77(0,%2) +" stwcx. %0,0,%1 \n\ + bne- 1b \n" + PPC_ATOMIC_EXIT_BARRIER +" subf %0,%2,%0 \n\ +2:" + : "=&r" (t) + : "r" (&v->counter), "r" (a), "r" (u) + : "cc", "memory"); + + return t; +} + + +#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) + +/* + * Atomically test *v and decrement if it is greater than 0. + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static __inline__ int atomic_dec_if_positive(atomic_t *v) +{ + int t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ + cmpwi %0,1\n\ + addi %0,%0,-1\n\ + blt- 2f\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1\n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + "\n\ +2:" : "=&b" (t) + : "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() + +#ifdef __powerpc64__ + +#define ATOMIC64_INIT(i) { (i) } + +static __inline__ long atomic64_read(const atomic64_t *v) +{ + long t; + + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + + return t; +} + +static __inline__ void atomic64_set(atomic64_t *v, long i) +{ + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); +} + +static __inline__ void atomic64_add(long a, atomic64_t *v) +{ + long t; + + __asm__ __volatile__( +"1: ldarx %0,0,%3 # atomic64_add\n\ + add %0,%2,%0\n\ + stdcx. 
%0,0,%3 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (a), "r" (&v->counter) + : "cc"); +} + +static __inline__ long atomic64_add_return(long a, atomic64_t *v) +{ + long t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%2 # atomic64_add_return\n\ + add %0,%1,%0\n\ + stdcx. %0,0,%2 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (a), "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) + +static __inline__ void atomic64_sub(long a, atomic64_t *v) +{ + long t; + + __asm__ __volatile__( +"1: ldarx %0,0,%3 # atomic64_sub\n\ + subf %0,%2,%0\n\ + stdcx. %0,0,%3 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (a), "r" (&v->counter) + : "cc"); +} + +static __inline__ long atomic64_sub_return(long a, atomic64_t *v) +{ + long t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%2 # atomic64_sub_return\n\ + subf %0,%1,%0\n\ + stdcx. %0,0,%2 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (a), "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +static __inline__ void atomic64_inc(atomic64_t *v) +{ + long t; + + __asm__ __volatile__( +"1: ldarx %0,0,%2 # atomic64_inc\n\ + addic %0,%0,1\n\ + stdcx. %0,0,%2 \n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (&v->counter) + : "cc", "xer"); +} + +static __inline__ long atomic64_inc_return(atomic64_t *v) +{ + long t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%1 # atomic64_inc_return\n\ + addic %0,%0,1\n\ + stdcx. %0,0,%1 \n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (&v->counter) + : "cc", "xer", "memory"); + + return t; +} + +/* + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + +static __inline__ void atomic64_dec(atomic64_t *v) +{ + long t; + + __asm__ __volatile__( +"1: ldarx %0,0,%2 # atomic64_dec\n\ + addic %0,%0,-1\n\ + stdcx. %0,0,%2\n\ + bne- 1b" + : "=&r" (t), "+m" (v->counter) + : "r" (&v->counter) + : "cc", "xer"); +} + +static __inline__ long atomic64_dec_return(atomic64_t *v) +{ + long t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%1 # atomic64_dec_return\n\ + addic %0,%0,-1\n\ + stdcx. %0,0,%1\n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + : "=&r" (t) + : "r" (&v->counter) + : "cc", "xer", "memory"); + + return t; +} + +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) + +/* + * Atomically test *v and decrement if it is greater than 0. + * The function returns the old value of *v minus 1. + */ +static __inline__ long atomic64_dec_if_positive(atomic64_t *v) +{ + long t; + + __asm__ __volatile__( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ + addic. %0,%0,-1\n\ + blt- 2f\n\ + stdcx. %0,0,%1\n\ + bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + "\n\ +2:" : "=&r" (t) + : "r" (&v->counter) + : "cc", "xer", "memory"); + + return t; +} + +#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. 
+ * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long t; + + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER +"1: ldarx %0,0,%1 # __atomic_add_unless\n\ + cmpd 0,%0,%3 \n\ + beq- 2f \n\ + add %0,%2,%0 \n" +" stdcx. %0,0,%1 \n\ + bne- 1b \n" + PPC_ATOMIC_EXIT_BARRIER +" subf %0,%2,%0 \n\ +2:" + : "=&r" (t) + : "r" (&v->counter), "r" (a), "r" (u) + : "cc", "memory"); + + return t != u; +} + +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + +#endif /* __powerpc64__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_ATOMIC_H_ */ diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h new file mode 100644 index 00000000000..19a099b62cd --- /dev/null +++ b/arch/powerpc/include/asm/auxvec.h @@ -0,0 +1,19 @@ +#ifndef _ASM_POWERPC_AUXVEC_H +#define _ASM_POWERPC_AUXVEC_H + +/* + * We need to put in some extra aux table entries to tell glibc what + * the cache block size is, so it can use the dcbz instruction safely. + */ +#define AT_DCACHEBSIZE 19 +#define AT_ICACHEBSIZE 20 +#define AT_UCACHEBSIZE 21 +/* A special ignored type value for PPC, for glibc compatibility. */ +#define AT_IGNOREPPC 22 + +/* The vDSO location. We have to use the same value as x86 for glibc's + * sake :-) + */ +#define AT_SYSINFO_EHDR 33 + +#endif diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h new file mode 100644 index 00000000000..8cf5c37c381 --- /dev/null +++ b/arch/powerpc/include/asm/backlight.h @@ -0,0 +1,41 @@ +/* + * Routines for handling backlight control on PowerBooks + * + * For now, implementation resides in + * arch/powerpc/platforms/powermac/backlight.c + * + */ +#ifndef __ASM_POWERPC_BACKLIGHT_H +#define __ASM_POWERPC_BACKLIGHT_H +#ifdef __KERNEL__ + +#include <linux/fb.h> +#include <linux/mutex.h> + +/* For locking instructions, see the implementation file */ +extern struct backlight_device *pmac_backlight; +extern struct mutex pmac_backlight_mutex; + +extern int pmac_backlight_curve_lookup(struct fb_info *info, int value); + +extern int pmac_has_backlight_type(const char *type); + +extern void pmac_backlight_key(int direction); +static inline void pmac_backlight_key_up(void) +{ + pmac_backlight_key(0); +} +static inline void pmac_backlight_key_down(void) +{ + pmac_backlight_key(1); +} + +extern void pmac_backlight_set_legacy_brightness_pmu(int brightness); +extern int pmac_backlight_set_legacy_brightness(int brightness); +extern int pmac_backlight_get_legacy_brightness(void); + +extern void pmac_backlight_enable(void); +extern void pmac_backlight_disable(void); + +#endif /* __KERNEL__ */ +#endif diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h new file mode 100644 index 00000000000..efdc92618b3 --- /dev/null +++ b/arch/powerpc/include/asm/bitops.h @@ -0,0 +1,336 @@ +/* + * PowerPC atomic bit operations. + * + * Merged version by David Gibson <david@gibson.dropbear.id.au>. + * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don + * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They + * originally took it from the ppc32 code. + * + * Within a word, bits are numbered LSB first. Lot's of places make + * this assumption by directly testing bits with (val & (1<<nr)). + * This can cause confusion for large (> 1 word) bitmaps on a + * big-endian system because, unlike little endian, the number of each + * bit depends on the word size. 
+ * + * The bitop functions are defined to work on unsigned longs, so for a + * ppc64 system the bits end up numbered: + * |63..............0|127............64|191...........128|255...........196| + * and on ppc32: + * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224| + * + * There are a few little-endian macros used mostly for filesystem + * bitmaps, these work on similar bit arrays layouts, but + * byte-oriented: + * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| + * + * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit + * number field needs to be reversed compared to the big-endian bit + * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_POWERPC_BITOPS_H +#define _ASM_POWERPC_BITOPS_H + +#ifdef __KERNEL__ + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <linux/compiler.h> +#include <asm/asm-compat.h> +#include <asm/synch.h> + +/* + * clear_bit doesn't imply a memory barrier + */ +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +/* Macro for generating the ***_bits() functions */ +#define DEFINE_BITOP(fn, op, prefix, postfix) \ +static __inline__ void fn(unsigned long mask, \ + volatile unsigned long *_p) \ +{ \ + unsigned long old; \ + unsigned long *p = (unsigned long *)_p; \ + __asm__ __volatile__ ( \ + prefix \ +"1:" PPC_LLARX(%0,0,%3,0) "\n" \ + stringify_in_c(op) "%0,%0,%2\n" \ + PPC405_ERR77(0,%3) \ + PPC_STLCX "%0,0,%3\n" \ + "bne- 1b\n" \ + postfix \ + : "=&r" (old), "+m" (*p) \ + : "r" (mask), "r" (p) \ + : "cc", "memory"); \ +} + +DEFINE_BITOP(set_bits, or, "", "") +DEFINE_BITOP(clear_bits, andc, "", "") +DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "") +DEFINE_BITOP(change_bits, xor, "", "") + +static __inline__ void set_bit(int nr, volatile unsigned long *addr) +{ + set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); +} + +static __inline__ void clear_bit(int nr, volatile unsigned long *addr) +{ + clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); +} + +static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr) +{ + clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr)); +} + +static __inline__ void change_bit(int nr, volatile unsigned long *addr) +{ + change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)); +} + +/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output + * operands. 
*/ +#define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ +static __inline__ unsigned long fn( \ + unsigned long mask, \ + volatile unsigned long *_p) \ +{ \ + unsigned long old, t; \ + unsigned long *p = (unsigned long *)_p; \ + __asm__ __volatile__ ( \ + prefix \ +"1:" PPC_LLARX(%0,0,%3,eh) "\n" \ + stringify_in_c(op) "%1,%0,%2\n" \ + PPC405_ERR77(0,%3) \ + PPC_STLCX "%1,0,%3\n" \ + "bne- 1b\n" \ + postfix \ + : "=&r" (old), "=&r" (t) \ + : "r" (mask), "r" (p) \ + : "cc", "memory"); \ + return (old & mask); \ +} + +DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER, + PPC_ATOMIC_EXIT_BARRIER, 0) +DEFINE_TESTOP(test_and_set_bits_lock, or, "", + PPC_ACQUIRE_BARRIER, 1) +DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER, + PPC_ATOMIC_EXIT_BARRIER, 0) +DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER, + PPC_ATOMIC_EXIT_BARRIER, 0) + +static __inline__ int test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; +} + +static __inline__ int test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) +{ + return test_and_set_bits_lock(BITOP_MASK(nr), + addr + BITOP_WORD(nr)) != 0; +} + +static __inline__ int test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr) +{ + return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; +} + +static __inline__ int test_and_change_bit(unsigned long nr, + volatile unsigned long *addr) +{ + return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0; +} + +#include <asm-generic/bitops/non-atomic.h> + +static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) +{ + __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); + __clear_bit(nr, addr); +} + +/* + * Return the zero-based bit position (LE, not IBM bit numbering) of + * the most significant 1-bit in a double word. + */ +static __inline__ __attribute__((const)) +int __ilog2(unsigned long x) +{ + int lz; + + asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x)); + return BITS_PER_LONG - 1 - lz; +} + +static inline __attribute__((const)) +int __ilog2_u32(u32 n) +{ + int bit; + asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); + return 31 - bit; +} + +#ifdef __powerpc64__ +static inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + int bit; + asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); + return 63 - bit; +} +#endif + +/* + * Determines the bit position of the least significant 0 bit in the + * specified double word. The returned bit position will be + * zero-based, starting from the right side (63/31 - 0). + */ +static __inline__ unsigned long ffz(unsigned long x) +{ + /* no zero exists anywhere in the 8 byte area. */ + if ((x = ~x) == 0) + return BITS_PER_LONG; + + /* + * Calculate the bit position of the least significant '1' bit in x + * (since x has been changed this will actually be the least significant + * '0' bit in * the original x). Note: (x & -x) gives us a mask that + * is the least significant * (RIGHT-most) 1-bit of the value in x. + */ + return __ilog2(x & -x); +} + +static __inline__ int __ffs(unsigned long x) +{ + return __ilog2(x & -x); +} + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static __inline__ int ffs(int x) +{ + unsigned long i = (unsigned long)x; + return __ilog2(i & -i) + 1; +} + +/* + * fls: find last (most-significant) bit set. 
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __inline__ int fls(unsigned int x) +{ + int lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; +} + +static __inline__ unsigned long __fls(unsigned long x) +{ + return __ilog2(x); +} + +/* + * 64-bit can do this using one cntlzd (count leading zeroes doubleword) + * instruction; for 32-bit we use the generic version, which does two + * 32-bit fls calls. + */ +#ifdef __powerpc64__ +static __inline__ int fls64(__u64 x) +{ + int lz; + + asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); + return 64 - lz; +} +#else +#include <asm-generic/bitops/fls64.h> +#endif /* __powerpc64__ */ + +#ifdef CONFIG_PPC64 +unsigned int __arch_hweight8(unsigned int w); +unsigned int __arch_hweight16(unsigned int w); +unsigned int __arch_hweight32(unsigned int w); +unsigned long __arch_hweight64(__u64 w); +#include <asm-generic/bitops/const_hweight.h> +#else +#include <asm-generic/bitops/hweight.h> +#endif + +#include <asm-generic/bitops/find.h> + +/* Little-endian versions */ + +static __inline__ int test_bit_le(unsigned long nr, + __const__ void *addr) +{ + __const__ unsigned char *tmp = (__const__ unsigned char *) addr; + return (tmp[nr >> 3] >> (nr & 7)) & 1; +} + +static inline void __set_bit_le(int nr, void *addr) +{ + __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __clear_bit_le(int nr, void *addr) +{ + __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_set_bit_le(int nr, void *addr) +{ + return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_clear_bit_le(int nr, void *addr) +{ + return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_set_bit_le(int nr, void *addr) +{ + return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_clear_bit_le(int nr, void *addr) +{ + return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +#define find_first_zero_bit_le(addr, size) \ + find_next_zero_bit_le((addr), (size), 0) +unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset); + +unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset); +/* Bitmap functions for the ext2 filesystem */ + +#include <asm-generic/bitops/ext2-atomic-setbit.h> + +#include <asm-generic/bitops/sched.h> + +#endif /* __KERNEL__ */ + +#endif /* _ASM_POWERPC_BITOPS_H */ diff --git a/arch/powerpc/include/asm/bitsperlong.h b/arch/powerpc/include/asm/bitsperlong.h new file mode 100644 index 00000000000..5f1659032c4 --- /dev/null +++ b/arch/powerpc/include/asm/bitsperlong.h @@ -0,0 +1,12 @@ +#ifndef __ASM_POWERPC_BITSPERLONG_H +#define __ASM_POWERPC_BITSPERLONG_H + +#if defined(__powerpc64__) +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_POWERPC_BITSPERLONG_H */ diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h new file mode 100644 index 00000000000..60a3c9ef301 --- /dev/null +++ b/arch/powerpc/include/asm/bootx.h @@ -0,0 +1,171 @@ +/* + * This file describes the structure passed from the BootX application + * (for MacOS) when it is used to boot Linux. + * + * Written by Benjamin Herrenschmidt. 
+ */ + + +#ifndef __ASM_BOOTX_H__ +#define __ASM_BOOTX_H__ + +#include <linux/types.h> + +#ifdef macintosh +#include <Types.h> +#include "linux_type_defs.h" +#endif + +#ifdef macintosh +/* All this requires PowerPC alignment */ +#pragma options align=power +#endif + +/* On kernel entry: + * + * r3 = 0x426f6f58 ('BooX') + * r4 = pointer to boot_infos + * r5 = NULL + * + * Data and instruction translation disabled, interrupts + * disabled, kernel loaded at physical 0x00000000 on PCI + * machines (will be different on NuBus). + */ + +#define BOOT_INFO_VERSION 5 +#define BOOT_INFO_COMPATIBLE_VERSION 1 + +/* Bit in the architecture flag mask. More to be defined in + future versions. Note that either BOOT_ARCH_PCI or + BOOT_ARCH_NUBUS is set. The other BOOT_ARCH_NUBUS_xxx are + set additionally when BOOT_ARCH_NUBUS is set. + */ +#define BOOT_ARCH_PCI 0x00000001UL +#define BOOT_ARCH_NUBUS 0x00000002UL +#define BOOT_ARCH_NUBUS_PDM 0x00000010UL +#define BOOT_ARCH_NUBUS_PERFORMA 0x00000020UL +#define BOOT_ARCH_NUBUS_POWERBOOK 0x00000040UL + +/* Maximum number of ranges in phys memory map */ +#define MAX_MEM_MAP_SIZE 26 + +/* This is the format of an element in the physical memory map. Note that + the map is optional and current BootX will only build it for pre-PCI + machines */ +typedef struct boot_info_map_entry +{ + __u32 physAddr; /* Physical starting address */ + __u32 size; /* Size in bytes */ +} boot_info_map_entry_t; + + +/* Here are the boot informations that are passed to the bootstrap + * Note that the kernel arguments and the device tree are appended + * at the end of this structure. */ +typedef struct boot_infos +{ + /* Version of this structure */ + __u32 version; + /* backward compatible down to version: */ + __u32 compatible_version; + + /* NEW (vers. 2) this holds the current _logical_ base addr of + the frame buffer (for use by early boot message) */ + __u8* logicalDisplayBase; + + /* NEW (vers. 4) Apple's machine identification */ + __u32 machineID; + + /* NEW (vers. 4) Detected hw architecture */ + __u32 architecture; + + /* The device tree (internal addresses relative to the beginning of the tree, + * device tree offset relative to the beginning of this structure). + * On pre-PCI macintosh (BOOT_ARCH_PCI bit set to 0 in architecture), this + * field is 0. + */ + __u32 deviceTreeOffset; /* Device tree offset */ + __u32 deviceTreeSize; /* Size of the device tree */ + + /* Some infos about the current MacOS display */ + __u32 dispDeviceRect[4]; /* left,top,right,bottom */ + __u32 dispDeviceDepth; /* (8, 16 or 32) */ + __u8* dispDeviceBase; /* base address (physical) */ + __u32 dispDeviceRowBytes; /* rowbytes (in bytes) */ + __u32 dispDeviceColorsOffset; /* Colormap (8 bits only) or 0 (*) */ + /* Optional offset in the registry to the current + * MacOS display. (Can be 0 when not detected) */ + __u32 dispDeviceRegEntryOffset; + + /* Optional pointer to boot ramdisk (offset from this structure) */ + __u32 ramDisk; + __u32 ramDiskSize; /* size of ramdisk image */ + + /* Kernel command line arguments (offset from this structure) */ + __u32 kernelParamsOffset; + + /* ALL BELOW NEW (vers. 4) */ + + /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag + (non-PCI) only. On PCI, memory is contiguous and it's size is in the + device-tree. 
*/ + boot_info_map_entry_t + physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */ + __u32 physMemoryMapSize; /* How many entries in map */ + + + /* The framebuffer size (optional, currently 0) */ + __u32 frameBufferSize; /* Represents a max size, can be 0. */ + + /* NEW (vers. 5) */ + + /* Total params size (args + colormap + device tree + ramdisk) */ + __u32 totalParamsSize; + +} boot_infos_t; + +#ifdef __KERNEL__ +/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index + * is represented by 3 short words containing a 16 bits (unsigned) color + * component. Later versions may contain the gamma table for direct-color + * devices here. + */ +#define BOOTX_COLORTABLE_SIZE (256UL*3UL*2UL) + +/* BootX passes the device-tree using a format that comes from earlier + * ppc32 kernels. This used to match what is in prom.h, but not anymore + * so we now define it here + */ +struct bootx_dt_prop { + u32 name; + int length; + u32 value; + u32 next; +}; + +struct bootx_dt_node { + u32 unused0; + u32 unused1; + u32 phandle; /* not really available */ + u32 unused2; + u32 unused3; + u32 unused4; + u32 unused5; + u32 full_name; + u32 properties; + u32 parent; + u32 child; + u32 sibling; + u32 next; + u32 allnext; +}; + +extern void bootx_init(unsigned long r4, unsigned long phys); + +#endif /* __KERNEL__ */ + +#ifdef macintosh +#pragma options align=reset +#endif + +#endif diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h new file mode 100644 index 00000000000..906f46e3100 --- /dev/null +++ b/arch/powerpc/include/asm/btext.h @@ -0,0 +1,28 @@ +/* + * Definitions for using the procedures in btext.c. + * + * Benjamin Herrenschmidt <benh@kernel.crashing.org> + */ +#ifndef __PPC_BTEXT_H +#define __PPC_BTEXT_H +#ifdef __KERNEL__ + +extern int btext_find_display(int allow_nonstdout); +extern void btext_update_display(unsigned long phys, int width, int height, + int depth, int pitch); +extern void btext_setup_display(int width, int height, int depth, int pitch, + unsigned long address); +extern void btext_prepare_BAT(void); +extern void btext_unmap(void); + +extern void btext_drawchar(char c); +extern void btext_drawstring(const char *str); +extern void btext_drawhex(unsigned long v); +extern void btext_drawtext(const char *c, unsigned int len); + +extern void btext_clearscreen(void); +extern void btext_flushscreen(void); +extern void btext_flushline(void); + +#endif /* __KERNEL__ */ +#endif /* __PPC_BTEXT_H */ diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h new file mode 100644 index 00000000000..065c590c991 --- /dev/null +++ b/arch/powerpc/include/asm/bug.h @@ -0,0 +1,130 @@ +#ifndef _ASM_POWERPC_BUG_H +#define _ASM_POWERPC_BUG_H +#ifdef __KERNEL__ + +#include <asm/asm-compat.h> + +/* + * Define an illegal instr to trap on the bug. + * We don't use 0 because that marks the end of a function + * in the ELF ABI. That's "Boo Boo" in case you wonder... 
+ */ +#define BUG_OPCODE .long 0x00b00b00 /* For asm */ +#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */ + +#ifdef CONFIG_BUG + +#ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> +#ifdef CONFIG_DEBUG_BUGVERBOSE +.macro EMIT_BUG_ENTRY addr,file,line,flags + .section __bug_table,"a" +5001: PPC_LONG \addr, 5002f + .short \line, \flags + .org 5001b+BUG_ENTRY_SIZE + .previous + .section .rodata,"a" +5002: .asciz "\file" + .previous +.endm +#else +.macro EMIT_BUG_ENTRY addr,file,line,flags + .section __bug_table,"a" +5001: PPC_LONG \addr + .short \flags + .org 5001b+BUG_ENTRY_SIZE + .previous +.endm +#endif /* verbose */ + +#else /* !__ASSEMBLY__ */ +/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and + sizeof(struct bug_entry), respectively */ +#ifdef CONFIG_DEBUG_BUGVERBOSE +#define _EMIT_BUG_ENTRY \ + ".section __bug_table,\"a\"\n" \ + "2:\t" PPC_LONG "1b, %0\n" \ + "\t.short %1, %2\n" \ + ".org 2b+%3\n" \ + ".previous\n" +#else +#define _EMIT_BUG_ENTRY \ + ".section __bug_table,\"a\"\n" \ + "2:\t" PPC_LONG "1b\n" \ + "\t.short %2\n" \ + ".org 2b+%3\n" \ + ".previous\n" +#endif + +/* + * BUG_ON() and WARN_ON() do their best to cooperate with compile-time + * optimisations. However depending on the complexity of the condition + * some compiler versions may not produce optimal results. + */ + +#define BUG() do { \ + __asm__ __volatile__( \ + "1: twi 31,0,0\n" \ + _EMIT_BUG_ENTRY \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (0), "i" (sizeof(struct bug_entry))); \ + unreachable(); \ +} while (0) + +#define BUG_ON(x) do { \ + if (__builtin_constant_p(x)) { \ + if (x) \ + BUG(); \ + } else { \ + __asm__ __volatile__( \ + "1: "PPC_TLNEI" %4,0\n" \ + _EMIT_BUG_ENTRY \ + : : "i" (__FILE__), "i" (__LINE__), "i" (0), \ + "i" (sizeof(struct bug_entry)), \ + "r" ((__force long)(x))); \ + } \ +} while (0) + +#define __WARN_TAINT(taint) do { \ + __asm__ __volatile__( \ + "1: twi 31,0,0\n" \ + _EMIT_BUG_ENTRY \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (BUGFLAG_TAINT(taint)), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#define WARN_ON(x) ({ \ + int __ret_warn_on = !!(x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ + __WARN(); \ + } else { \ + __asm__ __volatile__( \ + "1: "PPC_TLNEI" %4,0\n" \ + _EMIT_BUG_ENTRY \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (BUGFLAG_TAINT(TAINT_WARN)), \ + "i" (sizeof(struct bug_entry)), \ + "r" (__ret_warn_on)); \ + } \ + unlikely(__ret_warn_on); \ +}) + +#define HAVE_ARCH_BUG +#define HAVE_ARCH_BUG_ON +#define HAVE_ARCH_WARN_ON +#endif /* __ASSEMBLY __ */ +#else +#ifdef __ASSEMBLY__ +.macro EMIT_BUG_ENTRY addr,file,line,flags +.endm +#else /* !__ASSEMBLY__ */ +#define _EMIT_BUG_ENTRY +#endif +#endif /* CONFIG_BUG */ + +#include <asm-generic/bug.h> + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_BUG_H */ diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h new file mode 100644 index 00000000000..42fdb73e306 --- /dev/null +++ b/arch/powerpc/include/asm/bugs.h @@ -0,0 +1,18 @@ +#ifndef _ASM_POWERPC_BUGS_H +#define _ASM_POWERPC_BUGS_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * This file is included by 'init/main.c' to check for + * architecture-dependent bugs. 
+ */ + +static inline void check_bugs(void) { } + +#endif /* _ASM_POWERPC_BUGS_H */ diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h new file mode 100644 index 00000000000..aa6cc4fac96 --- /dev/null +++ b/arch/powerpc/include/asm/byteorder.h @@ -0,0 +1,12 @@ +#ifndef _ASM_POWERPC_BYTEORDER_H +#define _ASM_POWERPC_BYTEORDER_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/byteorder/big_endian.h> + +#endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h new file mode 100644 index 00000000000..4b509411ad8 --- /dev/null +++ b/arch/powerpc/include/asm/cache.h @@ -0,0 +1,49 @@ +#ifndef _ASM_POWERPC_CACHE_H +#define _ASM_POWERPC_CACHE_H + +#ifdef __KERNEL__ + + +/* bytes per L1 cache line */ +#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +#define L1_CACHE_SHIFT 4 +#define MAX_COPY_PREFETCH 1 +#elif defined(CONFIG_PPC_E500MC) +#define L1_CACHE_SHIFT 6 +#define MAX_COPY_PREFETCH 4 +#elif defined(CONFIG_PPC32) +#define MAX_COPY_PREFETCH 4 +#if defined(CONFIG_PPC_47x) +#define L1_CACHE_SHIFT 7 +#else +#define L1_CACHE_SHIFT 5 +#endif +#else /* CONFIG_PPC64 */ +#define L1_CACHE_SHIFT 7 +#endif + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#if defined(__powerpc64__) && !defined(__ASSEMBLY__) +struct ppc64_caches { + u32 dsize; /* L1 d-cache size */ + u32 dline_size; /* L1 d-cache line size */ + u32 log_dline_size; + u32 dlines_per_page; + u32 isize; /* L1 i-cache size */ + u32 iline_size; /* L1 i-cache line size */ + u32 log_iline_size; + u32 ilines_per_page; +}; + +extern struct ppc64_caches ppc64_caches; +#endif /* __powerpc64__ && ! __ASSEMBLY__ */ + +#if !defined(__ASSEMBLY__) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_CACHE_H */ diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h new file mode 100644 index 00000000000..ab9e402518e --- /dev/null +++ b/arch/powerpc/include/asm/cacheflush.h @@ -0,0 +1,76 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_POWERPC_CACHEFLUSH_H +#define _ASM_POWERPC_CACHEFLUSH_H + +#ifdef __KERNEL__ + +#include <linux/mm.h> +#include <asm/cputable.h> + +/* + * No cache flushing is required when address mappings are changed, + * because the caches on PowerPCs are physically addressed. 
+ */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_icache_page(vma, page) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +extern void flush_dcache_page(struct page *page); +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +extern void __flush_icache_range(unsigned long, unsigned long); +static inline void flush_icache_range(unsigned long start, unsigned long stop) +{ + if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + __flush_icache_range(start, stop); +} + +extern void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, unsigned long addr, + int len); +extern void __flush_dcache_icache(void *page_va); +extern void flush_dcache_icache_page(struct page *page); +#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE) +extern void __flush_dcache_icache_phys(unsigned long physaddr); +#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */ + +extern void flush_dcache_range(unsigned long start, unsigned long stop); +#ifdef CONFIG_PPC32 +extern void clean_dcache_range(unsigned long start, unsigned long stop); +extern void invalidate_dcache_range(unsigned long start, unsigned long stop); +#endif /* CONFIG_PPC32 */ +#ifdef CONFIG_PPC64 +extern void flush_inval_dcache_range(unsigned long start, unsigned long stop); +extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); +#endif + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ + } while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + + + +#ifdef CONFIG_DEBUG_PAGEALLOC +/* internal debugging function */ +void kernel_map_pages(struct page *page, int numpages, int enable); +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_POWERPC_CACHEFLUSH_H */ diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h new file mode 100644 index 00000000000..b4b7338ad79 --- /dev/null +++ b/arch/powerpc/include/asm/cell-pmu.h @@ -0,0 +1,107 @@ +/* + * Cell Broadband Engine Performance Monitor + * + * (C) Copyright IBM Corporation 2006 + * + * Author: + * David Erb (djerb@us.ibm.com) + * Kevin Corry (kevcorry@us.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ASM_CELL_PMU_H__ +#define __ASM_CELL_PMU_H__ + +/* The Cell PMU has four hardware performance counters, which can be + * configured as four 32-bit counters or eight 16-bit counters. 
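+ *
+ * As the accessor naming below suggests, the cbe_*_phys_ctr() routines
+ * operate on the four physical counter registers, while the cbe_*_ctr()
+ * routines address logical counters (up to eight when the counters are
+ * configured as 16-bit).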
+ */ +#define NR_PHYS_CTRS 4 +#define NR_CTRS (NR_PHYS_CTRS * 2) + +/* Macros for the pm_control register. */ +#define CBE_PM_16BIT_CTR(ctr) (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1)))) +#define CBE_PM_ENABLE_PERF_MON 0x80000000 +#define CBE_PM_STOP_AT_MAX 0x40000000 +#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) +#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) +#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17) +#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) +#define CBE_PM_FREEZE_ALL_CTRS 0x00100000 +#define CBE_PM_ENABLE_EXT_TRACE 0x00008000 +#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9) + +/* Macros for the trace_address register. */ +#define CBE_PM_TRACE_BUF_FULL 0x00000800 +#define CBE_PM_TRACE_BUF_EMPTY 0x00000400 +#define CBE_PM_TRACE_BUF_DATA_COUNT(ta) ((ta) & 0x3ff) +#define CBE_PM_TRACE_BUF_MAX_COUNT 0x400 + +/* Macros for the pm07_control registers. */ +#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f) +#define CBE_PM_CTR_INPUT_CONTROL 0x02000000 +#define CBE_PM_CTR_POLARITY 0x01000000 +#define CBE_PM_CTR_COUNT_CYCLES 0x00800000 +#define CBE_PM_CTR_ENABLE 0x00400000 +#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26) +#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25) +#define PM07_CTR_POLARITY(x) (((x) & 1) << 24) +#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23) +#define PM07_CTR_ENABLE(x) (((x) & 1) << 22) + +/* Macros for the pm_status register. */ +#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7))) + +enum pm_reg_name { + group_control, + debug_bus_control, + trace_address, + ext_tr_timer, + pm_status, + pm_control, + pm_interval, + pm_start_stop, +}; + +/* Routines for reading/writing the PMU registers. */ +extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr); +extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val); +extern u32 cbe_read_ctr(u32 cpu, u32 ctr); +extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val); + +extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr); +extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val); +extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg); +extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val); + +extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr); +extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size); + +extern void cbe_enable_pm(u32 cpu); +extern void cbe_disable_pm(u32 cpu); + +extern void cbe_read_trace_buffer(u32 cpu, u64 *buf); + +extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask); +extern void cbe_disable_pm_interrupts(u32 cpu); +extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu); +extern void cbe_sync_irq(int node); + +#define CBE_COUNT_SUPERVISOR_MODE 0 +#define CBE_COUNT_HYPERVISOR_MODE 1 +#define CBE_COUNT_PROBLEM_MODE 2 +#define CBE_COUNT_ALL_MODES 3 + +#endif /* __ASM_CELL_PMU_H__ */ diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h new file mode 100644 index 00000000000..fdf64fd2595 --- /dev/null +++ b/arch/powerpc/include/asm/cell-regs.h @@ -0,0 +1,326 @@ +/* + * cbe_regs.h + * + * This file is intended to hold the various register definitions for CBE + * on-chip system devices (memory controller, IO controller, etc...) + * + * (C) Copyright IBM Corporation 2001,2006 + * + * Authors: Maximino Aguilar (maguilar@us.ibm.com) + * David J. Erb (djerb@us.ibm.com) + * + * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. 
+ */ + +#ifndef CBE_REGS_H +#define CBE_REGS_H + +#include <asm/cell-pmu.h> + +/* + * + * Some HID register definitions + * + */ + +/* CBE specific HID0 bits */ +#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul +#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul +#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul +#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul + +#define MAX_CBE 2 + +/* + * + * Pervasive unit register definitions + * + */ + +union spe_reg { + u64 val; + u8 spe[8]; +}; + +union ppe_spe_reg { + u64 val; + struct { + u32 ppe; + u32 spe; + }; +}; + + +struct cbe_pmd_regs { + /* Debug Bus Control */ + u64 pad_0x0000; /* 0x0000 */ + + u64 group_control; /* 0x0008 */ + + u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */ + + u64 debug_bus_control; /* 0x00a8 */ + + u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */ + + u64 trace_aux_data; /* 0x0100 */ + u64 trace_buffer_0_63; /* 0x0108 */ + u64 trace_buffer_64_127; /* 0x0110 */ + u64 trace_address; /* 0x0118 */ + u64 ext_tr_timer; /* 0x0120 */ + + u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */ + + /* Performance Monitor */ + u64 pm_status; /* 0x0400 */ + u64 pm_control; /* 0x0408 */ + u64 pm_interval; /* 0x0410 */ + u64 pm_ctr[4]; /* 0x0418 */ + u64 pm_start_stop; /* 0x0438 */ + u64 pm07_control[8]; /* 0x0440 */ + + u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */ + + /* Thermal Sensor Registers */ + union spe_reg ts_ctsr1; /* 0x0800 */ + u64 ts_ctsr2; /* 0x0808 */ + union spe_reg ts_mtsr1; /* 0x0810 */ + u64 ts_mtsr2; /* 0x0818 */ + union spe_reg ts_itr1; /* 0x0820 */ + u64 ts_itr2; /* 0x0828 */ + u64 ts_gitr; /* 0x0830 */ + u64 ts_isr; /* 0x0838 */ + u64 ts_imr; /* 0x0840 */ + union spe_reg tm_cr1; /* 0x0848 */ + u64 tm_cr2; /* 0x0850 */ + u64 tm_simr; /* 0x0858 */ + union ppe_spe_reg tm_tpr; /* 0x0860 */ + union spe_reg tm_str1; /* 0x0868 */ + u64 tm_str2; /* 0x0870 */ + union ppe_spe_reg tm_tsr; /* 0x0878 */ + + /* Power Management */ + u64 pmcr; /* 0x0880 */ +#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000 + u64 pmsr; /* 0x0888 */ + + /* Time Base Register */ + u64 tbr; /* 0x0890 */ + + u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */ + + /* Fault Isolation Registers */ + u64 checkstop_fir; /* 0x0c00 */ + u64 recoverable_fir; /* 0x0c08 */ + u64 spec_att_mchk_fir; /* 0x0c10 */ + u32 fir_mode_reg; /* 0x0c18 */ + u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */ +#define CBE_PMD_FIR_MODE_M8 0x00800 + u64 fir_enable_mask; /* 0x0c20 */ + + u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */ + u64 ras_esc_0; /* 0x0ca8 */ + u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */ +}; + +extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np); +extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu); + +/* + * PMU shadow registers + * + * Many of the registers in the performance monitoring unit are write-only, + * so we need to save a copy of what we write to those registers. + * + * The actual data counters are read/write. However, writing to the counters + * only takes effect if the PMU is enabled. Otherwise the value is stored in + * a hardware latch until the next time the PMU is enabled. So we save a copy + * of the counter values if we need to read them back while the PMU is + * disabled. The counter_value_in_latch field is a bitmap indicating which + * counters currently have a value waiting to be written. 
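+ *
+ * So, illustratively, a counter write issued while the PMU is disabled
+ * updates the shadow copy and sets that counter's bit in
+ * counter_value_in_latch; a later read with the PMU still disabled can
+ * then return the pending value instead of the stale hardware contents.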
+ */ + +struct cbe_pmd_shadow_regs { + u32 group_control; + u32 debug_bus_control; + u32 trace_address; + u32 ext_tr_timer; + u32 pm_status; + u32 pm_control; + u32 pm_interval; + u32 pm_start_stop; + u32 pm07_control[NR_CTRS]; + + u32 pm_ctr[NR_PHYS_CTRS]; + u32 counter_value_in_latch; +}; + +extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np); +extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu); + +/* + * + * IIC unit register definitions + * + */ + +struct cbe_iic_pending_bits { + u32 data; + u8 flags; + u8 class; + u8 source; + u8 prio; +}; + +#define CBE_IIC_IRQ_VALID 0x80 +#define CBE_IIC_IRQ_IPI 0x40 + +struct cbe_iic_thread_regs { + struct cbe_iic_pending_bits pending; + struct cbe_iic_pending_bits pending_destr; + u64 generate; + u64 prio; +}; + +struct cbe_iic_regs { + u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */ + + /* IIC interrupt registers */ + struct cbe_iic_thread_regs thread[2]; /* 0x0400 */ + + u64 iic_ir; /* 0x0440 */ +#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12) +#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4) +#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf) +#define CBE_IIC_IR_IOC_0 0x0 +#define CBE_IIC_IR_IOC_1S 0xb +#define CBE_IIC_IR_PT_0 0xe +#define CBE_IIC_IR_PT_1 0xf + + u64 iic_is; /* 0x0448 */ +#define CBE_IIC_IS_PMI 0x2 + + u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */ + + /* IOC FIR */ + u64 ioc_fir_reset; /* 0x0500 */ + u64 ioc_fir_set; /* 0x0508 */ + u64 ioc_checkstop_enable; /* 0x0510 */ + u64 ioc_fir_error_mask; /* 0x0518 */ + u64 ioc_syserr_enable; /* 0x0520 */ + u64 ioc_fir; /* 0x0528 */ + + u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */ +}; + +extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np); +extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu); + + +struct cbe_mic_tm_regs { + u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */ + + u64 mic_ctl_cnfg2; /* 0x0040 */ +#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL +#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL +#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL +#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL + + u64 pad_0x0048; /* 0x0048 */ + + u64 mic_aux_trc_base; /* 0x0050 */ + u64 mic_aux_trc_max_addr; /* 0x0058 */ + u64 mic_aux_trc_cur_addr; /* 0x0060 */ + u64 mic_aux_trc_grf_addr; /* 0x0068 */ + u64 mic_aux_trc_grf_data; /* 0x0070 */ + + u64 pad_0x0078; /* 0x0078 */ + + u64 mic_ctl_cnfg_0; /* 0x0080 */ +#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL + + u64 pad_0x0088; /* 0x0088 */ + + u64 slow_fast_timer_0; /* 0x0090 */ + u64 slow_next_timer_0; /* 0x0098 */ + + u8 pad_0x00a0_0x00f8[0x00f8 - 0x00a0]; /* 0x00a0 */ + u64 mic_df_ecc_address_0; /* 0x00f8 */ + + u8 pad_0x0100_0x01b8[0x01b8 - 0x0100]; /* 0x0100 */ + u64 mic_df_ecc_address_1; /* 0x01b8 */ + + u64 mic_ctl_cnfg_1; /* 0x01c0 */ +#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL + + u64 pad_0x01c8; /* 0x01c8 */ + + u64 slow_fast_timer_1; /* 0x01d0 */ + u64 slow_next_timer_1; /* 0x01d8 */ + + u8 pad_0x01e0_0x0208[0x0208 - 0x01e0]; /* 0x01e0 */ + u64 mic_exc; /* 0x0208 */ +#define CBE_MIC_EXC_BLOCK_SCRUB 0x0800000000000000ULL +#define CBE_MIC_EXC_FAST_SCRUB 0x0100000000000000ULL + + u64 mic_mnt_cfg; /* 0x0210 */ +#define CBE_MIC_MNT_CFG_CHAN_0_POP 0x0002000000000000ULL +#define CBE_MIC_MNT_CFG_CHAN_1_POP 0x0004000000000000ULL + + u64 mic_df_config; /* 0x0218 */ +#define CBE_MIC_ECC_DISABLE_0 0x4000000000000000ULL +#define CBE_MIC_ECC_REP_SINGLE_0 0x2000000000000000ULL +#define 
CBE_MIC_ECC_DISABLE_1 0x0080000000000000ULL +#define CBE_MIC_ECC_REP_SINGLE_1 0x0040000000000000ULL + + u8 pad_0x0220_0x0230[0x0230 - 0x0220]; /* 0x0220 */ + u64 mic_fir; /* 0x0230 */ +#define CBE_MIC_FIR_ECC_SINGLE_0_ERR 0x0200000000000000ULL +#define CBE_MIC_FIR_ECC_MULTI_0_ERR 0x0100000000000000ULL +#define CBE_MIC_FIR_ECC_SINGLE_1_ERR 0x0080000000000000ULL +#define CBE_MIC_FIR_ECC_MULTI_1_ERR 0x0040000000000000ULL +#define CBE_MIC_FIR_ECC_ERR_MASK 0xffff000000000000ULL +#define CBE_MIC_FIR_ECC_SINGLE_0_CTE 0x0000020000000000ULL +#define CBE_MIC_FIR_ECC_MULTI_0_CTE 0x0000010000000000ULL +#define CBE_MIC_FIR_ECC_SINGLE_1_CTE 0x0000008000000000ULL +#define CBE_MIC_FIR_ECC_MULTI_1_CTE 0x0000004000000000ULL +#define CBE_MIC_FIR_ECC_CTE_MASK 0x0000ffff00000000ULL +#define CBE_MIC_FIR_ECC_SINGLE_0_RESET 0x0000000002000000ULL +#define CBE_MIC_FIR_ECC_MULTI_0_RESET 0x0000000001000000ULL +#define CBE_MIC_FIR_ECC_SINGLE_1_RESET 0x0000000000800000ULL +#define CBE_MIC_FIR_ECC_MULTI_1_RESET 0x0000000000400000ULL +#define CBE_MIC_FIR_ECC_RESET_MASK 0x00000000ffff0000ULL +#define CBE_MIC_FIR_ECC_SINGLE_0_SET 0x0000000000000200ULL +#define CBE_MIC_FIR_ECC_MULTI_0_SET 0x0000000000000100ULL +#define CBE_MIC_FIR_ECC_SINGLE_1_SET 0x0000000000000080ULL +#define CBE_MIC_FIR_ECC_MULTI_1_SET 0x0000000000000040ULL +#define CBE_MIC_FIR_ECC_SET_MASK 0x000000000000ffffULL + u64 mic_fir_debug; /* 0x0238 */ + + u8 pad_0x0240_0x1000[0x1000 - 0x0240]; /* 0x0240 */ +}; + +extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np); +extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu); + + +/* Cell page table entries */ +#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */ +#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */ +#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */ +#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */ +#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */ +#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */ +#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */ +#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */ + +/* some utility functions to deal with SMT */ +extern u32 cbe_get_hw_thread_id(int cpu); +extern u32 cbe_cpu_to_node(int cpu); +extern u32 cbe_node_to_cpu(int node); + +/* Init this module early */ +extern void cbe_regs_init(void); + + +#endif /* CBE_REGS_H */ diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h new file mode 100644 index 00000000000..ce0c28495f9 --- /dev/null +++ b/arch/powerpc/include/asm/checksum.h @@ -0,0 +1,127 @@ +#ifndef _ASM_POWERPC_CHECKSUM_H +#define _ASM_POWERPC_CHECKSUM_H +#ifdef __KERNEL__ + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. ihl is the number + * of 32-bit words and is always >= 5. 
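+ *
+ * Typical use (illustrative; iph is assumed to point at a received
+ * IPv4 header), since a valid header sums to zero:
+ *
+ *	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
+ *		goto csum_error;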
+ */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * Computes the checksum of a memory block at src, length len, + * and adds in "sum" (32-bit), while copying the block to dst. + * If an access exception occurs on src or dst, it stores -EFAULT + * to *src_err or *dst_err respectively (if that pointer is not + * NULL), and, for an error on src, zeroes the rest of dst. + * + * Like csum_partial, this must be called with even lengths, + * except for the last fragment. + */ +extern __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err, int *dst_err); + +#ifdef __powerpc64__ +#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *err_ptr); +#define HAVE_CSUM_COPY_USER +extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, + int len, __wsum sum, int *err_ptr); +#else +/* + * the same as csum_partial, but copies from src to dst while it + * checksums. + */ +#define csum_partial_copy_from_user(src, dst, len, sum, errp) \ + csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL) +#endif + +#define csum_partial_copy_nocheck(src, dst, len, sum) \ + csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) + + +/* + * turns a 32-bit partial checksum (e.g. from csum_partial) into a + * 1's complement 16-bit checksum. + */ +static inline __sum16 csum_fold(__wsum sum) +{ + unsigned int tmp; + + /* swap the two 16-bit halves of sum */ + __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum)); + /* if there is a carry from adding the two 16-bit halves, + it will carry from the lower half into the upper half, + giving us the correct sum in the upper half. 
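+	   For example: with sum = 0x0001ffff, tmp becomes 0xffff0001,
+	   sum + tmp = 0x00010000, and ~(sum + tmp) >> 16 yields 0xfffe,
+	   the complemented 16-bit fold of the original sum.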
*/ + return (__force __sum16)(~((__force u32)sum + tmp) >> 16); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +static inline __sum16 ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ +#ifdef __powerpc64__ + unsigned long s = (__force u32)sum; + + s += (__force u32)saddr; + s += (__force u32)daddr; + s += proto + len; + s += (s >> 32); + return (__force __wsum) s; +#else + __asm__("\n\ + addc %0,%0,%1 \n\ + adde %0,%0,%2 \n\ + adde %0,%0,%3 \n\ + addze %0,%0 \n\ + " + : "=r" (sum) + : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum)); + return sum; +#endif +} +#endif /* __KERNEL__ */ +#endif diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h new file mode 100644 index 00000000000..ab1882c1e17 --- /dev/null +++ b/arch/powerpc/include/asm/clk_interface.h @@ -0,0 +1,20 @@ +#ifndef __ASM_POWERPC_CLK_INTERFACE_H +#define __ASM_POWERPC_CLK_INTERFACE_H + +#include <linux/clk.h> + +struct clk_interface { + struct clk* (*clk_get) (struct device *dev, const char *id); + int (*clk_enable) (struct clk *clk); + void (*clk_disable) (struct clk *clk); + unsigned long (*clk_get_rate) (struct clk *clk); + void (*clk_put) (struct clk *clk); + long (*clk_round_rate) (struct clk *clk, unsigned long rate); + int (*clk_set_rate) (struct clk *clk, unsigned long rate); + int (*clk_set_parent) (struct clk *clk, struct clk *parent); + struct clk* (*clk_get_parent) (struct clk *clk); +}; + +extern struct clk_interface clk_functions; + +#endif /* __ASM_POWERPC_CLK_INTERFACE_H */ diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h new file mode 100644 index 00000000000..37c32aba79b --- /dev/null +++ b/arch/powerpc/include/asm/code-patching.h @@ -0,0 +1,52 @@ +#ifndef _ASM_POWERPC_CODE_PATCHING_H +#define _ASM_POWERPC_CODE_PATCHING_H + +/* + * Copyright 2008, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <asm/types.h> +#include <asm/ppc-opcode.h> + +/* Flags for create_branch: + * "b" == create_branch(addr, target, 0); + * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE); + * "bl" == create_branch(addr, target, BRANCH_SET_LINK); + * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK); + */ +#define BRANCH_SET_LINK 0x1 +#define BRANCH_ABSOLUTE 0x2 + +unsigned int create_branch(const unsigned int *addr, + unsigned long target, int flags); +unsigned int create_cond_branch(const unsigned int *addr, + unsigned long target, int flags); +void patch_branch(unsigned int *addr, unsigned long target, int flags); +void patch_instruction(unsigned int *addr, unsigned int instr); + +int instr_is_relative_branch(unsigned int instr); +int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); +unsigned long branch_target(const unsigned int *instr); +unsigned int translate_branch(const unsigned int *dest, + const unsigned int *src); + +static inline unsigned long ppc_function_entry(void *func) +{ +#ifdef CONFIG_PPC64 + /* + * On PPC64 the function pointer actually points to the function's + * descriptor. 
The first entry in the descriptor is the address + * of the function text. + */ + return ((func_descr_t *)func)->entry; +#else + return (unsigned long)func; +#endif +} + +#endif /* _ASM_POWERPC_CODE_PATCHING_H */ diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h new file mode 100644 index 00000000000..88e602f6430 --- /dev/null +++ b/arch/powerpc/include/asm/compat.h @@ -0,0 +1,221 @@ +#ifndef _ASM_POWERPC_COMPAT_H +#define _ASM_POWERPC_COMPAT_H +#ifdef __KERNEL__ +/* + * Architecture specific compatibility types + */ +#include <linux/types.h> +#include <linux/sched.h> + +#define COMPAT_USER_HZ 100 +#define COMPAT_UTS_MACHINE "ppc\0\0" + +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_time_t; +typedef s32 compat_clock_t; +typedef s32 compat_pid_t; +typedef u32 __compat_uid_t; +typedef u32 __compat_gid_t; +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; +typedef u32 compat_mode_t; +typedef u32 compat_ino_t; +typedef u32 compat_dev_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef s16 compat_nlink_t; +typedef u16 compat_ipc_pid_t; +typedef s32 compat_daddr_t; +typedef u32 compat_caddr_t; +typedef __kernel_fsid_t compat_fsid_t; +typedef s32 compat_key_t; +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef s64 compat_s64; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u64 compat_u64; + +struct compat_timespec { + compat_time_t tv_sec; + s32 tv_nsec; +}; + +struct compat_timeval { + compat_time_t tv_sec; + s32 tv_usec; +}; + +struct compat_stat { + compat_dev_t st_dev; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid32_t st_uid; + __compat_gid32_t st_gid; + compat_dev_t st_rdev; + compat_off_t st_size; + compat_off_t st_blksize; + compat_off_t st_blocks; + compat_time_t st_atime; + u32 st_atime_nsec; + compat_time_t st_mtime; + u32 st_mtime_nsec; + compat_time_t st_ctime; + u32 st_ctime_nsec; + u32 __unused4[2]; +}; + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; +}; + +#define F_GETLK64 12 /* using 'struct flock64' */ +#define F_SETLK64 13 +#define F_SETLKW64 14 + +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +}; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; /* SunOS ignores this field. */ + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff +#define COMPAT_RLIM_INFINITY 0xffffffff + +typedef u32 compat_old_sigset_t; + +#define _COMPAT_NSIG 64 +#define _COMPAT_NSIG_BPW 32 + +typedef u32 compat_sigset_word; + +#define COMPAT_OFF_T_MAX 0x7fffffff +#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL + +/* + * A pointer passed in from user mode. This should not + * be used for syscall parameters, just declare them + * as pointers because the syscall entry code will have + * appropriately converted them already. 
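+ *
+ * Illustrative use (the structure and field names are made up) after a
+ * 32-bit argument block has been copied in from user space:
+ *
+ *	void __user *buf = compat_ptr(karg32.buf);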
+ */ +typedef u32 compat_uptr_t; + +static inline void __user *compat_ptr(compat_uptr_t uptr) +{ + return (void __user *)(unsigned long)uptr; +} + +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + +static inline void __user *arch_compat_alloc_user_space(long len) +{ + struct pt_regs *regs = current->thread.regs; + unsigned long usp = regs->gpr[1]; + + /* + * We can't access below the stack pointer in the 32bit ABI and + * can access 288 bytes in the 64bit ABI + */ + if (!is_32bit_task()) + usp -= 288; + + return (void __user *) (usp - len); +} + +/* + * ipc64_perm is actually 32/64bit clean but since the compat layer refers to + * it we may as well define it. + */ +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid_t uid; + __compat_gid_t gid; + __compat_uid_t cuid; + __compat_gid_t cgid; + compat_mode_t mode; + unsigned int seq; + unsigned int __pad2; + unsigned long __unused1; /* yes they really are 64bit pads */ + unsigned long __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + unsigned int __unused1; + compat_time_t sem_otime; + unsigned int __unused2; + compat_time_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + unsigned int __unused1; + compat_time_t msg_stime; + unsigned int __unused2; + compat_time_t msg_rtime; + unsigned int __unused3; + compat_time_t msg_ctime; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + unsigned int __unused1; + compat_time_t shm_atime; + unsigned int __unused2; + compat_time_t shm_dtime; + unsigned int __unused3; + compat_time_t shm_ctime; + unsigned int __unused4; + compat_size_t shm_segsz; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused5; + compat_ulong_t __unused6; +}; + +static inline int is_compat_task(void) +{ + return is_32bit_task(); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_COMPAT_H */ diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h new file mode 100644 index 00000000000..4398a6cdcf5 --- /dev/null +++ b/arch/powerpc/include/asm/cpm.h @@ -0,0 +1,212 @@ +#ifndef __CPM_H +#define __CPM_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/of.h> + +/* + * SPI Parameter RAM common to QE and CPM. + */ +struct spi_pram { + __be16 rbase; /* Rx Buffer descriptor base address */ + __be16 tbase; /* Tx Buffer descriptor base address */ + u8 rfcr; /* Rx function code */ + u8 tfcr; /* Tx function code */ + __be16 mrblr; /* Max receive buffer length */ + __be32 rstate; /* Internal */ + __be32 rdp; /* Internal */ + __be16 rbptr; /* Internal */ + __be16 rbc; /* Internal */ + __be32 rxtmp; /* Internal */ + __be32 tstate; /* Internal */ + __be32 tdp; /* Internal */ + __be16 tbptr; /* Internal */ + __be16 tbc; /* Internal */ + __be32 txtmp; /* Internal */ + __be32 res; /* Tx temp. */ + __be16 rpbase; /* Relocation pointer (CPM1 only) */ + __be16 res1; /* Reserved */ +}; + +/* + * USB Controller pram common to QE and CPM. 
+ */ +struct usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + __be16 usb_usep[4]; + u8 res2[4]; + __be16 usb_usber; + u8 res3[2]; + __be16 usb_usbmr; + u8 res4[1]; + u8 usb_usbs; + /* Fields down below are QE-only */ + __be16 usb_ussft; + u8 res5[2]; + __be16 usb_usfrn; + u8 res6[0x22]; +} __attribute__ ((packed)); + +/* + * Function code bits, usually generic to devices. + */ +#ifdef CONFIG_CPM1 +#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */ +#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */ +#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */ +#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */ +#else +#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */ +#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */ +#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */ +#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */ +#endif +#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */ + +/* Opcodes common to CPM1 and CPM2 +*/ +#define CPM_CR_INIT_TRX ((ushort)0x0000) +#define CPM_CR_INIT_RX ((ushort)0x0001) +#define CPM_CR_INIT_TX ((ushort)0x0002) +#define CPM_CR_HUNT_MODE ((ushort)0x0003) +#define CPM_CR_STOP_TX ((ushort)0x0004) +#define CPM_CR_GRA_STOP_TX ((ushort)0x0005) +#define CPM_CR_RESTART_TX ((ushort)0x0006) +#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007) +#define CPM_CR_SET_GADDR ((ushort)0x0008) +#define CPM_CR_SET_TIMER ((ushort)0x0008) +#define CPM_CR_STOP_IDMA ((ushort)0x000b) + +/* Buffer descriptors used by many of the CPM protocols. */ +typedef struct cpm_buf_desc { + ushort cbd_sc; /* Status and Control */ + ushort cbd_datlen; /* Data length in buffer */ + uint cbd_bufaddr; /* Buffer address in host memory */ +} cbd_t; + +/* Buffer descriptor control/status used by serial + */ + +#define BD_SC_EMPTY (0x8000) /* Receive is empty */ +#define BD_SC_READY (0x8000) /* Transmit is ready */ +#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT (0x1000) /* Interrupt on change */ +#define BD_SC_LAST (0x0800) /* Last buffer in frame */ +#define BD_SC_TC (0x0400) /* Transmit CRC */ +#define BD_SC_CM (0x0200) /* Continuous mode */ +#define BD_SC_ID (0x0100) /* Rec'd too many idles */ +#define BD_SC_P (0x0100) /* xmt preamble */ +#define BD_SC_BR (0x0020) /* Break received */ +#define BD_SC_FR (0x0010) /* Framing error */ +#define BD_SC_PR (0x0008) /* Parity error */ +#define BD_SC_NAK (0x0004) /* NAK - did not respond */ +#define BD_SC_OV (0x0002) /* Overrun */ +#define BD_SC_UN (0x0002) /* Underrun */ +#define BD_SC_CD (0x0001) /* */ +#define BD_SC_CL (0x0001) /* Collision */ + +/* Buffer descriptor control/status used by Ethernet receive. + * Common to SCC and FCC. + */ +#define BD_ENET_RX_EMPTY (0x8000) +#define BD_ENET_RX_WRAP (0x2000) +#define BD_ENET_RX_INTR (0x1000) +#define BD_ENET_RX_LAST (0x0800) +#define BD_ENET_RX_FIRST (0x0400) +#define BD_ENET_RX_MISS (0x0100) +#define BD_ENET_RX_BC (0x0080) /* FCC Only */ +#define BD_ENET_RX_MC (0x0040) /* FCC Only */ +#define BD_ENET_RX_LG (0x0020) +#define BD_ENET_RX_NO (0x0010) +#define BD_ENET_RX_SH (0x0008) +#define BD_ENET_RX_CR (0x0004) +#define BD_ENET_RX_OV (0x0002) +#define BD_ENET_RX_CL (0x0001) +#define BD_ENET_RX_STATS (0x01ff) /* All status bits */ + +/* Buffer descriptor control/status used by Ethernet transmit. + * Common to SCC and FCC. 
+ */ +#define BD_ENET_TX_READY (0x8000) +#define BD_ENET_TX_PAD (0x4000) +#define BD_ENET_TX_WRAP (0x2000) +#define BD_ENET_TX_INTR (0x1000) +#define BD_ENET_TX_LAST (0x0800) +#define BD_ENET_TX_TC (0x0400) +#define BD_ENET_TX_DEF (0x0200) +#define BD_ENET_TX_HB (0x0100) +#define BD_ENET_TX_LC (0x0080) +#define BD_ENET_TX_RL (0x0040) +#define BD_ENET_TX_RCMASK (0x003c) +#define BD_ENET_TX_UN (0x0002) +#define BD_ENET_TX_CSL (0x0001) +#define BD_ENET_TX_STATS (0x03ff) /* All status bits */ + +/* Buffer descriptor control/status used by Transparent mode SCC. + */ +#define BD_SCC_TX_LAST (0x0800) + +/* Buffer descriptor control/status used by I2C. + */ +#define BD_I2C_START (0x0400) + +int cpm_muram_init(void); + +#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) +unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); +int cpm_muram_free(unsigned long offset); +unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); +void __iomem *cpm_muram_addr(unsigned long offset); +unsigned long cpm_muram_offset(void __iomem *addr); +dma_addr_t cpm_muram_dma(void __iomem *addr); +#else +static inline unsigned long cpm_muram_alloc(unsigned long size, + unsigned long align) +{ + return -ENOSYS; +} + +static inline int cpm_muram_free(unsigned long offset) +{ + return -ENOSYS; +} + +static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset, + unsigned long size) +{ + return -ENOSYS; +} + +static inline void __iomem *cpm_muram_addr(unsigned long offset) +{ + return NULL; +} + +static inline unsigned long cpm_muram_offset(void __iomem *addr) +{ + return -ENOSYS; +} + +static inline dma_addr_t cpm_muram_dma(void __iomem *addr) +{ + return 0; +} +#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */ + +#ifdef CONFIG_CPM +int cpm_command(u32 command, u8 opcode); +#else +static inline int cpm_command(u32 command, u8 opcode) +{ + return -ENOSYS; +} +#endif /* CONFIG_CPM */ + +int cpm2_gpiochip_add32(struct device_node *np); + +#endif diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h new file mode 100644 index 00000000000..8ee4211ca0c --- /dev/null +++ b/arch/powerpc/include/asm/cpm1.h @@ -0,0 +1,606 @@ +/* + * MPC8xx Communication Processor Module. + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * This file contains structures and information for the communication + * processor channels. Some CPM control and status is available + * through the MPC8xx internal memory map. See immap.h for details. + * This file only contains what I need for the moment, not the total + * CPM capabilities. I (or someone else) will add definitions as they + * are needed. -- Dan + * + * On the MBX board, EPPC-Bug loads CPM microcode into the first 512 + * bytes of the DP RAM and relocates the I2C parameter area to the + * IDMA1 space. The remaining DP RAM is available for buffer descriptors + * or other use. + */ +#ifndef __CPM1__ +#define __CPM1__ + +#include <linux/init.h> +#include <asm/8xx_immap.h> +#include <asm/ptrace.h> +#include <asm/cpm.h> + +/* CPM Command register. +*/ +#define CPM_CR_RST ((ushort)0x8000) +#define CPM_CR_OPCODE ((ushort)0x0f00) +#define CPM_CR_CHAN ((ushort)0x00f0) +#define CPM_CR_FLG ((ushort)0x0001) + +/* Channel numbers. 
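+ * A channel number goes into the CHAN field of the command register;
+ * mk_cr_cmd() below combines it with an opcode, illustratively
+ * mk_cr_cmd(CPM_CR_CH_SMC1, CPM_CR_INIT_TRX), to form a command word.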
+*/ +#define CPM_CR_CH_SCC1 ((ushort)0x0000) +#define CPM_CR_CH_I2C ((ushort)0x0001) /* I2C and IDMA1 */ +#define CPM_CR_CH_SCC2 ((ushort)0x0004) +#define CPM_CR_CH_SPI ((ushort)0x0005) /* SPI / IDMA2 / Timers */ +#define CPM_CR_CH_TIMER CPM_CR_CH_SPI +#define CPM_CR_CH_SCC3 ((ushort)0x0008) +#define CPM_CR_CH_SMC1 ((ushort)0x0009) /* SMC1 / DSP1 */ +#define CPM_CR_CH_SCC4 ((ushort)0x000c) +#define CPM_CR_CH_SMC2 ((ushort)0x000d) /* SMC2 / DSP2 */ + +#define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4)) + +/* Export the base address of the communication processor registers + * and dual port ram. + */ +extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */ + +#define cpm_dpalloc cpm_muram_alloc +#define cpm_dpfree cpm_muram_free +#define cpm_dpram_addr cpm_muram_addr +#define cpm_dpram_phys cpm_muram_dma + +extern void cpm_setbrg(uint brg, uint rate); + +extern void __init cpm_load_patch(cpm8xx_t *cp); + +extern void cpm_reset(void); + +/* Parameter RAM offsets. +*/ +#define PROFF_SCC1 ((uint)0x0000) +#define PROFF_IIC ((uint)0x0080) +#define PROFF_SCC2 ((uint)0x0100) +#define PROFF_SPI ((uint)0x0180) +#define PROFF_SCC3 ((uint)0x0200) +#define PROFF_SMC1 ((uint)0x0280) +#define PROFF_SCC4 ((uint)0x0300) +#define PROFF_SMC2 ((uint)0x0380) + +/* Define enough so I can at least use the serial port as a UART. + * The MBX uses SMC1 as the host serial port. + */ +typedef struct smc_uart { + ushort smc_rbase; /* Rx Buffer descriptor base address */ + ushort smc_tbase; /* Tx Buffer descriptor base address */ + u_char smc_rfcr; /* Rx function code */ + u_char smc_tfcr; /* Tx function code */ + ushort smc_mrblr; /* Max receive buffer length */ + uint smc_rstate; /* Internal */ + uint smc_idp; /* Internal */ + ushort smc_rbptr; /* Internal */ + ushort smc_ibc; /* Internal */ + uint smc_rxtmp; /* Internal */ + uint smc_tstate; /* Internal */ + uint smc_tdp; /* Internal */ + ushort smc_tbptr; /* Internal */ + ushort smc_tbc; /* Internal */ + uint smc_txtmp; /* Internal */ + ushort smc_maxidl; /* Maximum idle characters */ + ushort smc_tmpidl; /* Temporary idle counter */ + ushort smc_brklen; /* Last received break length */ + ushort smc_brkec; /* rcv'd break condition counter */ + ushort smc_brkcr; /* xmt break count register */ + ushort smc_rmask; /* Temporary bit mask */ + char res1[8]; /* Reserved */ + ushort smc_rpbase; /* Relocation pointer */ +} smc_uart_t; + +/* Function code bits. +*/ +#define SMC_EB ((u_char)0x10) /* Set big endian byte order */ + +/* SMC uart mode register. +*/ +#define SMCMR_REN ((ushort)0x0001) +#define SMCMR_TEN ((ushort)0x0002) +#define SMCMR_DM ((ushort)0x000c) +#define SMCMR_SM_GCI ((ushort)0x0000) +#define SMCMR_SM_UART ((ushort)0x0020) +#define SMCMR_SM_TRANS ((ushort)0x0030) +#define SMCMR_SM_MASK ((ushort)0x0030) +#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */ +#define SMCMR_REVD SMCMR_PM_EVEN +#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */ +#define SMCMR_BS SMCMR_PEN +#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */ +#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */ +#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK) + +/* SMC2 as Centronics parallel printer. It is half duplex, in that + * it can only receive or transmit. The parameter ram values for + * each direction are either unique or properly overlap, so we can + * include them in one structure. 
+ */ +typedef struct smc_centronics { + ushort scent_rbase; + ushort scent_tbase; + u_char scent_cfcr; + u_char scent_smask; + ushort scent_mrblr; + uint scent_rstate; + uint scent_r_ptr; + ushort scent_rbptr; + ushort scent_r_cnt; + uint scent_rtemp; + uint scent_tstate; + uint scent_t_ptr; + ushort scent_tbptr; + ushort scent_t_cnt; + uint scent_ttemp; + ushort scent_max_sl; + ushort scent_sl_cnt; + ushort scent_character1; + ushort scent_character2; + ushort scent_character3; + ushort scent_character4; + ushort scent_character5; + ushort scent_character6; + ushort scent_character7; + ushort scent_character8; + ushort scent_rccm; + ushort scent_rccr; +} smc_cent_t; + +/* Centronics Status Mask Register. +*/ +#define SMC_CENT_F ((u_char)0x08) +#define SMC_CENT_PE ((u_char)0x04) +#define SMC_CENT_S ((u_char)0x02) + +/* SMC Event and Mask register. +*/ +#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */ +#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */ +#define SMCM_TXE ((unsigned char)0x10) /* When in Transparent Mode */ +#define SMCM_BSY ((unsigned char)0x04) +#define SMCM_TX ((unsigned char)0x02) +#define SMCM_RX ((unsigned char)0x01) + +/* Baud rate generators. +*/ +#define CPM_BRG_RST ((uint)0x00020000) +#define CPM_BRG_EN ((uint)0x00010000) +#define CPM_BRG_EXTC_INT ((uint)0x00000000) +#define CPM_BRG_EXTC_CLK2 ((uint)0x00004000) +#define CPM_BRG_EXTC_CLK6 ((uint)0x00008000) +#define CPM_BRG_ATB ((uint)0x00002000) +#define CPM_BRG_CD_MASK ((uint)0x00001ffe) +#define CPM_BRG_DIV16 ((uint)0x00000001) + +/* SI Clock Route Register +*/ +#define SICR_RCLK_SCC1_BRG1 ((uint)0x00000000) +#define SICR_TCLK_SCC1_BRG1 ((uint)0x00000000) +#define SICR_RCLK_SCC2_BRG2 ((uint)0x00000800) +#define SICR_TCLK_SCC2_BRG2 ((uint)0x00000100) +#define SICR_RCLK_SCC3_BRG3 ((uint)0x00100000) +#define SICR_TCLK_SCC3_BRG3 ((uint)0x00020000) +#define SICR_RCLK_SCC4_BRG4 ((uint)0x18000000) +#define SICR_TCLK_SCC4_BRG4 ((uint)0x03000000) + +/* SCCs. 
+*/ +#define SCC_GSMRH_IRP ((uint)0x00040000) +#define SCC_GSMRH_GDE ((uint)0x00010000) +#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000) +#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000) +#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000) +#define SCC_GSMRH_REVD ((uint)0x00002000) +#define SCC_GSMRH_TRX ((uint)0x00001000) +#define SCC_GSMRH_TTX ((uint)0x00000800) +#define SCC_GSMRH_CDP ((uint)0x00000400) +#define SCC_GSMRH_CTSP ((uint)0x00000200) +#define SCC_GSMRH_CDS ((uint)0x00000100) +#define SCC_GSMRH_CTSS ((uint)0x00000080) +#define SCC_GSMRH_TFL ((uint)0x00000040) +#define SCC_GSMRH_RFW ((uint)0x00000020) +#define SCC_GSMRH_TXSY ((uint)0x00000010) +#define SCC_GSMRH_SYNL16 ((uint)0x0000000c) +#define SCC_GSMRH_SYNL8 ((uint)0x00000008) +#define SCC_GSMRH_SYNL4 ((uint)0x00000004) +#define SCC_GSMRH_RTSM ((uint)0x00000002) +#define SCC_GSMRH_RSYN ((uint)0x00000001) + +#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */ +#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000) +#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000) +#define SCC_GSMRL_EDGE_POS ((uint)0x20000000) +#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000) +#define SCC_GSMRL_TCI ((uint)0x10000000) +#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000) +#define SCC_GSMRL_TSNC_4 ((uint)0x08000000) +#define SCC_GSMRL_TSNC_14 ((uint)0x04000000) +#define SCC_GSMRL_TSNC_INF ((uint)0x00000000) +#define SCC_GSMRL_RINV ((uint)0x02000000) +#define SCC_GSMRL_TINV ((uint)0x01000000) +#define SCC_GSMRL_TPL_128 ((uint)0x00c00000) +#define SCC_GSMRL_TPL_64 ((uint)0x00a00000) +#define SCC_GSMRL_TPL_48 ((uint)0x00800000) +#define SCC_GSMRL_TPL_32 ((uint)0x00600000) +#define SCC_GSMRL_TPL_16 ((uint)0x00400000) +#define SCC_GSMRL_TPL_8 ((uint)0x00200000) +#define SCC_GSMRL_TPL_NONE ((uint)0x00000000) +#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000) +#define SCC_GSMRL_TPP_01 ((uint)0x00100000) +#define SCC_GSMRL_TPP_10 ((uint)0x00080000) +#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000) +#define SCC_GSMRL_TEND ((uint)0x00040000) +#define SCC_GSMRL_TDCR_32 ((uint)0x00030000) +#define SCC_GSMRL_TDCR_16 ((uint)0x00020000) +#define SCC_GSMRL_TDCR_8 ((uint)0x00010000) +#define SCC_GSMRL_TDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000) +#define SCC_GSMRL_RDCR_16 ((uint)0x00008000) +#define SCC_GSMRL_RDCR_8 ((uint)0x00004000) +#define SCC_GSMRL_RDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000) +#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000) +#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000) +#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800) +#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600) +#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400) +#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200) +#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100) +#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */ +#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080) +#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040) +#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000) +#define SCC_GSMRL_ENR ((uint)0x00000020) +#define SCC_GSMRL_ENT ((uint)0x00000010) +#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c) +#define SCC_GSMRL_MODE_QMC ((uint)0x0000000a) +#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009) +#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008) +#define SCC_GSMRL_MODE_V14 ((uint)0x00000007) +#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006) +#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005) +#define SCC_GSMRL_MODE_UART ((uint)0x00000004) +#define SCC_GSMRL_MODE_SS7 
((uint)0x00000003) +#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002) +#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000) + +#define SCC_TODR_TOD ((ushort)0x8000) + +/* SCC Event and Mask register. +*/ +#define SCCM_TXE ((unsigned char)0x10) +#define SCCM_BSY ((unsigned char)0x04) +#define SCCM_TX ((unsigned char)0x02) +#define SCCM_RX ((unsigned char)0x01) + +typedef struct scc_param { + ushort scc_rbase; /* Rx Buffer descriptor base address */ + ushort scc_tbase; /* Tx Buffer descriptor base address */ + u_char scc_rfcr; /* Rx function code */ + u_char scc_tfcr; /* Tx function code */ + ushort scc_mrblr; /* Max receive buffer length */ + uint scc_rstate; /* Internal */ + uint scc_idp; /* Internal */ + ushort scc_rbptr; /* Internal */ + ushort scc_ibc; /* Internal */ + uint scc_rxtmp; /* Internal */ + uint scc_tstate; /* Internal */ + uint scc_tdp; /* Internal */ + ushort scc_tbptr; /* Internal */ + ushort scc_tbc; /* Internal */ + uint scc_txtmp; /* Internal */ + uint scc_rcrc; /* Internal */ + uint scc_tcrc; /* Internal */ +} sccp_t; + +/* Function code bits. +*/ +#define SCC_EB ((u_char)0x10) /* Set big endian byte order */ + +/* CPM Ethernet through SCCx. + */ +typedef struct scc_enet { + sccp_t sen_genscc; + uint sen_cpres; /* Preset CRC */ + uint sen_cmask; /* Constant mask for CRC */ + uint sen_crcec; /* CRC Error counter */ + uint sen_alec; /* alignment error counter */ + uint sen_disfc; /* discard frame counter */ + ushort sen_pads; /* Tx short frame pad character */ + ushort sen_retlim; /* Retry limit threshold */ + ushort sen_retcnt; /* Retry limit counter */ + ushort sen_maxflr; /* maximum frame length register */ + ushort sen_minflr; /* minimum frame length register */ + ushort sen_maxd1; /* maximum DMA1 length */ + ushort sen_maxd2; /* maximum DMA2 length */ + ushort sen_maxd; /* Rx max DMA */ + ushort sen_dmacnt; /* Rx DMA counter */ + ushort sen_maxb; /* Max BD byte count */ + ushort sen_gaddr1; /* Group address filter */ + ushort sen_gaddr2; + ushort sen_gaddr3; + ushort sen_gaddr4; + uint sen_tbuf0data0; /* Save area 0 - current frame */ + uint sen_tbuf0data1; /* Save area 1 - current frame */ + uint sen_tbuf0rba; /* Internal */ + uint sen_tbuf0crc; /* Internal */ + ushort sen_tbuf0bcnt; /* Internal */ + ushort sen_paddrh; /* physical address (MSB) */ + ushort sen_paddrm; + ushort sen_paddrl; /* physical address (LSB) */ + ushort sen_pper; /* persistence */ + ushort sen_rfbdptr; /* Rx first BD pointer */ + ushort sen_tfbdptr; /* Tx first BD pointer */ + ushort sen_tlbdptr; /* Tx last BD pointer */ + uint sen_tbuf1data0; /* Save area 0 - current frame */ + uint sen_tbuf1data1; /* Save area 1 - current frame */ + uint sen_tbuf1rba; /* Internal */ + uint sen_tbuf1crc; /* Internal */ + ushort sen_tbuf1bcnt; /* Internal */ + ushort sen_txlen; /* Tx Frame length counter */ + ushort sen_iaddr1; /* Individual address filter */ + ushort sen_iaddr2; + ushort sen_iaddr3; + ushort sen_iaddr4; + ushort sen_boffcnt; /* Backoff counter */ + + /* NOTE: Some versions of the manual have the following items + * incorrectly documented. Below is the proper order. + */ + ushort sen_taddrh; /* temp address (MSB) */ + ushort sen_taddrm; + ushort sen_taddrl; /* temp address (LSB) */ +} scc_enet_t; + +/* SCC Event register as used by Ethernet. 
+*/ +#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ +#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */ +#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */ +#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */ +#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */ +#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */ + +/* SCC Mode Register (PMSR) as used by Ethernet. +*/ +#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */ +#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */ +#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */ +#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */ +#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */ +#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */ +#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */ +#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */ +#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */ +#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */ +#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */ +#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */ +#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */ + +/* SCC as UART +*/ +typedef struct scc_uart { + sccp_t scc_genscc; + char res1[8]; /* Reserved */ + ushort scc_maxidl; /* Maximum idle chars */ + ushort scc_idlc; /* temp idle counter */ + ushort scc_brkcr; /* Break count register */ + ushort scc_parec; /* receive parity error counter */ + ushort scc_frmec; /* receive framing error counter */ + ushort scc_nosec; /* receive noise counter */ + ushort scc_brkec; /* receive break condition counter */ + ushort scc_brkln; /* last received break length */ + ushort scc_uaddr1; /* UART address character 1 */ + ushort scc_uaddr2; /* UART address character 2 */ + ushort scc_rtemp; /* Temp storage */ + ushort scc_toseq; /* Transmit out of sequence char */ + ushort scc_char1; /* control character 1 */ + ushort scc_char2; /* control character 2 */ + ushort scc_char3; /* control character 3 */ + ushort scc_char4; /* control character 4 */ + ushort scc_char5; /* control character 5 */ + ushort scc_char6; /* control character 6 */ + ushort scc_char7; /* control character 7 */ + ushort scc_char8; /* control character 8 */ + ushort scc_rccm; /* receive control character mask */ + ushort scc_rccr; /* receive control character register */ + ushort scc_rlbc; /* receive last break character */ +} scc_uart_t; + +/* SCC Event and Mask registers when it is used as a UART. +*/ +#define UART_SCCM_GLR ((ushort)0x1000) +#define UART_SCCM_GLT ((ushort)0x0800) +#define UART_SCCM_AB ((ushort)0x0200) +#define UART_SCCM_IDL ((ushort)0x0100) +#define UART_SCCM_GRA ((ushort)0x0080) +#define UART_SCCM_BRKE ((ushort)0x0040) +#define UART_SCCM_BRKS ((ushort)0x0020) +#define UART_SCCM_CCR ((ushort)0x0008) +#define UART_SCCM_BSY ((ushort)0x0004) +#define UART_SCCM_TX ((ushort)0x0002) +#define UART_SCCM_RX ((ushort)0x0001) + +/* The SCC PMSR when used as a UART. 
+*/ +#define SCU_PSMR_FLC ((ushort)0x8000) +#define SCU_PSMR_SL ((ushort)0x4000) +#define SCU_PSMR_CL ((ushort)0x3000) +#define SCU_PSMR_UM ((ushort)0x0c00) +#define SCU_PSMR_FRZ ((ushort)0x0200) +#define SCU_PSMR_RZS ((ushort)0x0100) +#define SCU_PSMR_SYN ((ushort)0x0080) +#define SCU_PSMR_DRT ((ushort)0x0040) +#define SCU_PSMR_PEN ((ushort)0x0010) +#define SCU_PSMR_RPM ((ushort)0x000c) +#define SCU_PSMR_REVP ((ushort)0x0008) +#define SCU_PSMR_TPM ((ushort)0x0003) +#define SCU_PSMR_TEVP ((ushort)0x0002) + +/* CPM Transparent mode SCC. + */ +typedef struct scc_trans { + sccp_t st_genscc; + uint st_cpres; /* Preset CRC */ + uint st_cmask; /* Constant mask for CRC */ +} scc_trans_t; + +/* IIC parameter RAM. +*/ +typedef struct iic { + ushort iic_rbase; /* Rx Buffer descriptor base address */ + ushort iic_tbase; /* Tx Buffer descriptor base address */ + u_char iic_rfcr; /* Rx function code */ + u_char iic_tfcr; /* Tx function code */ + ushort iic_mrblr; /* Max receive buffer length */ + uint iic_rstate; /* Internal */ + uint iic_rdp; /* Internal */ + ushort iic_rbptr; /* Internal */ + ushort iic_rbc; /* Internal */ + uint iic_rxtmp; /* Internal */ + uint iic_tstate; /* Internal */ + uint iic_tdp; /* Internal */ + ushort iic_tbptr; /* Internal */ + ushort iic_tbc; /* Internal */ + uint iic_txtmp; /* Internal */ + char res1[4]; /* Reserved */ + ushort iic_rpbase; /* Relocation pointer */ + char res2[2]; /* Reserved */ +} iic_t; + +/* + * RISC Controller Configuration Register definitons + */ +#define RCCR_TIME 0x8000 /* RISC Timer Enable */ +#define RCCR_TIMEP(t) (((t) & 0x3F)<<8) /* RISC Timer Period */ +#define RCCR_TIME_MASK 0x00FF /* not RISC Timer related bits */ + +/* RISC Timer Parameter RAM offset */ +#define PROFF_RTMR ((uint)0x01B0) + +typedef struct risc_timer_pram { + unsigned short tm_base; /* RISC Timer Table Base Address */ + unsigned short tm_ptr; /* RISC Timer Table Pointer (internal) */ + unsigned short r_tmr; /* RISC Timer Mode Register */ + unsigned short r_tmv; /* RISC Timer Valid Register */ + unsigned long tm_cmd; /* RISC Timer Command Register */ + unsigned long tm_cnt; /* RISC Timer Internal Count */ +} rt_pram_t; + +/* Bits in RISC Timer Command Register */ +#define TM_CMD_VALID 0x80000000 /* Valid - Enables the timer */ +#define TM_CMD_RESTART 0x40000000 /* Restart - for automatic restart */ +#define TM_CMD_PWM 0x20000000 /* Run in Pulse Width Modulation Mode */ +#define TM_CMD_NUM(n) (((n)&0xF)<<16) /* Timer Number */ +#define TM_CMD_PERIOD(p) ((p)&0xFFFF) /* Timer Period */ + +/* CPM interrupts. There are nearly 32 interrupts generated by CPM + * channels or devices. All of these are presented to the PPC core + * as a single interrupt. The CPM interrupt handler dispatches its + * own handlers, in a similar fashion to the PPC core handler. We + * use the table as defined in the manuals (i.e. no special high + * priority and SCC1 == SCCa, etc...). 
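+ * (So, for example, an SCC1 event reaches the core as the single CPM
+ * interrupt and is then dispatched to its handler using the CPMVEC_SCC1
+ * vector defined below.)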
+ */ +#define CPMVEC_NR 32 +#define CPMVEC_PIO_PC15 ((ushort)0x1f) +#define CPMVEC_SCC1 ((ushort)0x1e) +#define CPMVEC_SCC2 ((ushort)0x1d) +#define CPMVEC_SCC3 ((ushort)0x1c) +#define CPMVEC_SCC4 ((ushort)0x1b) +#define CPMVEC_PIO_PC14 ((ushort)0x1a) +#define CPMVEC_TIMER1 ((ushort)0x19) +#define CPMVEC_PIO_PC13 ((ushort)0x18) +#define CPMVEC_PIO_PC12 ((ushort)0x17) +#define CPMVEC_SDMA_CB_ERR ((ushort)0x16) +#define CPMVEC_IDMA1 ((ushort)0x15) +#define CPMVEC_IDMA2 ((ushort)0x14) +#define CPMVEC_TIMER2 ((ushort)0x12) +#define CPMVEC_RISCTIMER ((ushort)0x11) +#define CPMVEC_I2C ((ushort)0x10) +#define CPMVEC_PIO_PC11 ((ushort)0x0f) +#define CPMVEC_PIO_PC10 ((ushort)0x0e) +#define CPMVEC_TIMER3 ((ushort)0x0c) +#define CPMVEC_PIO_PC9 ((ushort)0x0b) +#define CPMVEC_PIO_PC8 ((ushort)0x0a) +#define CPMVEC_PIO_PC7 ((ushort)0x09) +#define CPMVEC_TIMER4 ((ushort)0x07) +#define CPMVEC_PIO_PC6 ((ushort)0x06) +#define CPMVEC_SPI ((ushort)0x05) +#define CPMVEC_SMC1 ((ushort)0x04) +#define CPMVEC_SMC2 ((ushort)0x03) +#define CPMVEC_PIO_PC5 ((ushort)0x02) +#define CPMVEC_PIO_PC4 ((ushort)0x01) +#define CPMVEC_ERROR ((ushort)0x00) + +/* CPM interrupt configuration vector. +*/ +#define CICR_SCD_SCC4 ((uint)0x00c00000) /* SCC4 @ SCCd */ +#define CICR_SCC_SCC3 ((uint)0x00200000) /* SCC3 @ SCCc */ +#define CICR_SCB_SCC2 ((uint)0x00040000) /* SCC2 @ SCCb */ +#define CICR_SCA_SCC1 ((uint)0x00000000) /* SCC1 @ SCCa */ +#define CICR_IRL_MASK ((uint)0x0000e000) /* Core interrupt */ +#define CICR_HP_MASK ((uint)0x00001f00) /* Hi-pri int. */ +#define CICR_IEN ((uint)0x00000080) /* Int. enable */ +#define CICR_SPS ((uint)0x00000001) /* SCC Spread */ + +#define CPM_PIN_INPUT 0 +#define CPM_PIN_OUTPUT 1 +#define CPM_PIN_PRIMARY 0 +#define CPM_PIN_SECONDARY 2 +#define CPM_PIN_GPIO 4 +#define CPM_PIN_OPENDRAIN 8 + +enum cpm_port { + CPM_PORTA, + CPM_PORTB, + CPM_PORTC, + CPM_PORTD, + CPM_PORTE, +}; + +void cpm1_set_pin(enum cpm_port port, int pin, int flags); + +enum cpm_clk_dir { + CPM_CLK_RX, + CPM_CLK_TX, + CPM_CLK_RTX +}; + +enum cpm_clk_target { + CPM_CLK_SCC1, + CPM_CLK_SCC2, + CPM_CLK_SCC3, + CPM_CLK_SCC4, + CPM_CLK_SMC1, + CPM_CLK_SMC2, +}; + +enum cpm_clk { + CPM_BRG1, /* Baud Rate Generator 1 */ + CPM_BRG2, /* Baud Rate Generator 2 */ + CPM_BRG3, /* Baud Rate Generator 3 */ + CPM_BRG4, /* Baud Rate Generator 4 */ + CPM_CLK1, /* Clock 1 */ + CPM_CLK2, /* Clock 2 */ + CPM_CLK3, /* Clock 3 */ + CPM_CLK4, /* Clock 4 */ + CPM_CLK5, /* Clock 5 */ + CPM_CLK6, /* Clock 6 */ + CPM_CLK7, /* Clock 7 */ + CPM_CLK8, /* Clock 8 */ +}; + +int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode); + +#endif /* __CPM1__ */ diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h new file mode 100644 index 00000000000..f42e9baf3a4 --- /dev/null +++ b/arch/powerpc/include/asm/cpm2.h @@ -0,0 +1,1149 @@ +/* + * Communication Processor Module v2. + * + * This file contains structures and information for the communication + * processor channels found in the dual port RAM or parameter RAM. + * All CPM control and status is available through the CPM2 internal + * memory map. See immap_cpm2.h for details. + */ +#ifdef __KERNEL__ +#ifndef __CPM2__ +#define __CPM2__ + +#include <asm/immap_cpm2.h> +#include <asm/cpm.h> +#include <sysdev/fsl_soc.h> + +/* CPM Command register. 
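+ * A command is typically issued by writing a word built with
+ * mk_cr_cmd() (defined below), together with CPM_CR_FLG, to the
+ * command register; the CP clears CPM_CR_FLG once it has accepted
+ * the command.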
+*/ +#define CPM_CR_RST ((uint)0x80000000) +#define CPM_CR_PAGE ((uint)0x7c000000) +#define CPM_CR_SBLOCK ((uint)0x03e00000) +#define CPM_CR_FLG ((uint)0x00010000) +#define CPM_CR_MCN ((uint)0x00003fc0) +#define CPM_CR_OPCODE ((uint)0x0000000f) + +/* Device sub-block and page codes. +*/ +#define CPM_CR_SCC1_SBLOCK (0x04) +#define CPM_CR_SCC2_SBLOCK (0x05) +#define CPM_CR_SCC3_SBLOCK (0x06) +#define CPM_CR_SCC4_SBLOCK (0x07) +#define CPM_CR_SMC1_SBLOCK (0x08) +#define CPM_CR_SMC2_SBLOCK (0x09) +#define CPM_CR_SPI_SBLOCK (0x0a) +#define CPM_CR_I2C_SBLOCK (0x0b) +#define CPM_CR_TIMER_SBLOCK (0x0f) +#define CPM_CR_RAND_SBLOCK (0x0e) +#define CPM_CR_FCC1_SBLOCK (0x10) +#define CPM_CR_FCC2_SBLOCK (0x11) +#define CPM_CR_FCC3_SBLOCK (0x12) +#define CPM_CR_IDMA1_SBLOCK (0x14) +#define CPM_CR_IDMA2_SBLOCK (0x15) +#define CPM_CR_IDMA3_SBLOCK (0x16) +#define CPM_CR_IDMA4_SBLOCK (0x17) +#define CPM_CR_MCC1_SBLOCK (0x1c) + +#define CPM_CR_FCC_SBLOCK(x) (x + 0x10) + +#define CPM_CR_SCC1_PAGE (0x00) +#define CPM_CR_SCC2_PAGE (0x01) +#define CPM_CR_SCC3_PAGE (0x02) +#define CPM_CR_SCC4_PAGE (0x03) +#define CPM_CR_SMC1_PAGE (0x07) +#define CPM_CR_SMC2_PAGE (0x08) +#define CPM_CR_SPI_PAGE (0x09) +#define CPM_CR_I2C_PAGE (0x0a) +#define CPM_CR_TIMER_PAGE (0x0a) +#define CPM_CR_RAND_PAGE (0x0a) +#define CPM_CR_FCC1_PAGE (0x04) +#define CPM_CR_FCC2_PAGE (0x05) +#define CPM_CR_FCC3_PAGE (0x06) +#define CPM_CR_IDMA1_PAGE (0x07) +#define CPM_CR_IDMA2_PAGE (0x08) +#define CPM_CR_IDMA3_PAGE (0x09) +#define CPM_CR_IDMA4_PAGE (0x0a) +#define CPM_CR_MCC1_PAGE (0x07) +#define CPM_CR_MCC2_PAGE (0x08) + +#define CPM_CR_FCC_PAGE(x) (x + 0x04) + +/* CPM2-specific opcodes (see cpm.h for common opcodes) +*/ +#define CPM_CR_START_IDMA ((ushort)0x0009) + +#define mk_cr_cmd(PG, SBC, MCN, OP) \ + ((PG << 26) | (SBC << 21) | (MCN << 6) | OP) + +/* The number of pages of host memory we allocate for CPM. This is + * done early in kernel initialization to get physically contiguous + * pages. + */ +#define NUM_CPM_HOST_PAGES 2 + +/* Export the base address of the communication processor registers + * and dual port ram. + */ +extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */ + +#define cpm_dpalloc cpm_muram_alloc +#define cpm_dpfree cpm_muram_free +#define cpm_dpram_addr cpm_muram_addr + +extern void cpm2_reset(void); + +/* Baud rate generators. +*/ +#define CPM_BRG_RST ((uint)0x00020000) +#define CPM_BRG_EN ((uint)0x00010000) +#define CPM_BRG_EXTC_INT ((uint)0x00000000) +#define CPM_BRG_EXTC_CLK3_9 ((uint)0x00004000) +#define CPM_BRG_EXTC_CLK5_15 ((uint)0x00008000) +#define CPM_BRG_ATB ((uint)0x00002000) +#define CPM_BRG_CD_MASK ((uint)0x00001ffe) +#define CPM_BRG_DIV16 ((uint)0x00000001) + +#define CPM2_BRG_INT_CLK (get_brgfreq()) +#define CPM2_BRG_UART_CLK (CPM2_BRG_INT_CLK/16) + +extern void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src); + +/* This function is used by UARTS, or anything else that uses a 16x + * oversampled clock. + */ +static inline void cpm_setbrg(uint brg, uint rate) +{ + __cpm2_setbrg(brg, rate, CPM2_BRG_UART_CLK, 0, CPM_BRG_EXTC_INT); +} + +/* This function is used to set high speed synchronous baud rate + * clocks. + */ +static inline void cpm2_fastbrg(uint brg, uint rate, int div16) +{ + __cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT); +} + +/* Parameter RAM offsets from the base. 
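+ *
+ * For illustration only, a hedged sketch of reaching the SCC1 parameter RAM
+ * through these offsets, where dpram_base is a hypothetical __iomem mapping
+ * of the CPM dual port RAM provided by the BSP:
+ *
+ *	scc_uart_t __iomem *sup =
+ *		(scc_uart_t __iomem *)(dpram_base + PROFF_SCC1);
+ *	out_be16(&sup->scc_genscc.scc_mrblr, 1);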
+*/ +#define PROFF_SCC1 ((uint)0x8000) +#define PROFF_SCC2 ((uint)0x8100) +#define PROFF_SCC3 ((uint)0x8200) +#define PROFF_SCC4 ((uint)0x8300) +#define PROFF_FCC1 ((uint)0x8400) +#define PROFF_FCC2 ((uint)0x8500) +#define PROFF_FCC3 ((uint)0x8600) +#define PROFF_MCC1 ((uint)0x8700) +#define PROFF_SMC1_BASE ((uint)0x87fc) +#define PROFF_IDMA1_BASE ((uint)0x87fe) +#define PROFF_MCC2 ((uint)0x8800) +#define PROFF_SMC2_BASE ((uint)0x88fc) +#define PROFF_IDMA2_BASE ((uint)0x88fe) +#define PROFF_SPI_BASE ((uint)0x89fc) +#define PROFF_IDMA3_BASE ((uint)0x89fe) +#define PROFF_TIMERS ((uint)0x8ae0) +#define PROFF_REVNUM ((uint)0x8af0) +#define PROFF_RAND ((uint)0x8af8) +#define PROFF_I2C_BASE ((uint)0x8afc) +#define PROFF_IDMA4_BASE ((uint)0x8afe) + +#define PROFF_SCC_SIZE ((uint)0x100) +#define PROFF_FCC_SIZE ((uint)0x100) +#define PROFF_SMC_SIZE ((uint)64) + +/* The SMCs are relocated to any of the first eight DPRAM pages. + * We will fix these at the first locations of DPRAM, until we + * get some microcode patches :-). + * The parameter ram space for the SMCs is fifty-some bytes, and + * they are required to start on a 64 byte boundary. + */ +#define PROFF_SMC1 (0) +#define PROFF_SMC2 (64) + + +/* Define enough so I can at least use the serial port as a UART. + */ +typedef struct smc_uart { + ushort smc_rbase; /* Rx Buffer descriptor base address */ + ushort smc_tbase; /* Tx Buffer descriptor base address */ + u_char smc_rfcr; /* Rx function code */ + u_char smc_tfcr; /* Tx function code */ + ushort smc_mrblr; /* Max receive buffer length */ + uint smc_rstate; /* Internal */ + uint smc_idp; /* Internal */ + ushort smc_rbptr; /* Internal */ + ushort smc_ibc; /* Internal */ + uint smc_rxtmp; /* Internal */ + uint smc_tstate; /* Internal */ + uint smc_tdp; /* Internal */ + ushort smc_tbptr; /* Internal */ + ushort smc_tbc; /* Internal */ + uint smc_txtmp; /* Internal */ + ushort smc_maxidl; /* Maximum idle characters */ + ushort smc_tmpidl; /* Temporary idle counter */ + ushort smc_brklen; /* Last received break length */ + ushort smc_brkec; /* rcv'd break condition counter */ + ushort smc_brkcr; /* xmt break count register */ + ushort smc_rmask; /* Temporary bit mask */ + uint smc_stmp; /* SDMA Temp */ +} smc_uart_t; + +/* SMC uart mode register (Internal memory map). +*/ +#define SMCMR_REN ((ushort)0x0001) +#define SMCMR_TEN ((ushort)0x0002) +#define SMCMR_DM ((ushort)0x000c) +#define SMCMR_SM_GCI ((ushort)0x0000) +#define SMCMR_SM_UART ((ushort)0x0020) +#define SMCMR_SM_TRANS ((ushort)0x0030) +#define SMCMR_SM_MASK ((ushort)0x0030) +#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */ +#define SMCMR_REVD SMCMR_PM_EVEN +#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */ +#define SMCMR_BS SMCMR_PEN +#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */ +#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */ +#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK) + +/* SMC Event and Mask register. +*/ +#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */ +#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */ +#define SMCM_TXE ((unsigned char)0x10) +#define SMCM_BSY ((unsigned char)0x04) +#define SMCM_TX ((unsigned char)0x02) +#define SMCM_RX ((unsigned char)0x01) + +/* SCCs. 
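+ *
+ * For illustration only, a hedged sketch of programming GSMR_L for UART
+ * operation with a 16x oversampled clock, using the mode and divider bits
+ * defined below (sccp is assumed to point at the scc_t register block from
+ * immap_cpm2.h):
+ *
+ *	out_be32(&sccp->scc_gsmrl,
+ *		 SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
+ *
+ * Only after the parameter RAM and BD rings are set up:
+ *
+ *	setbits32(&sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);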
+*/ +#define SCC_GSMRH_IRP ((uint)0x00040000) +#define SCC_GSMRH_GDE ((uint)0x00010000) +#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000) +#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000) +#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000) +#define SCC_GSMRH_REVD ((uint)0x00002000) +#define SCC_GSMRH_TRX ((uint)0x00001000) +#define SCC_GSMRH_TTX ((uint)0x00000800) +#define SCC_GSMRH_CDP ((uint)0x00000400) +#define SCC_GSMRH_CTSP ((uint)0x00000200) +#define SCC_GSMRH_CDS ((uint)0x00000100) +#define SCC_GSMRH_CTSS ((uint)0x00000080) +#define SCC_GSMRH_TFL ((uint)0x00000040) +#define SCC_GSMRH_RFW ((uint)0x00000020) +#define SCC_GSMRH_TXSY ((uint)0x00000010) +#define SCC_GSMRH_SYNL16 ((uint)0x0000000c) +#define SCC_GSMRH_SYNL8 ((uint)0x00000008) +#define SCC_GSMRH_SYNL4 ((uint)0x00000004) +#define SCC_GSMRH_RTSM ((uint)0x00000002) +#define SCC_GSMRH_RSYN ((uint)0x00000001) + +#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */ +#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000) +#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000) +#define SCC_GSMRL_EDGE_POS ((uint)0x20000000) +#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000) +#define SCC_GSMRL_TCI ((uint)0x10000000) +#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000) +#define SCC_GSMRL_TSNC_4 ((uint)0x08000000) +#define SCC_GSMRL_TSNC_14 ((uint)0x04000000) +#define SCC_GSMRL_TSNC_INF ((uint)0x00000000) +#define SCC_GSMRL_RINV ((uint)0x02000000) +#define SCC_GSMRL_TINV ((uint)0x01000000) +#define SCC_GSMRL_TPL_128 ((uint)0x00c00000) +#define SCC_GSMRL_TPL_64 ((uint)0x00a00000) +#define SCC_GSMRL_TPL_48 ((uint)0x00800000) +#define SCC_GSMRL_TPL_32 ((uint)0x00600000) +#define SCC_GSMRL_TPL_16 ((uint)0x00400000) +#define SCC_GSMRL_TPL_8 ((uint)0x00200000) +#define SCC_GSMRL_TPL_NONE ((uint)0x00000000) +#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000) +#define SCC_GSMRL_TPP_01 ((uint)0x00100000) +#define SCC_GSMRL_TPP_10 ((uint)0x00080000) +#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000) +#define SCC_GSMRL_TEND ((uint)0x00040000) +#define SCC_GSMRL_TDCR_32 ((uint)0x00030000) +#define SCC_GSMRL_TDCR_16 ((uint)0x00020000) +#define SCC_GSMRL_TDCR_8 ((uint)0x00010000) +#define SCC_GSMRL_TDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000) +#define SCC_GSMRL_RDCR_16 ((uint)0x00008000) +#define SCC_GSMRL_RDCR_8 ((uint)0x00004000) +#define SCC_GSMRL_RDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000) +#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000) +#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000) +#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800) +#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600) +#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400) +#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200) +#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100) +#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */ +#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080) +#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040) +#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000) +#define SCC_GSMRL_ENR ((uint)0x00000020) +#define SCC_GSMRL_ENT ((uint)0x00000010) +#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c) +#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009) +#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008) +#define SCC_GSMRL_MODE_V14 ((uint)0x00000007) +#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006) +#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005) +#define SCC_GSMRL_MODE_UART ((uint)0x00000004) +#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003) +#define SCC_GSMRL_MODE_ATALK 
((uint)0x00000002) +#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000) + +#define SCC_TODR_TOD ((ushort)0x8000) + +/* SCC Event and Mask register. +*/ +#define SCCM_TXE ((unsigned char)0x10) +#define SCCM_BSY ((unsigned char)0x04) +#define SCCM_TX ((unsigned char)0x02) +#define SCCM_RX ((unsigned char)0x01) + +typedef struct scc_param { + ushort scc_rbase; /* Rx Buffer descriptor base address */ + ushort scc_tbase; /* Tx Buffer descriptor base address */ + u_char scc_rfcr; /* Rx function code */ + u_char scc_tfcr; /* Tx function code */ + ushort scc_mrblr; /* Max receive buffer length */ + uint scc_rstate; /* Internal */ + uint scc_idp; /* Internal */ + ushort scc_rbptr; /* Internal */ + ushort scc_ibc; /* Internal */ + uint scc_rxtmp; /* Internal */ + uint scc_tstate; /* Internal */ + uint scc_tdp; /* Internal */ + ushort scc_tbptr; /* Internal */ + ushort scc_tbc; /* Internal */ + uint scc_txtmp; /* Internal */ + uint scc_rcrc; /* Internal */ + uint scc_tcrc; /* Internal */ +} sccp_t; + +/* Function code bits. +*/ +#define SCC_EB ((u_char) 0x10) /* Set big endian byte order */ +#define SCC_GBL ((u_char) 0x20) /* Snooping enabled */ + +/* CPM Ethernet through SCC1. + */ +typedef struct scc_enet { + sccp_t sen_genscc; + uint sen_cpres; /* Preset CRC */ + uint sen_cmask; /* Constant mask for CRC */ + uint sen_crcec; /* CRC Error counter */ + uint sen_alec; /* alignment error counter */ + uint sen_disfc; /* discard frame counter */ + ushort sen_pads; /* Tx short frame pad character */ + ushort sen_retlim; /* Retry limit threshold */ + ushort sen_retcnt; /* Retry limit counter */ + ushort sen_maxflr; /* maximum frame length register */ + ushort sen_minflr; /* minimum frame length register */ + ushort sen_maxd1; /* maximum DMA1 length */ + ushort sen_maxd2; /* maximum DMA2 length */ + ushort sen_maxd; /* Rx max DMA */ + ushort sen_dmacnt; /* Rx DMA counter */ + ushort sen_maxb; /* Max BD byte count */ + ushort sen_gaddr1; /* Group address filter */ + ushort sen_gaddr2; + ushort sen_gaddr3; + ushort sen_gaddr4; + uint sen_tbuf0data0; /* Save area 0 - current frame */ + uint sen_tbuf0data1; /* Save area 1 - current frame */ + uint sen_tbuf0rba; /* Internal */ + uint sen_tbuf0crc; /* Internal */ + ushort sen_tbuf0bcnt; /* Internal */ + ushort sen_paddrh; /* physical address (MSB) */ + ushort sen_paddrm; + ushort sen_paddrl; /* physical address (LSB) */ + ushort sen_pper; /* persistence */ + ushort sen_rfbdptr; /* Rx first BD pointer */ + ushort sen_tfbdptr; /* Tx first BD pointer */ + ushort sen_tlbdptr; /* Tx last BD pointer */ + uint sen_tbuf1data0; /* Save area 0 - current frame */ + uint sen_tbuf1data1; /* Save area 1 - current frame */ + uint sen_tbuf1rba; /* Internal */ + uint sen_tbuf1crc; /* Internal */ + ushort sen_tbuf1bcnt; /* Internal */ + ushort sen_txlen; /* Tx Frame length counter */ + ushort sen_iaddr1; /* Individual address filter */ + ushort sen_iaddr2; + ushort sen_iaddr3; + ushort sen_iaddr4; + ushort sen_boffcnt; /* Backoff counter */ + + /* NOTE: Some versions of the manual have the following items + * incorrectly documented. Below is the proper order. + */ + ushort sen_taddrh; /* temp address (MSB) */ + ushort sen_taddrm; + ushort sen_taddrl; /* temp address (LSB) */ +} scc_enet_t; + + +/* SCC Event register as used by Ethernet. 
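+ *
+ * For illustration only, a hedged sketch of servicing these events.  It
+ * assumes the scc_scce field of scc_t from immap_cpm2.h and that pending
+ * events are cleared by writing ones back:
+ *
+ *	ushort ev = in_be16(&sccp->scc_scce);
+ *	out_be16(&sccp->scc_scce, ev);
+ *	if (ev & SCCE_ENET_RXF)
+ *		handle_rx_ring();
+ *	if (ev & SCCE_ENET_TXB)
+ *		reclaim_tx_bds();
+ *
+ * handle_rx_ring() and reclaim_tx_bds() are hypothetical driver helpers.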
+*/ +#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ +#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */ +#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */ +#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */ +#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */ +#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */ + +/* SCC Mode Register (PSMR) as used by Ethernet. +*/ +#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */ +#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */ +#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */ +#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */ +#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */ +#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */ +#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */ +#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */ +#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */ +#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */ +#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */ +#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */ +#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */ + +/* SCC as UART +*/ +typedef struct scc_uart { + sccp_t scc_genscc; + uint scc_res1; /* Reserved */ + uint scc_res2; /* Reserved */ + ushort scc_maxidl; /* Maximum idle chars */ + ushort scc_idlc; /* temp idle counter */ + ushort scc_brkcr; /* Break count register */ + ushort scc_parec; /* receive parity error counter */ + ushort scc_frmec; /* receive framing error counter */ + ushort scc_nosec; /* receive noise counter */ + ushort scc_brkec; /* receive break condition counter */ + ushort scc_brkln; /* last received break length */ + ushort scc_uaddr1; /* UART address character 1 */ + ushort scc_uaddr2; /* UART address character 2 */ + ushort scc_rtemp; /* Temp storage */ + ushort scc_toseq; /* Transmit out of sequence char */ + ushort scc_char1; /* control character 1 */ + ushort scc_char2; /* control character 2 */ + ushort scc_char3; /* control character 3 */ + ushort scc_char4; /* control character 4 */ + ushort scc_char5; /* control character 5 */ + ushort scc_char6; /* control character 6 */ + ushort scc_char7; /* control character 7 */ + ushort scc_char8; /* control character 8 */ + ushort scc_rccm; /* receive control character mask */ + ushort scc_rccr; /* receive control character register */ + ushort scc_rlbc; /* receive last break character */ +} scc_uart_t; + +/* SCC Event and Mask registers when it is used as a UART. +*/ +#define UART_SCCM_GLR ((ushort)0x1000) +#define UART_SCCM_GLT ((ushort)0x0800) +#define UART_SCCM_AB ((ushort)0x0200) +#define UART_SCCM_IDL ((ushort)0x0100) +#define UART_SCCM_GRA ((ushort)0x0080) +#define UART_SCCM_BRKE ((ushort)0x0040) +#define UART_SCCM_BRKS ((ushort)0x0020) +#define UART_SCCM_CCR ((ushort)0x0008) +#define UART_SCCM_BSY ((ushort)0x0004) +#define UART_SCCM_TX ((ushort)0x0002) +#define UART_SCCM_RX ((ushort)0x0001) + +/* The SCC PSMR when used as a UART. 
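+ *
+ * For illustration only, a hedged sketch of an 8N1 setting: both CL bits set
+ * for eight data bits, SL clear for one stop bit, PEN clear for no parity
+ * (sccp again assumed to be an scc_t pointer):
+ *
+ *	out_be16(&sccp->scc_psmr, SCU_PSMR_CL);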
+*/ +#define SCU_PSMR_FLC ((ushort)0x8000) +#define SCU_PSMR_SL ((ushort)0x4000) +#define SCU_PSMR_CL ((ushort)0x3000) +#define SCU_PSMR_UM ((ushort)0x0c00) +#define SCU_PSMR_FRZ ((ushort)0x0200) +#define SCU_PSMR_RZS ((ushort)0x0100) +#define SCU_PSMR_SYN ((ushort)0x0080) +#define SCU_PSMR_DRT ((ushort)0x0040) +#define SCU_PSMR_PEN ((ushort)0x0010) +#define SCU_PSMR_RPM ((ushort)0x000c) +#define SCU_PSMR_REVP ((ushort)0x0008) +#define SCU_PSMR_TPM ((ushort)0x0003) +#define SCU_PSMR_TEVP ((ushort)0x0002) + +/* CPM Transparent mode SCC. + */ +typedef struct scc_trans { + sccp_t st_genscc; + uint st_cpres; /* Preset CRC */ + uint st_cmask; /* Constant mask for CRC */ +} scc_trans_t; + +/* How about some FCCs..... +*/ +#define FCC_GFMR_DIAG_NORM ((uint)0x00000000) +#define FCC_GFMR_DIAG_LE ((uint)0x40000000) +#define FCC_GFMR_DIAG_AE ((uint)0x80000000) +#define FCC_GFMR_DIAG_ALE ((uint)0xc0000000) +#define FCC_GFMR_TCI ((uint)0x20000000) +#define FCC_GFMR_TRX ((uint)0x10000000) +#define FCC_GFMR_TTX ((uint)0x08000000) +#define FCC_GFMR_TTX ((uint)0x08000000) +#define FCC_GFMR_CDP ((uint)0x04000000) +#define FCC_GFMR_CTSP ((uint)0x02000000) +#define FCC_GFMR_CDS ((uint)0x01000000) +#define FCC_GFMR_CTSS ((uint)0x00800000) +#define FCC_GFMR_SYNL_NONE ((uint)0x00000000) +#define FCC_GFMR_SYNL_AUTO ((uint)0x00004000) +#define FCC_GFMR_SYNL_8 ((uint)0x00008000) +#define FCC_GFMR_SYNL_16 ((uint)0x0000c000) +#define FCC_GFMR_RTSM ((uint)0x00002000) +#define FCC_GFMR_RENC_NRZ ((uint)0x00000000) +#define FCC_GFMR_RENC_NRZI ((uint)0x00000800) +#define FCC_GFMR_REVD ((uint)0x00000400) +#define FCC_GFMR_TENC_NRZ ((uint)0x00000000) +#define FCC_GFMR_TENC_NRZI ((uint)0x00000100) +#define FCC_GFMR_TCRC_16 ((uint)0x00000000) +#define FCC_GFMR_TCRC_32 ((uint)0x00000080) +#define FCC_GFMR_ENR ((uint)0x00000020) +#define FCC_GFMR_ENT ((uint)0x00000010) +#define FCC_GFMR_MODE_ENET ((uint)0x0000000c) +#define FCC_GFMR_MODE_ATM ((uint)0x0000000a) +#define FCC_GFMR_MODE_HDLC ((uint)0x00000000) + +/* Generic FCC parameter ram. +*/ +typedef struct fcc_param { + ushort fcc_riptr; /* Rx Internal temp pointer */ + ushort fcc_tiptr; /* Tx Internal temp pointer */ + ushort fcc_res1; + ushort fcc_mrblr; /* Max receive buffer length, mod 32 bytes */ + uint fcc_rstate; /* Upper byte is Func code, must be set */ + uint fcc_rbase; /* Receive BD base */ + ushort fcc_rbdstat; /* RxBD status */ + ushort fcc_rbdlen; /* RxBD down counter */ + uint fcc_rdptr; /* RxBD internal data pointer */ + uint fcc_tstate; /* Upper byte is Func code, must be set */ + uint fcc_tbase; /* Transmit BD base */ + ushort fcc_tbdstat; /* TxBD status */ + ushort fcc_tbdlen; /* TxBD down counter */ + uint fcc_tdptr; /* TxBD internal data pointer */ + uint fcc_rbptr; /* Rx BD Internal buf pointer */ + uint fcc_tbptr; /* Tx BD Internal buf pointer */ + uint fcc_rcrc; /* Rx temp CRC */ + uint fcc_res2; + uint fcc_tcrc; /* Tx temp CRC */ +} fccp_t; + + +/* Ethernet controller through FCC. 
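+ *
+ * For illustration only, a hedged sketch of selecting Ethernet mode in GFMR
+ * with the FCC_GFMR_* bits defined above (fccp is assumed to point at the
+ * fcc_t register block from immap_cpm2.h):
+ *
+ *	out_be32(&fccp->fcc_gfmr, FCC_GFMR_MODE_ENET);
+ *
+ * Once the parameter RAM below and the BD rings are initialized, the channel
+ * is enabled with:
+ *
+ *	setbits32(&fccp->fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);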
+*/ +typedef struct fcc_enet { + fccp_t fen_genfcc; + uint fen_statbuf; /* Internal status buffer */ + uint fen_camptr; /* CAM address */ + uint fen_cmask; /* Constant mask for CRC */ + uint fen_cpres; /* Preset CRC */ + uint fen_crcec; /* CRC Error counter */ + uint fen_alec; /* alignment error counter */ + uint fen_disfc; /* discard frame counter */ + ushort fen_retlim; /* Retry limit */ + ushort fen_retcnt; /* Retry counter */ + ushort fen_pper; /* Persistence */ + ushort fen_boffcnt; /* backoff counter */ + uint fen_gaddrh; /* Group address filter, high 32-bits */ + uint fen_gaddrl; /* Group address filter, low 32-bits */ + ushort fen_tfcstat; /* out of sequence TxBD */ + ushort fen_tfclen; + uint fen_tfcptr; + ushort fen_mflr; /* Maximum frame length (1518) */ + ushort fen_paddrh; /* MAC address */ + ushort fen_paddrm; + ushort fen_paddrl; + ushort fen_ibdcount; /* Internal BD counter */ + ushort fen_ibdstart; /* Internal BD start pointer */ + ushort fen_ibdend; /* Internal BD end pointer */ + ushort fen_txlen; /* Internal Tx frame length counter */ + uint fen_ibdbase[8]; /* Internal use */ + uint fen_iaddrh; /* Individual address filter */ + uint fen_iaddrl; + ushort fen_minflr; /* Minimum frame length (64) */ + ushort fen_taddrh; /* Filter transfer MAC address */ + ushort fen_taddrm; + ushort fen_taddrl; + ushort fen_padptr; /* Pointer to pad byte buffer */ + ushort fen_cftype; /* control frame type */ + ushort fen_cfrange; /* control frame range */ + ushort fen_maxb; /* maximum BD count */ + ushort fen_maxd1; /* Max DMA1 length (1520) */ + ushort fen_maxd2; /* Max DMA2 length (1520) */ + ushort fen_maxd; /* internal max DMA count */ + ushort fen_dmacnt; /* internal DMA counter */ + uint fen_octc; /* Total octect counter */ + uint fen_colc; /* Total collision counter */ + uint fen_broc; /* Total broadcast packet counter */ + uint fen_mulc; /* Total multicast packet count */ + uint fen_uspc; /* Total packets < 64 bytes */ + uint fen_frgc; /* Total packets < 64 bytes with errors */ + uint fen_ospc; /* Total packets > 1518 */ + uint fen_jbrc; /* Total packets > 1518 with errors */ + uint fen_p64c; /* Total packets == 64 bytes */ + uint fen_p65c; /* Total packets 64 < bytes <= 127 */ + uint fen_p128c; /* Total packets 127 < bytes <= 255 */ + uint fen_p256c; /* Total packets 256 < bytes <= 511 */ + uint fen_p512c; /* Total packets 512 < bytes <= 1023 */ + uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */ + uint fen_cambuf; /* Internal CAM buffer poiner */ + ushort fen_rfthr; /* Received frames threshold */ + ushort fen_rfcnt; /* Received frames count */ +} fcc_enet_t; + +/* FCC Event/Mask register as used by Ethernet. +*/ +#define FCC_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ +#define FCC_ENET_RXC ((ushort)0x0040) /* Control Frame Received */ +#define FCC_ENET_TXC ((ushort)0x0020) /* Out of seq. Tx sent */ +#define FCC_ENET_TXE ((ushort)0x0010) /* Transmit Error */ +#define FCC_ENET_RXF ((ushort)0x0008) /* Full frame received */ +#define FCC_ENET_BSY ((ushort)0x0004) /* Busy. Rx Frame dropped */ +#define FCC_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */ +#define FCC_ENET_RXB ((ushort)0x0001) /* A buffer was received */ + +/* FCC Mode Register (FPSMR) as used by Ethernet. +*/ +#define FCC_PSMR_HBC ((uint)0x80000000) /* Enable heartbeat */ +#define FCC_PSMR_FC ((uint)0x40000000) /* Force Collision */ +#define FCC_PSMR_SBT ((uint)0x20000000) /* Stop backoff timer */ +#define FCC_PSMR_LPB ((uint)0x10000000) /* Local protect. 
1 = FDX */ +#define FCC_PSMR_LCW ((uint)0x08000000) /* Late collision select */ +#define FCC_PSMR_FDE ((uint)0x04000000) /* Full Duplex Enable */ +#define FCC_PSMR_MON ((uint)0x02000000) /* RMON Enable */ +#define FCC_PSMR_PRO ((uint)0x00400000) /* Promiscuous Enable */ +#define FCC_PSMR_FCE ((uint)0x00200000) /* Flow Control Enable */ +#define FCC_PSMR_RSH ((uint)0x00100000) /* Receive Short Frames */ +#define FCC_PSMR_CAM ((uint)0x00000400) /* CAM enable */ +#define FCC_PSMR_BRO ((uint)0x00000200) /* Broadcast pkt discard */ +#define FCC_PSMR_ENCRC ((uint)0x00000080) /* Use 32-bit CRC */ + +/* IIC parameter RAM. +*/ +typedef struct iic { + ushort iic_rbase; /* Rx Buffer descriptor base address */ + ushort iic_tbase; /* Tx Buffer descriptor base address */ + u_char iic_rfcr; /* Rx function code */ + u_char iic_tfcr; /* Tx function code */ + ushort iic_mrblr; /* Max receive buffer length */ + uint iic_rstate; /* Internal */ + uint iic_rdp; /* Internal */ + ushort iic_rbptr; /* Internal */ + ushort iic_rbc; /* Internal */ + uint iic_rxtmp; /* Internal */ + uint iic_tstate; /* Internal */ + uint iic_tdp; /* Internal */ + ushort iic_tbptr; /* Internal */ + ushort iic_tbc; /* Internal */ + uint iic_txtmp; /* Internal */ +} iic_t; + +/* IDMA parameter RAM +*/ +typedef struct idma { + ushort ibase; /* IDMA buffer descriptor table base address */ + ushort dcm; /* DMA channel mode */ + ushort ibdptr; /* IDMA current buffer descriptor pointer */ + ushort dpr_buf; /* IDMA transfer buffer base address */ + ushort buf_inv; /* internal buffer inventory */ + ushort ss_max; /* steady-state maximum transfer size */ + ushort dpr_in_ptr; /* write pointer inside the internal buffer */ + ushort sts; /* source transfer size */ + ushort dpr_out_ptr; /* read pointer inside the internal buffer */ + ushort seob; /* source end of burst */ + ushort deob; /* destination end of burst */ + ushort dts; /* destination transfer size */ + ushort ret_add; /* return address when working in ERM=1 mode */ + ushort res0; /* reserved */ + uint bd_cnt; /* internal byte count */ + uint s_ptr; /* source internal data pointer */ + uint d_ptr; /* destination internal data pointer */ + uint istate; /* internal state */ + u_char res1[20]; /* pad to 64-byte length */ +} idma_t; + +/* DMA channel mode bit fields +*/ +#define IDMA_DCM_FB ((ushort)0x8000) /* fly-by mode */ +#define IDMA_DCM_LP ((ushort)0x4000) /* low priority */ +#define IDMA_DCM_TC2 ((ushort)0x0400) /* value driven on TC[2] */ +#define IDMA_DCM_DMA_WRAP_MASK ((ushort)0x01c0) /* mask for DMA wrap */ +#define IDMA_DCM_DMA_WRAP_64 ((ushort)0x0000) /* 64-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_128 ((ushort)0x0040) /* 128-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_256 ((ushort)0x0080) /* 256-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_512 ((ushort)0x00c0) /* 512-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_1024 ((ushort)0x0100) /* 1024-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_2048 ((ushort)0x0140) /* 2048-byte DMA xfer buffer */ +#define IDMA_DCM_SINC ((ushort)0x0020) /* source inc addr */ +#define IDMA_DCM_DINC ((ushort)0x0010) /* destination inc addr */ +#define IDMA_DCM_ERM ((ushort)0x0008) /* external request mode */ +#define IDMA_DCM_DT ((ushort)0x0004) /* DONE treatment */ +#define IDMA_DCM_SD_MASK ((ushort)0x0003) /* mask for SD bit field */ +#define IDMA_DCM_SD_MEM2MEM ((ushort)0x0000) /* memory-to-memory xfer */ +#define IDMA_DCM_SD_PER2MEM ((ushort)0x0002) /* peripheral-to-memory xfer */ +#define IDMA_DCM_SD_MEM2PER 
((ushort)0x0001) /* memory-to-peripheral xfer */ + +/* IDMA Buffer Descriptors +*/ +typedef struct idma_bd { + uint flags; + uint len; /* data length */ + uint src; /* source data buffer pointer */ + uint dst; /* destination data buffer pointer */ +} idma_bd_t; + +/* IDMA buffer descriptor flag bit fields +*/ +#define IDMA_BD_V ((uint)0x80000000) /* valid */ +#define IDMA_BD_W ((uint)0x20000000) /* wrap */ +#define IDMA_BD_I ((uint)0x10000000) /* interrupt */ +#define IDMA_BD_L ((uint)0x08000000) /* last */ +#define IDMA_BD_CM ((uint)0x02000000) /* continuous mode */ +#define IDMA_BD_SDN ((uint)0x00400000) /* source done */ +#define IDMA_BD_DDN ((uint)0x00200000) /* destination done */ +#define IDMA_BD_DGBL ((uint)0x00100000) /* destination global */ +#define IDMA_BD_DBO_LE ((uint)0x00040000) /* little-end dest byte order */ +#define IDMA_BD_DBO_BE ((uint)0x00080000) /* big-end dest byte order */ +#define IDMA_BD_DDTB ((uint)0x00010000) /* destination data bus */ +#define IDMA_BD_SGBL ((uint)0x00002000) /* source global */ +#define IDMA_BD_SBO_LE ((uint)0x00000800) /* little-end src byte order */ +#define IDMA_BD_SBO_BE ((uint)0x00001000) /* big-end src byte order */ +#define IDMA_BD_SDTB ((uint)0x00000200) /* source data bus */ + +/* per-channel IDMA registers +*/ +typedef struct im_idma { + u_char idsr; /* IDMAn event status register */ + u_char res0[3]; + u_char idmr; /* IDMAn event mask register */ + u_char res1[3]; +} im_idma_t; + +/* IDMA event register bit fields +*/ +#define IDMA_EVENT_SC ((unsigned char)0x08) /* stop completed */ +#define IDMA_EVENT_OB ((unsigned char)0x04) /* out of buffers */ +#define IDMA_EVENT_EDN ((unsigned char)0x02) /* external DONE asserted */ +#define IDMA_EVENT_BC ((unsigned char)0x01) /* buffer descriptor complete */ + +/* RISC Controller Configuration Register (RCCR) bit fields +*/ +#define RCCR_TIME ((uint)0x80000000) /* timer enable */ +#define RCCR_TIMEP_MASK ((uint)0x3f000000) /* mask for timer period bit field */ +#define RCCR_DR0M ((uint)0x00800000) /* IDMA0 request mode */ +#define RCCR_DR1M ((uint)0x00400000) /* IDMA1 request mode */ +#define RCCR_DR2M ((uint)0x00000080) /* IDMA2 request mode */ +#define RCCR_DR3M ((uint)0x00000040) /* IDMA3 request mode */ +#define RCCR_DR0QP_MASK ((uint)0x00300000) /* mask for IDMA0 req priority */ +#define RCCR_DR0QP_HIGH ((uint)0x00000000) /* IDMA0 has high req priority */ +#define RCCR_DR0QP_MED ((uint)0x00100000) /* IDMA0 has medium req priority */ +#define RCCR_DR0QP_LOW ((uint)0x00200000) /* IDMA0 has low req priority */ +#define RCCR_DR1QP_MASK ((uint)0x00030000) /* mask for IDMA1 req priority */ +#define RCCR_DR1QP_HIGH ((uint)0x00000000) /* IDMA1 has high req priority */ +#define RCCR_DR1QP_MED ((uint)0x00010000) /* IDMA1 has medium req priority */ +#define RCCR_DR1QP_LOW ((uint)0x00020000) /* IDMA1 has low req priority */ +#define RCCR_DR2QP_MASK ((uint)0x00000030) /* mask for IDMA2 req priority */ +#define RCCR_DR2QP_HIGH ((uint)0x00000000) /* IDMA2 has high req priority */ +#define RCCR_DR2QP_MED ((uint)0x00000010) /* IDMA2 has medium req priority */ +#define RCCR_DR2QP_LOW ((uint)0x00000020) /* IDMA2 has low req priority */ +#define RCCR_DR3QP_MASK ((uint)0x00000003) /* mask for IDMA3 req priority */ +#define RCCR_DR3QP_HIGH ((uint)0x00000000) /* IDMA3 has high req priority */ +#define RCCR_DR3QP_MED ((uint)0x00000001) /* IDMA3 has medium req priority */ +#define RCCR_DR3QP_LOW ((uint)0x00000002) /* IDMA3 has low req priority */ +#define RCCR_EIE ((uint)0x00080000) /* external interrupt enable */ 
+#define RCCR_SCD ((uint)0x00040000) /* scheduler configuration */ +#define RCCR_ERAM_MASK ((uint)0x0000e000) /* mask for enable RAM microcode */ +#define RCCR_ERAM_0KB ((uint)0x00000000) /* use 0KB of dpram for microcode */ +#define RCCR_ERAM_2KB ((uint)0x00002000) /* use 2KB of dpram for microcode */ +#define RCCR_ERAM_4KB ((uint)0x00004000) /* use 4KB of dpram for microcode */ +#define RCCR_ERAM_6KB ((uint)0x00006000) /* use 6KB of dpram for microcode */ +#define RCCR_ERAM_8KB ((uint)0x00008000) /* use 8KB of dpram for microcode */ +#define RCCR_ERAM_10KB ((uint)0x0000a000) /* use 10KB of dpram for microcode */ +#define RCCR_ERAM_12KB ((uint)0x0000c000) /* use 12KB of dpram for microcode */ +#define RCCR_EDM0 ((uint)0x00000800) /* DREQ0 edge detect mode */ +#define RCCR_EDM1 ((uint)0x00000400) /* DREQ1 edge detect mode */ +#define RCCR_EDM2 ((uint)0x00000200) /* DREQ2 edge detect mode */ +#define RCCR_EDM3 ((uint)0x00000100) /* DREQ3 edge detect mode */ +#define RCCR_DEM01 ((uint)0x00000008) /* DONE0/DONE1 edge detect mode */ +#define RCCR_DEM23 ((uint)0x00000004) /* DONE2/DONE3 edge detect mode */ + +/*----------------------------------------------------------------------- + * CMXFCR - CMX FCC Clock Route Register + */ +#define CMXFCR_FC1 0x40000000 /* FCC1 connection */ +#define CMXFCR_RF1CS_MSK 0x38000000 /* Receive FCC1 Clock Source Mask */ +#define CMXFCR_TF1CS_MSK 0x07000000 /* Transmit FCC1 Clock Source Mask */ +#define CMXFCR_FC2 0x00400000 /* FCC2 connection */ +#define CMXFCR_RF2CS_MSK 0x00380000 /* Receive FCC2 Clock Source Mask */ +#define CMXFCR_TF2CS_MSK 0x00070000 /* Transmit FCC2 Clock Source Mask */ +#define CMXFCR_FC3 0x00004000 /* FCC3 connection */ +#define CMXFCR_RF3CS_MSK 0x00003800 /* Receive FCC3 Clock Source Mask */ +#define CMXFCR_TF3CS_MSK 0x00000700 /* Transmit FCC3 Clock Source Mask */ + +#define CMXFCR_RF1CS_BRG5 0x00000000 /* Receive FCC1 Clock Source is BRG5 */ +#define CMXFCR_RF1CS_BRG6 0x08000000 /* Receive FCC1 Clock Source is BRG6 */ +#define CMXFCR_RF1CS_BRG7 0x10000000 /* Receive FCC1 Clock Source is BRG7 */ +#define CMXFCR_RF1CS_BRG8 0x18000000 /* Receive FCC1 Clock Source is BRG8 */ +#define CMXFCR_RF1CS_CLK9 0x20000000 /* Receive FCC1 Clock Source is CLK9 */ +#define CMXFCR_RF1CS_CLK10 0x28000000 /* Receive FCC1 Clock Source is CLK10 */ +#define CMXFCR_RF1CS_CLK11 0x30000000 /* Receive FCC1 Clock Source is CLK11 */ +#define CMXFCR_RF1CS_CLK12 0x38000000 /* Receive FCC1 Clock Source is CLK12 */ + +#define CMXFCR_TF1CS_BRG5 0x00000000 /* Transmit FCC1 Clock Source is BRG5 */ +#define CMXFCR_TF1CS_BRG6 0x01000000 /* Transmit FCC1 Clock Source is BRG6 */ +#define CMXFCR_TF1CS_BRG7 0x02000000 /* Transmit FCC1 Clock Source is BRG7 */ +#define CMXFCR_TF1CS_BRG8 0x03000000 /* Transmit FCC1 Clock Source is BRG8 */ +#define CMXFCR_TF1CS_CLK9 0x04000000 /* Transmit FCC1 Clock Source is CLK9 */ +#define CMXFCR_TF1CS_CLK10 0x05000000 /* Transmit FCC1 Clock Source is CLK10 */ +#define CMXFCR_TF1CS_CLK11 0x06000000 /* Transmit FCC1 Clock Source is CLK11 */ +#define CMXFCR_TF1CS_CLK12 0x07000000 /* Transmit FCC1 Clock Source is CLK12 */ + +#define CMXFCR_RF2CS_BRG5 0x00000000 /* Receive FCC2 Clock Source is BRG5 */ +#define CMXFCR_RF2CS_BRG6 0x00080000 /* Receive FCC2 Clock Source is BRG6 */ +#define CMXFCR_RF2CS_BRG7 0x00100000 /* Receive FCC2 Clock Source is BRG7 */ +#define CMXFCR_RF2CS_BRG8 0x00180000 /* Receive FCC2 Clock Source is BRG8 */ +#define CMXFCR_RF2CS_CLK13 0x00200000 /* Receive FCC2 Clock Source is CLK13 */ +#define CMXFCR_RF2CS_CLK14 0x00280000 
/* Receive FCC2 Clock Source is CLK14 */ +#define CMXFCR_RF2CS_CLK15 0x00300000 /* Receive FCC2 Clock Source is CLK15 */ +#define CMXFCR_RF2CS_CLK16 0x00380000 /* Receive FCC2 Clock Source is CLK16 */ + +#define CMXFCR_TF2CS_BRG5 0x00000000 /* Transmit FCC2 Clock Source is BRG5 */ +#define CMXFCR_TF2CS_BRG6 0x00010000 /* Transmit FCC2 Clock Source is BRG6 */ +#define CMXFCR_TF2CS_BRG7 0x00020000 /* Transmit FCC2 Clock Source is BRG7 */ +#define CMXFCR_TF2CS_BRG8 0x00030000 /* Transmit FCC2 Clock Source is BRG8 */ +#define CMXFCR_TF2CS_CLK13 0x00040000 /* Transmit FCC2 Clock Source is CLK13 */ +#define CMXFCR_TF2CS_CLK14 0x00050000 /* Transmit FCC2 Clock Source is CLK14 */ +#define CMXFCR_TF2CS_CLK15 0x00060000 /* Transmit FCC2 Clock Source is CLK15 */ +#define CMXFCR_TF2CS_CLK16 0x00070000 /* Transmit FCC2 Clock Source is CLK16 */ + +#define CMXFCR_RF3CS_BRG5 0x00000000 /* Receive FCC3 Clock Source is BRG5 */ +#define CMXFCR_RF3CS_BRG6 0x00000800 /* Receive FCC3 Clock Source is BRG6 */ +#define CMXFCR_RF3CS_BRG7 0x00001000 /* Receive FCC3 Clock Source is BRG7 */ +#define CMXFCR_RF3CS_BRG8 0x00001800 /* Receive FCC3 Clock Source is BRG8 */ +#define CMXFCR_RF3CS_CLK13 0x00002000 /* Receive FCC3 Clock Source is CLK13 */ +#define CMXFCR_RF3CS_CLK14 0x00002800 /* Receive FCC3 Clock Source is CLK14 */ +#define CMXFCR_RF3CS_CLK15 0x00003000 /* Receive FCC3 Clock Source is CLK15 */ +#define CMXFCR_RF3CS_CLK16 0x00003800 /* Receive FCC3 Clock Source is CLK16 */ + +#define CMXFCR_TF3CS_BRG5 0x00000000 /* Transmit FCC3 Clock Source is BRG5 */ +#define CMXFCR_TF3CS_BRG6 0x00000100 /* Transmit FCC3 Clock Source is BRG6 */ +#define CMXFCR_TF3CS_BRG7 0x00000200 /* Transmit FCC3 Clock Source is BRG7 */ +#define CMXFCR_TF3CS_BRG8 0x00000300 /* Transmit FCC3 Clock Source is BRG8 */ +#define CMXFCR_TF3CS_CLK13 0x00000400 /* Transmit FCC3 Clock Source is CLK13 */ +#define CMXFCR_TF3CS_CLK14 0x00000500 /* Transmit FCC3 Clock Source is CLK14 */ +#define CMXFCR_TF3CS_CLK15 0x00000600 /* Transmit FCC3 Clock Source is CLK15 */ +#define CMXFCR_TF3CS_CLK16 0x00000700 /* Transmit FCC3 Clock Source is CLK16 */ + +/*----------------------------------------------------------------------- + * CMXSCR - CMX SCC Clock Route Register + */ +#define CMXSCR_GR1 0x80000000 /* Grant Support of SCC1 */ +#define CMXSCR_SC1 0x40000000 /* SCC1 connection */ +#define CMXSCR_RS1CS_MSK 0x38000000 /* Receive SCC1 Clock Source Mask */ +#define CMXSCR_TS1CS_MSK 0x07000000 /* Transmit SCC1 Clock Source Mask */ +#define CMXSCR_GR2 0x00800000 /* Grant Support of SCC2 */ +#define CMXSCR_SC2 0x00400000 /* SCC2 connection */ +#define CMXSCR_RS2CS_MSK 0x00380000 /* Receive SCC2 Clock Source Mask */ +#define CMXSCR_TS2CS_MSK 0x00070000 /* Transmit SCC2 Clock Source Mask */ +#define CMXSCR_GR3 0x00008000 /* Grant Support of SCC3 */ +#define CMXSCR_SC3 0x00004000 /* SCC3 connection */ +#define CMXSCR_RS3CS_MSK 0x00003800 /* Receive SCC3 Clock Source Mask */ +#define CMXSCR_TS3CS_MSK 0x00000700 /* Transmit SCC3 Clock Source Mask */ +#define CMXSCR_GR4 0x00000080 /* Grant Support of SCC4 */ +#define CMXSCR_SC4 0x00000040 /* SCC4 connection */ +#define CMXSCR_RS4CS_MSK 0x00000038 /* Receive SCC4 Clock Source Mask */ +#define CMXSCR_TS4CS_MSK 0x00000007 /* Transmit SCC4 Clock Source Mask */ + +#define CMXSCR_RS1CS_BRG1 0x00000000 /* SCC1 Rx Clock Source is BRG1 */ +#define CMXSCR_RS1CS_BRG2 0x08000000 /* SCC1 Rx Clock Source is BRG2 */ +#define CMXSCR_RS1CS_BRG3 0x10000000 /* SCC1 Rx Clock Source is BRG3 */ +#define CMXSCR_RS1CS_BRG4 0x18000000 /* SCC1 
Rx Clock Source is BRG4 */ +#define CMXSCR_RS1CS_CLK11 0x20000000 /* SCC1 Rx Clock Source is CLK11 */ +#define CMXSCR_RS1CS_CLK12 0x28000000 /* SCC1 Rx Clock Source is CLK12 */ +#define CMXSCR_RS1CS_CLK3 0x30000000 /* SCC1 Rx Clock Source is CLK3 */ +#define CMXSCR_RS1CS_CLK4 0x38000000 /* SCC1 Rx Clock Source is CLK4 */ + +#define CMXSCR_TS1CS_BRG1 0x00000000 /* SCC1 Tx Clock Source is BRG1 */ +#define CMXSCR_TS1CS_BRG2 0x01000000 /* SCC1 Tx Clock Source is BRG2 */ +#define CMXSCR_TS1CS_BRG3 0x02000000 /* SCC1 Tx Clock Source is BRG3 */ +#define CMXSCR_TS1CS_BRG4 0x03000000 /* SCC1 Tx Clock Source is BRG4 */ +#define CMXSCR_TS1CS_CLK11 0x04000000 /* SCC1 Tx Clock Source is CLK11 */ +#define CMXSCR_TS1CS_CLK12 0x05000000 /* SCC1 Tx Clock Source is CLK12 */ +#define CMXSCR_TS1CS_CLK3 0x06000000 /* SCC1 Tx Clock Source is CLK3 */ +#define CMXSCR_TS1CS_CLK4 0x07000000 /* SCC1 Tx Clock Source is CLK4 */ + +#define CMXSCR_RS2CS_BRG1 0x00000000 /* SCC2 Rx Clock Source is BRG1 */ +#define CMXSCR_RS2CS_BRG2 0x00080000 /* SCC2 Rx Clock Source is BRG2 */ +#define CMXSCR_RS2CS_BRG3 0x00100000 /* SCC2 Rx Clock Source is BRG3 */ +#define CMXSCR_RS2CS_BRG4 0x00180000 /* SCC2 Rx Clock Source is BRG4 */ +#define CMXSCR_RS2CS_CLK11 0x00200000 /* SCC2 Rx Clock Source is CLK11 */ +#define CMXSCR_RS2CS_CLK12 0x00280000 /* SCC2 Rx Clock Source is CLK12 */ +#define CMXSCR_RS2CS_CLK3 0x00300000 /* SCC2 Rx Clock Source is CLK3 */ +#define CMXSCR_RS2CS_CLK4 0x00380000 /* SCC2 Rx Clock Source is CLK4 */ + +#define CMXSCR_TS2CS_BRG1 0x00000000 /* SCC2 Tx Clock Source is BRG1 */ +#define CMXSCR_TS2CS_BRG2 0x00010000 /* SCC2 Tx Clock Source is BRG2 */ +#define CMXSCR_TS2CS_BRG3 0x00020000 /* SCC2 Tx Clock Source is BRG3 */ +#define CMXSCR_TS2CS_BRG4 0x00030000 /* SCC2 Tx Clock Source is BRG4 */ +#define CMXSCR_TS2CS_CLK11 0x00040000 /* SCC2 Tx Clock Source is CLK11 */ +#define CMXSCR_TS2CS_CLK12 0x00050000 /* SCC2 Tx Clock Source is CLK12 */ +#define CMXSCR_TS2CS_CLK3 0x00060000 /* SCC2 Tx Clock Source is CLK3 */ +#define CMXSCR_TS2CS_CLK4 0x00070000 /* SCC2 Tx Clock Source is CLK4 */ + +#define CMXSCR_RS3CS_BRG1 0x00000000 /* SCC3 Rx Clock Source is BRG1 */ +#define CMXSCR_RS3CS_BRG2 0x00000800 /* SCC3 Rx Clock Source is BRG2 */ +#define CMXSCR_RS3CS_BRG3 0x00001000 /* SCC3 Rx Clock Source is BRG3 */ +#define CMXSCR_RS3CS_BRG4 0x00001800 /* SCC3 Rx Clock Source is BRG4 */ +#define CMXSCR_RS3CS_CLK5 0x00002000 /* SCC3 Rx Clock Source is CLK5 */ +#define CMXSCR_RS3CS_CLK6 0x00002800 /* SCC3 Rx Clock Source is CLK6 */ +#define CMXSCR_RS3CS_CLK7 0x00003000 /* SCC3 Rx Clock Source is CLK7 */ +#define CMXSCR_RS3CS_CLK8 0x00003800 /* SCC3 Rx Clock Source is CLK8 */ + +#define CMXSCR_TS3CS_BRG1 0x00000000 /* SCC3 Tx Clock Source is BRG1 */ +#define CMXSCR_TS3CS_BRG2 0x00000100 /* SCC3 Tx Clock Source is BRG2 */ +#define CMXSCR_TS3CS_BRG3 0x00000200 /* SCC3 Tx Clock Source is BRG3 */ +#define CMXSCR_TS3CS_BRG4 0x00000300 /* SCC3 Tx Clock Source is BRG4 */ +#define CMXSCR_TS3CS_CLK5 0x00000400 /* SCC3 Tx Clock Source is CLK5 */ +#define CMXSCR_TS3CS_CLK6 0x00000500 /* SCC3 Tx Clock Source is CLK6 */ +#define CMXSCR_TS3CS_CLK7 0x00000600 /* SCC3 Tx Clock Source is CLK7 */ +#define CMXSCR_TS3CS_CLK8 0x00000700 /* SCC3 Tx Clock Source is CLK8 */ + +#define CMXSCR_RS4CS_BRG1 0x00000000 /* SCC4 Rx Clock Source is BRG1 */ +#define CMXSCR_RS4CS_BRG2 0x00000008 /* SCC4 Rx Clock Source is BRG2 */ +#define CMXSCR_RS4CS_BRG3 0x00000010 /* SCC4 Rx Clock Source is BRG3 */ +#define CMXSCR_RS4CS_BRG4 0x00000018 /* SCC4 Rx Clock Source is 
BRG4 */ +#define CMXSCR_RS4CS_CLK5 0x00000020 /* SCC4 Rx Clock Source is CLK5 */ +#define CMXSCR_RS4CS_CLK6 0x00000028 /* SCC4 Rx Clock Source is CLK6 */ +#define CMXSCR_RS4CS_CLK7 0x00000030 /* SCC4 Rx Clock Source is CLK7 */ +#define CMXSCR_RS4CS_CLK8 0x00000038 /* SCC4 Rx Clock Source is CLK8 */ + +#define CMXSCR_TS4CS_BRG1 0x00000000 /* SCC4 Tx Clock Source is BRG1 */ +#define CMXSCR_TS4CS_BRG2 0x00000001 /* SCC4 Tx Clock Source is BRG2 */ +#define CMXSCR_TS4CS_BRG3 0x00000002 /* SCC4 Tx Clock Source is BRG3 */ +#define CMXSCR_TS4CS_BRG4 0x00000003 /* SCC4 Tx Clock Source is BRG4 */ +#define CMXSCR_TS4CS_CLK5 0x00000004 /* SCC4 Tx Clock Source is CLK5 */ +#define CMXSCR_TS4CS_CLK6 0x00000005 /* SCC4 Tx Clock Source is CLK6 */ +#define CMXSCR_TS4CS_CLK7 0x00000006 /* SCC4 Tx Clock Source is CLK7 */ +#define CMXSCR_TS4CS_CLK8 0x00000007 /* SCC4 Tx Clock Source is CLK8 */ + +/*----------------------------------------------------------------------- + * SIUMCR - SIU Module Configuration Register 4-31 + */ +#define SIUMCR_BBD 0x80000000 /* Bus Busy Disable */ +#define SIUMCR_ESE 0x40000000 /* External Snoop Enable */ +#define SIUMCR_PBSE 0x20000000 /* Parity Byte Select Enable */ +#define SIUMCR_CDIS 0x10000000 /* Core Disable */ +#define SIUMCR_DPPC00 0x00000000 /* Data Parity Pins Configuration*/ +#define SIUMCR_DPPC01 0x04000000 /* - " - */ +#define SIUMCR_DPPC10 0x08000000 /* - " - */ +#define SIUMCR_DPPC11 0x0c000000 /* - " - */ +#define SIUMCR_L2CPC00 0x00000000 /* L2 Cache Pins Configuration */ +#define SIUMCR_L2CPC01 0x01000000 /* - " - */ +#define SIUMCR_L2CPC10 0x02000000 /* - " - */ +#define SIUMCR_L2CPC11 0x03000000 /* - " - */ +#define SIUMCR_LBPC00 0x00000000 /* Local Bus Pins Configuration */ +#define SIUMCR_LBPC01 0x00400000 /* - " - */ +#define SIUMCR_LBPC10 0x00800000 /* - " - */ +#define SIUMCR_LBPC11 0x00c00000 /* - " - */ +#define SIUMCR_APPC00 0x00000000 /* Address Parity Pins Configuration*/ +#define SIUMCR_APPC01 0x00100000 /* - " - */ +#define SIUMCR_APPC10 0x00200000 /* - " - */ +#define SIUMCR_APPC11 0x00300000 /* - " - */ +#define SIUMCR_CS10PC00 0x00000000 /* CS10 Pin Configuration */ +#define SIUMCR_CS10PC01 0x00040000 /* - " - */ +#define SIUMCR_CS10PC10 0x00080000 /* - " - */ +#define SIUMCR_CS10PC11 0x000c0000 /* - " - */ +#define SIUMCR_BCTLC00 0x00000000 /* Buffer Control Configuration */ +#define SIUMCR_BCTLC01 0x00010000 /* - " - */ +#define SIUMCR_BCTLC10 0x00020000 /* - " - */ +#define SIUMCR_BCTLC11 0x00030000 /* - " - */ +#define SIUMCR_MMR00 0x00000000 /* Mask Masters Requests */ +#define SIUMCR_MMR01 0x00004000 /* - " - */ +#define SIUMCR_MMR10 0x00008000 /* - " - */ +#define SIUMCR_MMR11 0x0000c000 /* - " - */ +#define SIUMCR_LPBSE 0x00002000 /* LocalBus Parity Byte Select Enable*/ + +/*----------------------------------------------------------------------- + * SCCR - System Clock Control Register 9-8 +*/ +#define SCCR_PCI_MODE 0x00000100 /* PCI Mode */ +#define SCCR_PCI_MODCK 0x00000080 /* Value of PCI_MODCK pin */ +#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */ +#define SCCR_PCIDF_SHIFT 3 + +#ifndef CPM_IMMR_OFFSET +#define CPM_IMMR_OFFSET 0x101a8 +#endif + +#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */ + +/* FCC iop & clock configuration. 
BSP code is responsible to define Fx_RXCLK & Fx_TXCLK + * in order to use clock-computing stuff below for the FCC x + */ + +/* Automatically generates register configurations */ +#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */ + +#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */ +#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */ +#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */ +#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */ +#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */ +#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */ + +#define PC_F1RXCLK PC_CLK(F1_RXCLK) +#define PC_F1TXCLK PC_CLK(F1_TXCLK) +#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK)) +#define CMX1_CLK_MASK ((uint)0xff000000) + +#define PC_F2RXCLK PC_CLK(F2_RXCLK) +#define PC_F2TXCLK PC_CLK(F2_TXCLK) +#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK)) +#define CMX2_CLK_MASK ((uint)0x00ff0000) + +#define PC_F3RXCLK PC_CLK(F3_RXCLK) +#define PC_F3TXCLK PC_CLK(F3_TXCLK) +#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK)) +#define CMX3_CLK_MASK ((uint)0x0000ff00) + +#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK) +#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE) + +#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK) + +/* I/O Pin assignment for FCC1. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PA1_COL 0x00000001U +#define PA1_CRS 0x00000002U +#define PA1_TXER 0x00000004U +#define PA1_TXEN 0x00000008U +#define PA1_RXDV 0x00000010U +#define PA1_RXER 0x00000020U +#define PA1_TXDAT 0x00003c00U +#define PA1_RXDAT 0x0003c000U +#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT) +#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \ + PA1_RXDV | PA1_RXER) +#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV) +#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER) + + +/* I/O Pin assignment for FCC2. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PB2_TXER 0x00000001U +#define PB2_RXDV 0x00000002U +#define PB2_TXEN 0x00000004U +#define PB2_RXER 0x00000008U +#define PB2_COL 0x00000010U +#define PB2_CRS 0x00000020U +#define PB2_TXDAT 0x000003c0U +#define PB2_RXDAT 0x00003c00U +#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \ + PB2_RXER | PB2_RXDV | PB2_TXER) +#define PB2_PSORB1 (PB2_TXEN) +#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV) +#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER) + + +/* I/O Pin assignment for FCC3. I don't yet know the best way to do this, + * but there is little variation among the choices. 
+ */ +#define PB3_RXDV 0x00004000U +#define PB3_RXER 0x00008000U +#define PB3_TXER 0x00010000U +#define PB3_TXEN 0x00020000U +#define PB3_COL 0x00040000U +#define PB3_CRS 0x00080000U +#define PB3_TXDAT 0x0f000000U +#define PC3_TXDAT 0x00000010U +#define PB3_RXDAT 0x00f00000U +#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \ + PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN) +#define PB3_PSORB1 0 +#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV) +#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER) +#define PC3_DIRC1 (PC3_TXDAT) + +/* Handy macro to specify mem for FCCs*/ +#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128)) +#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0) +#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1) +#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2) + +/* Clocks and GRG's */ + +enum cpm_clk_dir { + CPM_CLK_RX, + CPM_CLK_TX, + CPM_CLK_RTX +}; + +enum cpm_clk_target { + CPM_CLK_SCC1, + CPM_CLK_SCC2, + CPM_CLK_SCC3, + CPM_CLK_SCC4, + CPM_CLK_FCC1, + CPM_CLK_FCC2, + CPM_CLK_FCC3, + CPM_CLK_SMC1, + CPM_CLK_SMC2, +}; + +enum cpm_clk { + CPM_CLK_NONE = 0, + CPM_BRG1, /* Baud Rate Generator 1 */ + CPM_BRG2, /* Baud Rate Generator 2 */ + CPM_BRG3, /* Baud Rate Generator 3 */ + CPM_BRG4, /* Baud Rate Generator 4 */ + CPM_BRG5, /* Baud Rate Generator 5 */ + CPM_BRG6, /* Baud Rate Generator 6 */ + CPM_BRG7, /* Baud Rate Generator 7 */ + CPM_BRG8, /* Baud Rate Generator 8 */ + CPM_CLK1, /* Clock 1 */ + CPM_CLK2, /* Clock 2 */ + CPM_CLK3, /* Clock 3 */ + CPM_CLK4, /* Clock 4 */ + CPM_CLK5, /* Clock 5 */ + CPM_CLK6, /* Clock 6 */ + CPM_CLK7, /* Clock 7 */ + CPM_CLK8, /* Clock 8 */ + CPM_CLK9, /* Clock 9 */ + CPM_CLK10, /* Clock 10 */ + CPM_CLK11, /* Clock 11 */ + CPM_CLK12, /* Clock 12 */ + CPM_CLK13, /* Clock 13 */ + CPM_CLK14, /* Clock 14 */ + CPM_CLK15, /* Clock 15 */ + CPM_CLK16, /* Clock 16 */ + CPM_CLK17, /* Clock 17 */ + CPM_CLK18, /* Clock 18 */ + CPM_CLK19, /* Clock 19 */ + CPM_CLK20, /* Clock 20 */ + CPM_CLK_DUMMY +}; + +extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode); +extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock); + +#define CPM_PIN_INPUT 0 +#define CPM_PIN_OUTPUT 1 +#define CPM_PIN_PRIMARY 0 +#define CPM_PIN_SECONDARY 2 +#define CPM_PIN_GPIO 4 +#define CPM_PIN_OPENDRAIN 8 + +void cpm2_set_pin(int port, int pin, int flags); + +#endif /* __CPM2__ */ +#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h new file mode 100644 index 00000000000..ad55a1ccb9f --- /dev/null +++ b/arch/powerpc/include/asm/cputable.h @@ -0,0 +1,552 @@ +#ifndef __ASM_POWERPC_CPUTABLE_H +#define __ASM_POWERPC_CPUTABLE_H + +#define PPC_FEATURE_32 0x80000000 +#define PPC_FEATURE_64 0x40000000 +#define PPC_FEATURE_601_INSTR 0x20000000 +#define PPC_FEATURE_HAS_ALTIVEC 0x10000000 +#define PPC_FEATURE_HAS_FPU 0x08000000 +#define PPC_FEATURE_HAS_MMU 0x04000000 +#define PPC_FEATURE_HAS_4xxMAC 0x02000000 +#define PPC_FEATURE_UNIFIED_CACHE 0x01000000 +#define PPC_FEATURE_HAS_SPE 0x00800000 +#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000 +#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000 +#define PPC_FEATURE_NO_TB 0x00100000 +#define PPC_FEATURE_POWER4 0x00080000 +#define PPC_FEATURE_POWER5 0x00040000 +#define PPC_FEATURE_POWER5_PLUS 0x00020000 +#define PPC_FEATURE_CELL 0x00010000 +#define PPC_FEATURE_BOOKE 0x00008000 +#define PPC_FEATURE_SMT 0x00004000 +#define PPC_FEATURE_ICACHE_SNOOP 0x00002000 +#define PPC_FEATURE_ARCH_2_05 0x00001000 +#define PPC_FEATURE_PA6T 0x00000800 +#define 
PPC_FEATURE_HAS_DFP 0x00000400 +#define PPC_FEATURE_POWER6_EXT 0x00000200 +#define PPC_FEATURE_ARCH_2_06 0x00000100 +#define PPC_FEATURE_HAS_VSX 0x00000080 + +#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ + 0x00000040 + +#define PPC_FEATURE_TRUE_LE 0x00000002 +#define PPC_FEATURE_PPC_LE 0x00000001 + +#ifdef __KERNEL__ + +#include <asm/asm-compat.h> +#include <asm/feature-fixups.h> + +#ifndef __ASSEMBLY__ + +/* This structure can grow, it's real size is used by head.S code + * via the mkdefs mechanism. + */ +struct cpu_spec; + +typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec); +typedef void (*cpu_restore_t)(void); + +enum powerpc_oprofile_type { + PPC_OPROFILE_INVALID = 0, + PPC_OPROFILE_RS64 = 1, + PPC_OPROFILE_POWER4 = 2, + PPC_OPROFILE_G4 = 3, + PPC_OPROFILE_FSL_EMB = 4, + PPC_OPROFILE_CELL = 5, + PPC_OPROFILE_PA6T = 6, +}; + +enum powerpc_pmc_type { + PPC_PMC_DEFAULT = 0, + PPC_PMC_IBM = 1, + PPC_PMC_PA6T = 2, + PPC_PMC_G4 = 3, +}; + +struct pt_regs; + +extern int machine_check_generic(struct pt_regs *regs); +extern int machine_check_4xx(struct pt_regs *regs); +extern int machine_check_440A(struct pt_regs *regs); +extern int machine_check_e500mc(struct pt_regs *regs); +extern int machine_check_e500(struct pt_regs *regs); +extern int machine_check_e200(struct pt_regs *regs); +extern int machine_check_47x(struct pt_regs *regs); + +/* NOTE WELL: Update identify_cpu() if fields are added or removed! */ +struct cpu_spec { + /* CPU is matched via (PVR & pvr_mask) == pvr_value */ + unsigned int pvr_mask; + unsigned int pvr_value; + + char *cpu_name; + unsigned long cpu_features; /* Kernel features */ + unsigned int cpu_user_features; /* Userland features */ + unsigned int mmu_features; /* MMU features */ + + /* cache line sizes */ + unsigned int icache_bsize; + unsigned int dcache_bsize; + + /* number of performance monitor counters */ + unsigned int num_pmcs; + enum powerpc_pmc_type pmc_type; + + /* this is called to initialize various CPU bits like L1 cache, + * BHT, SPD, etc... from head.S before branching to identify_machine + */ + cpu_setup_t cpu_setup; + /* Used to restore cpu setup on secondary processors and at resume */ + cpu_restore_t cpu_restore; + + /* Used by oprofile userspace to select the right counters */ + char *oprofile_cpu_type; + + /* Processor specific oprofile operations */ + enum powerpc_oprofile_type oprofile_type; + + /* Bit locations inside the mmcra change */ + unsigned long oprofile_mmcra_sihv; + unsigned long oprofile_mmcra_sipr; + + /* Bits to clear during an oprofile exception */ + unsigned long oprofile_mmcra_clear; + + /* Name of processor class, for the ELF AT_PLATFORM entry */ + char *platform; + + /* Processor specific machine check handling. 
Return negative + * if the error is fatal, 1 if it was fully recovered and 0 to + * pass up (not CPU originated) */ + int (*machine_check)(struct pt_regs *regs); +}; + +extern struct cpu_spec *cur_cpu_spec; + +extern unsigned int __start___ftr_fixup, __stop___ftr_fixup; + +extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr); +extern void do_feature_fixups(unsigned long value, void *fixup_start, + void *fixup_end); + +extern const char *powerpc_base_platform; + +#endif /* __ASSEMBLY__ */ + +/* CPU kernel features */ + +/* Retain the 32b definitions all use bottom half of word */ +#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001) +#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002) +#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004) +#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008) +#define CPU_FTR_TAU ASM_CONST(0x0000000000000010) +#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020) +#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) +#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) +#define CPU_FTR_601 ASM_CONST(0x0000000000000100) +#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200) +#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) +#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800) +#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000) +#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) +#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) +#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) +#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000) +#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) +#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) +#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000) +#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) +#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) +#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) +#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000) +#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) +#define CPU_FTR_SPE ASM_CONST(0x0000000002000000) +#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) +#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000) +#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000) + +/* + * Add the 64-bit processor unique features in the top half of the word; + * on 32-bit, make the names available but defined to be 0. 
+ */ +#ifdef __powerpc64__ +#define LONG_ASM_CONST(x) ASM_CONST(x) +#else +#define LONG_ASM_CONST(x) 0 +#endif + +#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000) +#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000) +#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000) +#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) +#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) +#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) +#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) +#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) +#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) +#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) +#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) +#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) +#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) +#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) +#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) +#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) +#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000) +#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) +#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) +#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) +#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000) +#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000) + +#ifndef __ASSEMBLY__ + +#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) + +#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \ + MMU_FTR_16M_PAGE) + +/* We only set the altivec features if the kernel was compiled with altivec + * support + */ +#ifdef CONFIG_ALTIVEC +#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC +#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC +#else +#define CPU_FTR_ALTIVEC_COMP 0 +#define PPC_FEATURE_HAS_ALTIVEC_COMP 0 +#endif + +/* We only set the VSX features if the kernel was compiled with VSX + * support + */ +#ifdef CONFIG_VSX +#define CPU_FTR_VSX_COMP CPU_FTR_VSX +#define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX +#else +#define CPU_FTR_VSX_COMP 0 +#define PPC_FEATURE_HAS_VSX_COMP 0 +#endif + +/* We only set the spe features if the kernel was compiled with spe + * support + */ +#ifdef CONFIG_SPE +#define CPU_FTR_SPE_COMP CPU_FTR_SPE +#define PPC_FEATURE_HAS_SPE_COMP PPC_FEATURE_HAS_SPE +#define PPC_FEATURE_HAS_EFP_SINGLE_COMP PPC_FEATURE_HAS_EFP_SINGLE +#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP PPC_FEATURE_HAS_EFP_DOUBLE +#else +#define CPU_FTR_SPE_COMP 0 +#define PPC_FEATURE_HAS_SPE_COMP 0 +#define PPC_FEATURE_HAS_EFP_SINGLE_COMP 0 +#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0 +#endif + +/* We need to mark all pages as being coherent if we're SMP or we have a + * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II + * require it for PCI "streaming/prefetch" to work properly. + * This is also required by 52xx family. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \ + || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \ + || defined(CONFIG_PPC_MPC52xx) +#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT +#else +#define CPU_FTR_COMMON 0 +#endif + +/* The powersave features NAP & DOZE seems to confuse BDI when + debugging. 
So if a BDI is used, disable theses + */ +#ifndef CONFIG_BDI_SWITCH +#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE +#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP +#else +#define CPU_FTR_MAYBE_CAN_DOZE 0 +#define CPU_FTR_MAYBE_CAN_NAP 0 +#endif + +#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \ + !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ + !defined(CONFIG_BOOKE)) + +#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ + CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE) +#define CPU_FTRS_603 (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) +#define CPU_FTRS_604 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | CPU_FTR_PPC_LE) +#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) +#define CPU_FTRS_740 (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_PPC_LE) +#define CPU_FTRS_750 (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_PPC_LE) +#define CPU_FTRS_750CL (CPU_FTRS_750) +#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM) +#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM) +#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX) +#define CPU_FTRS_750GX (CPU_FTRS_750FX) +#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) +#define CPU_FTRS_7400 (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) +#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) +#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ + CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) +#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ + CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) +#define CPU_FTRS_7455 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + 
CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \ + CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7447 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7447A (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_7448 (CPU_FTR_COMMON | \ + CPU_FTR_USE_TB | \ + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ + CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) +#define CPU_FTRS_82XX (CPU_FTR_COMMON | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) +#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP) +#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_COMMON) +#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) +#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB) +#define CPU_FTRS_8XX (CPU_FTR_USE_TB) +#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ + CPU_FTR_INDEXED_DCR) +#define CPU_FTRS_47X (CPU_FTRS_440x6) +#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ + CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_NOEXECUTE) +#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_DBELL) +#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DEBUG_LVL_EXC) +#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) + +/* 64-bit CPUs */ +#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \ + CPU_FTR_IABR | CPU_FTR_PPC_LE) +#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \ + CPU_FTR_IABR | \ + CPU_FTR_MMCRA | CPU_FTR_CTRL) +#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ + CPU_FTR_STCX_CHECKS_ADDRESS) +#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ + CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ + CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ + CPU_FTR_HVMODE) +#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) +#define 
CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ + CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) +#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY) +#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ + CPU_FTR_UNALIGNED_LD_STD) +#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_PURR | CPU_FTR_REAL_LE) +#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) + +#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) + +#ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2) +#else +#define CPU_FTRS_POSSIBLE \ + (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ + CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ + CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ + CPU_FTR_VSX) +#endif +#else +enum { + CPU_FTRS_POSSIBLE = +#if CLASSIC_PPC + CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | + CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 | + CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX | + CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 | + CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 | + CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 | + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | + CPU_FTRS_CLASSIC32 | +#else + CPU_FTRS_GENERIC_32 | +#endif +#ifdef CONFIG_8xx + CPU_FTRS_8XX | +#endif +#ifdef CONFIG_40x + CPU_FTRS_40X | +#endif +#ifdef CONFIG_44x + CPU_FTRS_44X | CPU_FTRS_440x6 | +#endif +#ifdef CONFIG_PPC_47x + CPU_FTRS_47X | CPU_FTR_476_DD2 | +#endif +#ifdef CONFIG_E200 + CPU_FTRS_E200 | +#endif +#ifdef CONFIG_E500 + CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | + CPU_FTRS_E5500 | +#endif + 0, +}; +#endif /* __powerpc64__ */ + +#ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2) +#else +#define CPU_FTRS_ALWAYS \ + (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ + CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ + CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) +#endif +#else +enum { + CPU_FTRS_ALWAYS = +#if CLASSIC_PPC + CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & + CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 & + CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX & + CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 & + CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 & + CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 & + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & + CPU_FTRS_CLASSIC32 & +#else + CPU_FTRS_GENERIC_32 & 
+#endif +#ifdef CONFIG_8xx + CPU_FTRS_8XX & +#endif +#ifdef CONFIG_40x + CPU_FTRS_40X & +#endif +#ifdef CONFIG_44x + CPU_FTRS_44X & CPU_FTRS_440x6 & +#endif +#ifdef CONFIG_E200 + CPU_FTRS_E200 & +#endif +#ifdef CONFIG_E500 + CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & + CPU_FTRS_E5500 & +#endif + CPU_FTRS_POSSIBLE, +}; +#endif /* __powerpc64__ */ + +static inline int cpu_has_feature(unsigned long feature) +{ + return (CPU_FTRS_ALWAYS & feature) || + (CPU_FTRS_POSSIBLE + & cur_cpu_spec->cpu_features + & feature); +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#define HBP_NUM 1 +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_POWERPC_CPUTABLE_H */ diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h new file mode 100644 index 00000000000..ce516e5eb0d --- /dev/null +++ b/arch/powerpc/include/asm/cputhreads.h @@ -0,0 +1,90 @@ +#ifndef _ASM_POWERPC_CPUTHREADS_H +#define _ASM_POWERPC_CPUTHREADS_H + +#include <linux/cpumask.h> + +/* + * Mapping of threads to cores + * + * Note: This implementation is limited to a power of 2 number of + * threads per core and the same number for each core in the system + * (though it would work if some processors had less threads as long + * as the CPU numbers are still allocated, just not brought offline). + * + * However, the API allows for a different implementation in the future + * if needed, as long as you only use the functions and not the variables + * directly. + */ + +#ifdef CONFIG_SMP +extern int threads_per_core; +extern int threads_shift; +extern cpumask_t threads_core_mask; +#else +#define threads_per_core 1 +#define threads_shift 0 +#define threads_core_mask (CPU_MASK_CPU0) +#endif + +/* cpu_thread_mask_to_cores - Return a cpumask of one per cores + * hit by the argument + * + * @threads: a cpumask of threads + * + * This function returns a cpumask which will have one "cpu" (or thread) + * bit set for each core that has at least one thread set in the argument. 
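+ *
+ * Worked example (editorial, assuming 4 threads per core): if @threads
+ * has bits 1, 2 and 6 set, the returned mask has bits 0 and 4 set -- one
+ * bit, at the first-thread position, for each core that was hit.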
+ * + * This can typically be used for things like IPI for tlb invalidations + * since those need to be done only once per core/TLB + */ +static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) +{ + cpumask_t tmp, res; + int i; + + cpumask_clear(&res); + for (i = 0; i < NR_CPUS; i += threads_per_core) { + cpumask_shift_left(&tmp, &threads_core_mask, i); + if (cpumask_intersects(threads, &tmp)) + cpumask_set_cpu(i, &res); + } + return res; +} + +static inline int cpu_nr_cores(void) +{ + return NR_CPUS >> threads_shift; +} + +static inline cpumask_t cpu_online_cores_map(void) +{ + return cpu_thread_mask_to_cores(cpu_online_mask); +} + +#ifdef CONFIG_SMP +int cpu_core_index_of_thread(int cpu); +int cpu_first_thread_of_core(int core); +#else +static inline int cpu_core_index_of_thread(int cpu) { return cpu; } +static inline int cpu_first_thread_of_core(int core) { return core; } +#endif + +static inline int cpu_thread_in_core(int cpu) +{ + return cpu & (threads_per_core - 1); +} + +static inline int cpu_first_thread_sibling(int cpu) +{ + return cpu & ~(threads_per_core - 1); +} + +static inline int cpu_last_thread_sibling(int cpu) +{ + return cpu | (threads_per_core - 1); +} + + + +#endif /* _ASM_POWERPC_CPUTHREADS_H */ + diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h new file mode 100644 index 00000000000..487d46ff68a --- /dev/null +++ b/arch/powerpc/include/asm/cputime.h @@ -0,0 +1,233 @@ +/* + * Definitions for measuring cputime on powerpc machines. + * + * Copyright (C) 2006 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in + * the same units as the timebase. Otherwise we measure cpu time + * in jiffies using the generic definitions. 
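+ *
+ * Usage sketch (editorial, hypothetical caller): code that accumulates a
+ * cputime_t delta and wants to report it in microseconds would write
+ *
+ *	unsigned long us = cputime_to_usecs(delta);
+ *
+ * which is expected to work in either configuration, since the generic
+ * header provides equivalent conversion helpers.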
+ */ + +#ifndef __POWERPC_CPUTIME_H +#define __POWERPC_CPUTIME_H + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#include <asm-generic/cputime.h> +#ifdef __KERNEL__ +static inline void setup_cputime_one_jiffy(void) { } +#endif +#else + +#include <linux/types.h> +#include <linux/time.h> +#include <asm/div64.h> +#include <asm/time.h> +#include <asm/param.h> + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#ifdef __KERNEL__ + +/* + * One jiffy in timebase units computed during initialization + */ +extern cputime_t cputime_one_jiffy; + +/* + * Convert cputime <-> jiffies + */ +extern u64 __cputime_jiffies_factor; +DECLARE_PER_CPU(unsigned long, cputime_last_delta); +DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta); + +static inline unsigned long cputime_to_jiffies(const cputime_t ct) +{ + return mulhdu((__force u64) ct, __cputime_jiffies_factor); +} + +/* Estimate the scaled cputime by scaling the real cputime based on + * the last scaled to real ratio */ +static inline cputime_t cputime_to_scaled(const cputime_t ct) +{ + if (cpu_has_feature(CPU_FTR_SPURR) && + __get_cpu_var(cputime_last_delta)) + return (__force u64) ct * + __get_cpu_var(cputime_scaled_last_delta) / + __get_cpu_var(cputime_last_delta); + return ct; +} + +static inline cputime_t jiffies_to_cputime(const unsigned long jif) +{ + u64 ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = jif % HZ; + sec = jif / HZ; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, HZ); + } + if (sec) + ct += (cputime_t) sec * tb_ticks_per_sec; + return (__force cputime_t) ct; +} + +static inline void setup_cputime_one_jiffy(void) +{ + cputime_one_jiffy = jiffies_to_cputime(1); +} + +static inline cputime64_t jiffies64_to_cputime64(const u64 jif) +{ + u64 ct; + u64 sec; + + /* have to be a little careful about overflow */ + ct = jif % HZ; + sec = jif / HZ; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, HZ); + } + if (sec) + ct += (u64) sec * tb_ticks_per_sec; + return (__force cputime64_t) ct; +} + +static inline u64 cputime64_to_jiffies64(const cputime_t ct) +{ + return mulhdu((__force u64) ct, __cputime_jiffies_factor); +} + +/* + * Convert cputime <-> microseconds + */ +extern u64 __cputime_usec_factor; + +static inline unsigned long cputime_to_usecs(const cputime_t ct) +{ + return mulhdu((__force u64) ct, __cputime_usec_factor); +} + +static inline cputime_t usecs_to_cputime(const unsigned long us) +{ + u64 ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = us % 1000000; + sec = us / 1000000; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, 1000000); + } + if (sec) + ct += (cputime_t) sec * tb_ticks_per_sec; + return (__force cputime_t) ct; +} + +#define usecs_to_cputime64(us) usecs_to_cputime(us) + +/* + * Convert cputime <-> seconds + */ +extern u64 __cputime_sec_factor; + +static inline unsigned long cputime_to_secs(const cputime_t ct) +{ + return mulhdu((__force u64) ct, __cputime_sec_factor); +} + +static inline cputime_t secs_to_cputime(const unsigned long sec) +{ + return (__force cputime_t)((u64) sec * tb_ticks_per_sec); +} + +/* + * Convert cputime <-> timespec + */ +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) +{ + u64 x = (__force u64) ct; + unsigned int frac; + + frac = do_div(x, tb_ticks_per_sec); + p->tv_sec = x; + x = (u64) frac * 1000000000; + do_div(x, tb_ticks_per_sec); + p->tv_nsec = x; +} + +static inline cputime_t timespec_to_cputime(const struct timespec *p) +{ + u64 ct; + + ct = (u64) 
p->tv_nsec * tb_ticks_per_sec; + do_div(ct, 1000000000); + return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); +} + +/* + * Convert cputime <-> timeval + */ +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) +{ + u64 x = (__force u64) ct; + unsigned int frac; + + frac = do_div(x, tb_ticks_per_sec); + p->tv_sec = x; + x = (u64) frac * 1000000; + do_div(x, tb_ticks_per_sec); + p->tv_usec = x; +} + +static inline cputime_t timeval_to_cputime(const struct timeval *p) +{ + u64 ct; + + ct = (u64) p->tv_usec * tb_ticks_per_sec; + do_div(ct, 1000000); + return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); +} + +/* + * Convert cputime <-> clock_t (units of 1/USER_HZ seconds) + */ +extern u64 __cputime_clockt_factor; + +static inline unsigned long cputime_to_clock_t(const cputime_t ct) +{ + return mulhdu((__force u64) ct, __cputime_clockt_factor); +} + +static inline cputime_t clock_t_to_cputime(const unsigned long clk) +{ + u64 ct; + unsigned long sec; + + /* have to be a little careful about overflow */ + ct = clk % USER_HZ; + sec = clk / USER_HZ; + if (ct) { + ct *= tb_ticks_per_sec; + do_div(ct, USER_HZ); + } + if (sec) + ct += (u64) sec * tb_ticks_per_sec; + return (__force cputime_t) ct; +} + +#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) + +#endif /* __KERNEL__ */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* __POWERPC_CPUTIME_H */ diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h new file mode 100644 index 00000000000..e2c7f06931e --- /dev/null +++ b/arch/powerpc/include/asm/current.h @@ -0,0 +1,40 @@ +#ifndef _ASM_POWERPC_CURRENT_H +#define _ASM_POWERPC_CURRENT_H +#ifdef __KERNEL__ + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +struct task_struct; + +#ifdef __powerpc64__ +#include <linux/stddef.h> +#include <asm/paca.h> + +static inline struct task_struct *get_current(void) +{ + struct task_struct *task; + + __asm__ __volatile__("ld %0,%1(13)" + : "=r" (task) + : "i" (offsetof(struct paca_struct, __current))); + + return task; +} +#define current get_current() + +#else + +/* + * We keep `current' in r2 for speed. + */ +register struct task_struct *current asm ("r2"); + +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_CURRENT_H */ diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h new file mode 100644 index 00000000000..e23f07e73cb --- /dev/null +++ b/arch/powerpc/include/asm/dbdma.h @@ -0,0 +1,108 @@ +/* + * Definitions for using the Apple Descriptor-Based DMA controller + * in Power Macintosh computers. + * + * Copyright (C) 1996 Paul Mackerras. + */ + +#ifdef __KERNEL__ +#ifndef _ASM_DBDMA_H_ +#define _ASM_DBDMA_H_ +/* + * DBDMA control/status registers. All little-endian. 
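+ *
+ * Editorial note (illustrative, not from the original source): since the
+ * registers are little-endian and the CPU runs big-endian, drivers use
+ * the byte-swapping MMIO accessors on them.  Setting the RUN bit in the
+ * control register, for instance, follows the usual mask-in-upper-half
+ * idiom:
+ *
+ *	out_le32(&dbdma->control, (RUN << 16) | RUN);
+ *	status = in_le32(&dbdma->status);
+ *
+ * where 'dbdma' is a hypothetical 'volatile struct dbdma_regs __iomem *'.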
+ */ +struct dbdma_regs { + unsigned int control; /* lets you change bits in status */ + unsigned int status; /* DMA and device status bits (see below) */ + unsigned int cmdptr_hi; /* upper 32 bits of command address */ + unsigned int cmdptr; /* (lower 32 bits of) command address (phys) */ + unsigned int intr_sel; /* select interrupt condition bit */ + unsigned int br_sel; /* select branch condition bit */ + unsigned int wait_sel; /* select wait condition bit */ + unsigned int xfer_mode; + unsigned int data2ptr_hi; + unsigned int data2ptr; + unsigned int res1; + unsigned int address_hi; + unsigned int br_addr_hi; + unsigned int res2[3]; +}; + +/* Bits in control and status registers */ +#define RUN 0x8000 +#define PAUSE 0x4000 +#define FLUSH 0x2000 +#define WAKE 0x1000 +#define DEAD 0x0800 +#define ACTIVE 0x0400 +#define BT 0x0100 +#define DEVSTAT 0x00ff + +/* + * DBDMA command structure. These fields are all little-endian! + */ +struct dbdma_cmd { + unsigned short req_count; /* requested byte transfer count */ + unsigned short command; /* command word (has bit-fields) */ + unsigned int phy_addr; /* physical data address */ + unsigned int cmd_dep; /* command-dependent field */ + unsigned short res_count; /* residual count after completion */ + unsigned short xfer_status; /* transfer status */ +}; + +/* DBDMA command values in command field */ +#define OUTPUT_MORE 0 /* transfer memory data to stream */ +#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */ +#define INPUT_MORE 0x2000 /* transfer stream data to memory */ +#define INPUT_LAST 0x3000 /* ditto, expect end marker */ +#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */ +#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */ +#define DBDMA_NOP 0x6000 /* do nothing */ +#define DBDMA_STOP 0x7000 /* suspend processing */ + +/* Key values in command field */ +#define KEY_STREAM0 0 /* usual data stream */ +#define KEY_STREAM1 0x100 /* control/status stream */ +#define KEY_STREAM2 0x200 /* device-dependent stream */ +#define KEY_STREAM3 0x300 /* device-dependent stream */ +#define KEY_REGS 0x500 /* device register space */ +#define KEY_SYSTEM 0x600 /* system memory-mapped space */ +#define KEY_DEVICE 0x700 /* device memory-mapped space */ + +/* Interrupt control values in command field */ +#define INTR_NEVER 0 /* don't interrupt */ +#define INTR_IFSET 0x10 /* intr if condition bit is 1 */ +#define INTR_IFCLR 0x20 /* intr if condition bit is 0 */ +#define INTR_ALWAYS 0x30 /* always interrupt */ + +/* Branch control values in command field */ +#define BR_NEVER 0 /* don't branch */ +#define BR_IFSET 0x4 /* branch if condition bit is 1 */ +#define BR_IFCLR 0x8 /* branch if condition bit is 0 */ +#define BR_ALWAYS 0xc /* always branch */ + +/* Wait control values in command field */ +#define WAIT_NEVER 0 /* don't wait */ +#define WAIT_IFSET 1 /* wait if condition bit is 1 */ +#define WAIT_IFCLR 2 /* wait if condition bit is 0 */ +#define WAIT_ALWAYS 3 /* always wait */ + +/* Align an address for a DBDMA command structure */ +#define DBDMA_ALIGN(x) (((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \ + & -sizeof(struct dbdma_cmd)) + +/* Useful macros */ +#define DBDMA_DO_STOP(regs) do { \ + out_le32(&((regs)->control), (RUN|FLUSH)<<16); \ + while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH)) \ + ; \ +} while(0) + +#define DBDMA_DO_RESET(regs) do { \ + out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\ + while(in_le32(&((regs)->status)) & (RUN)) \ + ; \ +} while(0) + +#endif /* 
_ASM_DBDMA_H_ */ +#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h new file mode 100644 index 00000000000..efa74ac44a3 --- /dev/null +++ b/arch/powerpc/include/asm/dbell.h @@ -0,0 +1,42 @@ +/* + * Copyright 2009 Freescale Semicondutor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * provides masks and opcode images for use by code generation, emulation + * and for instructions that older assemblers might not know about + */ +#ifndef _ASM_POWERPC_DBELL_H +#define _ASM_POWERPC_DBELL_H + +#include <linux/smp.h> +#include <linux/threads.h> + +#include <asm/ppc-opcode.h> + +#define PPC_DBELL_MSG_BRDCAST (0x04000000) +#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) +enum ppc_dbell { + PPC_DBELL = 0, /* doorbell */ + PPC_DBELL_CRIT = 1, /* critical doorbell */ + PPC_G_DBELL = 2, /* guest doorbell */ + PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */ + PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ +}; + +extern void doorbell_cause_ipi(int cpu, unsigned long data); +extern void doorbell_exception(struct pt_regs *regs); +extern void doorbell_setup_this_cpu(void); + +static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) +{ + u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) | + (tag & 0x07ffffff); + + __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); +} + +#endif /* _ASM_POWERPC_DBELL_H */ diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h new file mode 100644 index 00000000000..35b71599ec4 --- /dev/null +++ b/arch/powerpc/include/asm/dcr-generic.h @@ -0,0 +1,49 @@ +/* + * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_POWERPC_DCR_GENERIC_H +#define _ASM_POWERPC_DCR_GENERIC_H +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID}; + +typedef struct { + enum host_type_t type; + union { + dcr_host_mmio_t mmio; + dcr_host_native_t native; + } host; +} dcr_host_t; + +extern bool dcr_map_ok_generic(dcr_host_t host); + +extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n, + unsigned int dcr_c); +extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c); + +extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n); + +extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value); + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DCR_GENERIC_H */ + + diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h new file mode 100644 index 00000000000..acd491dbd45 --- /dev/null +++ b/arch/powerpc/include/asm/dcr-mmio.h @@ -0,0 +1,61 @@ +/* + * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_POWERPC_DCR_MMIO_H +#define _ASM_POWERPC_DCR_MMIO_H +#ifdef __KERNEL__ + +#include <asm/io.h> + +typedef struct { + void __iomem *token; + unsigned int stride; + unsigned int base; +} dcr_host_mmio_t; + +static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host) +{ + return host.token != NULL; +} + +extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, + unsigned int dcr_n, + unsigned int dcr_c); +extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c); + +static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n) +{ + return in_be32(host.token + ((host.base + dcr_n) * host.stride)); +} + +static inline void dcr_write_mmio(dcr_host_mmio_t host, + unsigned int dcr_n, + u32 value) +{ + out_be32(host.token + ((host.base + dcr_n) * host.stride), value); +} + +extern u64 of_translate_dcr_address(struct device_node *dev, + unsigned int dcr_n, + unsigned int *stride); + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DCR_MMIO_H */ + + diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h new file mode 100644 index 00000000000..7d2e6235726 --- /dev/null +++ b/arch/powerpc/include/asm/dcr-native.h @@ -0,0 +1,155 @@ +/* + * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. 
+ * <benh@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_POWERPC_DCR_NATIVE_H +#define _ASM_POWERPC_DCR_NATIVE_H +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include <linux/spinlock.h> +#include <asm/cputable.h> + +typedef struct { + unsigned int base; +} dcr_host_native_t; + +static inline bool dcr_map_ok_native(dcr_host_native_t host) +{ + return 1; +} + +#define dcr_map_native(dev, dcr_n, dcr_c) \ + ((dcr_host_native_t){ .base = (dcr_n) }) +#define dcr_unmap_native(host, dcr_c) do {} while (0) +#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base) +#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value) + +/* Table based DCR accessors */ +extern void __mtdcr(unsigned int reg, unsigned int val); +extern unsigned int __mfdcr(unsigned int reg); + +/* mfdcrx/mtdcrx instruction based accessors. We hand code + * the opcodes in order not to depend on newer binutils + */ +static inline unsigned int mfdcrx(unsigned int reg) +{ + unsigned int ret; + asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)" + : "=r" (ret) : "r" (reg)); + return ret; +} + +static inline void mtdcrx(unsigned int reg, unsigned int val) +{ + asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)" + : : "r" (val), "r" (reg)); +} + +#define mfdcr(rn) \ + ({unsigned int rval; \ + if (__builtin_constant_p(rn) && rn < 1024) \ + asm volatile("mfdcr %0," __stringify(rn) \ + : "=r" (rval)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + rval = mfdcrx(rn); \ + else \ + rval = __mfdcr(rn); \ + rval;}) + +#define mtdcr(rn, v) \ +do { \ + if (__builtin_constant_p(rn) && rn < 1024) \ + asm volatile("mtdcr " __stringify(rn) ",%0" \ + : : "r" (v)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + mtdcrx(rn, v); \ + else \ + __mtdcr(rn, v); \ +} while (0) + +/* R/W of indirect DCRs make use of standard naming conventions for DCRs */ +extern spinlock_t dcr_ind_lock; + +static inline unsigned __mfdcri(int base_addr, int base_data, int reg) +{ + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&dcr_ind_lock, flags); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = mfdcrx(base_data); + } else { + __mtdcr(base_addr, reg); + val = __mfdcr(base_data); + } + spin_unlock_irqrestore(&dcr_ind_lock, flags); + return val; +} + +static inline void __mtdcri(int base_addr, int base_data, int reg, + unsigned val) +{ + unsigned long flags; + + spin_lock_irqsave(&dcr_ind_lock, flags); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + __mtdcr(base_data, val); + } + spin_unlock_irqrestore(&dcr_ind_lock, flags); +} + +static inline void __dcri_clrset(int base_addr, int base_data, int reg, + unsigned clr, unsigned set) +{ + unsigned long flags; + unsigned 
int val; + + spin_lock_irqsave(&dcr_ind_lock, flags); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = (mfdcrx(base_data) & ~clr) | set; + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + val = (__mfdcr(base_data) & ~clr) | set; + __mtdcr(base_data, val); + } + spin_unlock_irqrestore(&dcr_ind_lock, flags); +} + +#define mfdcri(base, reg) __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \ + DCRN_ ## base ## _CONFIG_DATA, \ + reg) + +#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \ + DCRN_ ## base ## _CONFIG_DATA, \ + reg, data) + +#define dcri_clrset(base, reg, clr, set) __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR, \ + DCRN_ ## base ## _CONFIG_DATA, \ + reg, clr, set) + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DCR_NATIVE_H */ diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h new file mode 100644 index 00000000000..380274de429 --- /dev/null +++ b/arch/powerpc/include/asm/dcr-regs.h @@ -0,0 +1,183 @@ +/* + * Common DCR / SDR / CPR register definitions used on various IBM/AMCC + * 4xx processors + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp + * <benh@kernel.crashing.org> + * + * Mostly lifted from asm-ppc/ibm4xx.h by + * + * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> + * + */ + +#ifndef __DCR_REGS_H__ +#define __DCR_REGS_H__ + +/* + * Most DCRs used for controlling devices such as the MAL, DMA engine, + * etc... are obtained for the device tree. + * + * The definitions in this files are fixed DCRs and indirect DCRs that + * are commonly used outside of specific drivers or refer to core + * common registers that may occasionally have to be tweaked outside + * of the driver main register set + */ + +/* CPRs (440GX and 440SP/440SPe) */ +#define DCRN_CPR0_CONFIG_ADDR 0xc +#define DCRN_CPR0_CONFIG_DATA 0xd + +/* SDRs (440GX and 440SP/440SPe) */ +#define DCRN_SDR0_CONFIG_ADDR 0xe +#define DCRN_SDR0_CONFIG_DATA 0xf + +#define SDR0_PFC0 0x4100 +#define SDR0_PFC1 0x4101 +#define SDR0_PFC1_EPS 0x1c00000 +#define SDR0_PFC1_EPS_SHIFT 22 +#define SDR0_PFC1_RMII 0x02000000 +#define SDR0_MFR 0x4300 +#define SDR0_MFR_TAH0 0x80000000 /* TAHOE0 Enable */ +#define SDR0_MFR_TAH1 0x40000000 /* TAHOE1 Enable */ +#define SDR0_MFR_PCM 0x10000000 /* PPC440GP irq compat mode */ +#define SDR0_MFR_ECS 0x08000000 /* EMAC int clk */ +#define SDR0_MFR_T0TXFL 0x00080000 +#define SDR0_MFR_T0TXFH 0x00040000 +#define SDR0_MFR_T1TXFL 0x00020000 +#define SDR0_MFR_T1TXFH 0x00010000 +#define SDR0_MFR_E0TXFL 0x00008000 +#define SDR0_MFR_E0TXFH 0x00004000 +#define SDR0_MFR_E0RXFL 0x00002000 +#define SDR0_MFR_E0RXFH 0x00001000 +#define SDR0_MFR_E1TXFL 0x00000800 +#define SDR0_MFR_E1TXFH 0x00000400 +#define SDR0_MFR_E1RXFL 0x00000200 +#define SDR0_MFR_E1RXFH 0x00000100 +#define SDR0_MFR_E2TXFL 0x00000080 +#define SDR0_MFR_E2TXFH 0x00000040 +#define SDR0_MFR_E2RXFL 0x00000020 +#define SDR0_MFR_E2RXFH 0x00000010 +#define SDR0_MFR_E3TXFL 0x00000008 +#define SDR0_MFR_E3TXFH 0x00000004 +#define SDR0_MFR_E3RXFL 0x00000002 +#define SDR0_MFR_E3RXFH 0x00000001 +#define SDR0_UART0 0x0120 +#define SDR0_UART1 0x0121 +#define SDR0_UART2 0x0122 +#define SDR0_UART3 0x0123 +#define SDR0_CUST0 0x4000 + +/* SDR for 405EZ */ +#define DCRN_SDR_ICINTSTAT 0x4510 +#define ICINTSTAT_ICRX 0x80000000 +#define ICINTSTAT_ICTX0 0x40000000 +#define ICINTSTAT_ICTX1 0x20000000 +#define ICINTSTAT_ICTX 0x60000000 + +/* SDRs (460EX/460GT) */ +#define SDR0_ETH_CFG 0x4103 +#define SDR0_ETH_CFG_ECS 0x00000100 /* EMAC 
int clk source */ + +/* + * All those DCR register addresses are offsets from the base address + * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is + * excluded here and configured in the device tree. + */ +#define DCRN_SRAM0_SB0CR 0x00 +#define DCRN_SRAM0_SB1CR 0x01 +#define DCRN_SRAM0_SB2CR 0x02 +#define DCRN_SRAM0_SB3CR 0x03 +#define SRAM_SBCR_BU_MASK 0x00000180 +#define SRAM_SBCR_BS_64KB 0x00000800 +#define SRAM_SBCR_BU_RO 0x00000080 +#define SRAM_SBCR_BU_RW 0x00000180 +#define DCRN_SRAM0_BEAR 0x04 +#define DCRN_SRAM0_BESR0 0x05 +#define DCRN_SRAM0_BESR1 0x06 +#define DCRN_SRAM0_PMEG 0x07 +#define DCRN_SRAM0_CID 0x08 +#define DCRN_SRAM0_REVID 0x09 +#define DCRN_SRAM0_DPC 0x0a +#define SRAM_DPC_ENABLE 0x80000000 + +/* + * All those DCR register addresses are offsets from the base address + * for the SRAM0 controller (e.g. 0x30 on 440GX). The base address is + * excluded here and configured in the device tree. + */ +#define DCRN_L2C0_CFG 0x00 +#define L2C_CFG_L2M 0x80000000 +#define L2C_CFG_ICU 0x40000000 +#define L2C_CFG_DCU 0x20000000 +#define L2C_CFG_DCW_MASK 0x1e000000 +#define L2C_CFG_TPC 0x01000000 +#define L2C_CFG_CPC 0x00800000 +#define L2C_CFG_FRAN 0x00200000 +#define L2C_CFG_SS_MASK 0x00180000 +#define L2C_CFG_SS_256 0x00000000 +#define L2C_CFG_CPIM 0x00040000 +#define L2C_CFG_TPIM 0x00020000 +#define L2C_CFG_LIM 0x00010000 +#define L2C_CFG_PMUX_MASK 0x00007000 +#define L2C_CFG_PMUX_SNP 0x00000000 +#define L2C_CFG_PMUX_IF 0x00001000 +#define L2C_CFG_PMUX_DF 0x00002000 +#define L2C_CFG_PMUX_DS 0x00003000 +#define L2C_CFG_PMIM 0x00000800 +#define L2C_CFG_TPEI 0x00000400 +#define L2C_CFG_CPEI 0x00000200 +#define L2C_CFG_NAM 0x00000100 +#define L2C_CFG_SMCM 0x00000080 +#define L2C_CFG_NBRM 0x00000040 +#define L2C_CFG_RDBW 0x00000008 /* only 460EX/GT */ +#define DCRN_L2C0_CMD 0x01 +#define L2C_CMD_CLR 0x80000000 +#define L2C_CMD_DIAG 0x40000000 +#define L2C_CMD_INV 0x20000000 +#define L2C_CMD_CCP 0x10000000 +#define L2C_CMD_CTE 0x08000000 +#define L2C_CMD_STRC 0x04000000 +#define L2C_CMD_STPC 0x02000000 +#define L2C_CMD_RPMC 0x01000000 +#define L2C_CMD_HCC 0x00800000 +#define DCRN_L2C0_ADDR 0x02 +#define DCRN_L2C0_DATA 0x03 +#define DCRN_L2C0_SR 0x04 +#define L2C_SR_CC 0x80000000 +#define L2C_SR_CPE 0x40000000 +#define L2C_SR_TPE 0x20000000 +#define L2C_SR_LRU 0x10000000 +#define L2C_SR_PCS 0x08000000 +#define DCRN_L2C0_REVID 0x05 +#define DCRN_L2C0_SNP0 0x06 +#define DCRN_L2C0_SNP1 0x07 +#define L2C_SNP_BA_MASK 0xffff0000 +#define L2C_SNP_SSR_MASK 0x0000f000 +#define L2C_SNP_SSR_32G 0x0000f000 +#define L2C_SNP_ESR 0x00000800 + +/* + * DCR register offsets for 440SP/440SPe I2O/DMA controller. + * The base address is configured in the device tree. 
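+ *
+ * Illustrative sketch (editorial, not part of the original header): a
+ * driver would dcr_map() the range named by the device tree and then
+ * address these registers relative to that mapping, e.g.
+ *
+ *	u32 ibal = dcr_read(host, DCRN_I2O0_IBAL);
+ *	dcr_write(host, DCRN_I2O0_IBAL, ibal | I2O_REG_ENABLE);
+ *
+ * with 'host' the dcr_host_t returned by dcr_map().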
+ */ +#define DCRN_I2O0_IBAL 0x006 +#define DCRN_I2O0_IBAH 0x007 +#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */ + +/* 440SP/440SPe Software Reset DCR */ +#define DCRN_SDR0_SRST 0x0200 +#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */ + +/* 440SP/440SPe Memory Queue DCR offsets */ +#define DCRN_MQ0_XORBA 0x04 +#define DCRN_MQ0_CF2H 0x06 +#define DCRN_MQ0_CFBHL 0x0f +#define DCRN_MQ0_BAUH 0x10 + +/* HB/LL Paths Configuration Register */ +#define MQ0_CFBHL_TPLM 28 +#define MQ0_CFBHL_HBCL 23 +#define MQ0_CFBHL_POLY 15 + +#endif /* __DCR_REGS_H__ */ diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h new file mode 100644 index 00000000000..9d6851cfb84 --- /dev/null +++ b/arch/powerpc/include/asm/dcr.h @@ -0,0 +1,78 @@ +/* + * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_POWERPC_DCR_H +#define _ASM_POWERPC_DCR_H +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#ifdef CONFIG_PPC_DCR + +#ifdef CONFIG_PPC_DCR_NATIVE +#include <asm/dcr-native.h> +#endif + +#ifdef CONFIG_PPC_DCR_MMIO +#include <asm/dcr-mmio.h> +#endif + + +/* Indirection layer for providing both NATIVE and MMIO support. 
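+ *
+ * Usage sketch (editorial; 'np' is a hypothetical struct device_node
+ * pointer and 'value' a hypothetical register value):
+ *
+ *	dcr_host_t host = dcr_map(np, dcr_resource_start(np, 0),
+ *				  dcr_resource_len(np, 0));
+ *	if (DCR_MAP_OK(host))
+ *		dcr_write(host, 0, value);
+ *
+ * The same call sites work whether the platform selects the native
+ * backend, the MMIO backend, or both.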
*/ + +#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) + +#include <asm/dcr-generic.h> + +#define DCR_MAP_OK(host) dcr_map_ok_generic(host) +#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c) +#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c) +#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n) +#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value) + +#else + +#ifdef CONFIG_PPC_DCR_NATIVE +typedef dcr_host_native_t dcr_host_t; +#define DCR_MAP_OK(host) dcr_map_ok_native(host) +#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c) +#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c) +#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n) +#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value) +#else +typedef dcr_host_mmio_t dcr_host_t; +#define DCR_MAP_OK(host) dcr_map_ok_mmio(host) +#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c) +#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c) +#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n) +#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value) +#endif + +#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ + +/* + * additional helpers to read the DCR * base from the device-tree + */ +struct device_node; +extern unsigned int dcr_resource_start(const struct device_node *np, + unsigned int index); +extern unsigned int dcr_resource_len(const struct device_node *np, + unsigned int index); +#endif /* CONFIG_PPC_DCR */ +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DCR_H */ diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h new file mode 100644 index 00000000000..52e4d54da2a --- /dev/null +++ b/arch/powerpc/include/asm/delay.h @@ -0,0 +1,72 @@ +#ifndef _ASM_POWERPC_DELAY_H +#define _ASM_POWERPC_DELAY_H +#ifdef __KERNEL__ + +#include <asm/time.h> + +/* + * Copyright 1996, Paul Mackerras. + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan, + * Anton Blanchard. + */ + +extern void __delay(unsigned long loops); +extern void udelay(unsigned long usecs); + +/* + * On shared processor machines the generic implementation of mdelay can + * result in large errors. While each iteration of the loop inside mdelay + * is supposed to take 1ms, the hypervisor could sleep our partition for + * longer (eg 10ms). With the right timing these errors can add up. + * + * Since there is no 32bit overflow issue on 64bit kernels, just call + * udelay directly. + */ +#ifdef CONFIG_PPC64 +#define mdelay(n) udelay((n) * 1000) +#endif + +/** + * spin_event_timeout - spin until a condition gets true or a timeout elapses + * @condition: a C expression to evalate + * @timeout: timeout, in microseconds + * @delay: the number of microseconds to delay between each evaluation of + * @condition + * + * The process spins until the condition evaluates to true (non-zero) or the + * timeout elapses. The return value of this macro is the value of + * @condition when the loop terminates. This allows you to determine the cause + * of the loop terminates. 
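+ *
+ * Editorial example (hypothetical register and bit names):
+ *
+ *	ret = spin_event_timeout(in_be32(&regs->status) & TX_DONE, 1000, 10);
+ *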
If the return value is zero, then you know a + * timeout has occurred. + * + * This primary purpose of this macro is to poll on a hardware register + * until a status bit changes. The timeout ensures that the loop still + * terminates even if the bit never changes. The delay is for devices that + * need a delay in between successive reads. + * + * gcc will optimize out the if-statement if @delay is a constant. + */ +#define spin_event_timeout(condition, timeout, delay) \ +({ \ + typeof(condition) __ret; \ + unsigned long __loops = tb_ticks_per_usec * timeout; \ + unsigned long __start = get_tbl(); \ + while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \ + if (delay) \ + udelay(delay); \ + else \ + cpu_relax(); \ + if (!__ret) \ + __ret = (condition); \ + __ret; \ +}) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DELAY_H */ diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h new file mode 100644 index 00000000000..d57c08acedf --- /dev/null +++ b/arch/powerpc/include/asm/device.h @@ -0,0 +1,42 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_POWERPC_DEVICE_H +#define _ASM_POWERPC_DEVICE_H + +struct dma_map_ops; +struct device_node; + +/* + * Arch extensions to struct device. + * + * When adding fields, consider macio_add_one_device in + * drivers/macintosh/macio_asic.c + */ +struct dev_archdata { + /* DMA operations on that device */ + struct dma_map_ops *dma_ops; + + /* + * When an iommu is in use, dma_data is used as a ptr to the base of the + * iommu_table. Otherwise, it is a simple numerical offset. + */ + union { + dma_addr_t dma_offset; + void *iommu_table_base; + } dma_data; + +#ifdef CONFIG_SWIOTLB + dma_addr_t max_direct_dma_addr; +#endif +}; + +struct pdev_archdata { + u64 dma_mask; +}; + +#define ARCH_HAS_DMA_GET_REQUIRED_MASK + +#endif /* _ASM_POWERPC_DEVICE_H */ diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h new file mode 100644 index 00000000000..9b198d1b3b2 --- /dev/null +++ b/arch/powerpc/include/asm/disassemble.h @@ -0,0 +1,80 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 
2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __ASM_PPC_DISASSEMBLE_H__ +#define __ASM_PPC_DISASSEMBLE_H__ + +#include <linux/types.h> + +static inline unsigned int get_op(u32 inst) +{ + return inst >> 26; +} + +static inline unsigned int get_xop(u32 inst) +{ + return (inst >> 1) & 0x3ff; +} + +static inline unsigned int get_sprn(u32 inst) +{ + return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); +} + +static inline unsigned int get_dcrn(u32 inst) +{ + return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); +} + +static inline unsigned int get_rt(u32 inst) +{ + return (inst >> 21) & 0x1f; +} + +static inline unsigned int get_rs(u32 inst) +{ + return (inst >> 21) & 0x1f; +} + +static inline unsigned int get_ra(u32 inst) +{ + return (inst >> 16) & 0x1f; +} + +static inline unsigned int get_rb(u32 inst) +{ + return (inst >> 11) & 0x1f; +} + +static inline unsigned int get_rc(u32 inst) +{ + return inst & 0x1; +} + +static inline unsigned int get_ws(u32 inst) +{ + return (inst >> 11) & 0x1f; +} + +static inline unsigned int get_d(u32 inst) +{ + return inst & 0xffff; +} + +#endif /* __ASM_PPC_DISASSEMBLE_H__ */ diff --git a/arch/powerpc/include/asm/div64.h b/arch/powerpc/include/asm/div64.h new file mode 100644 index 00000000000..6cd978cefb2 --- /dev/null +++ b/arch/powerpc/include/asm/div64.h @@ -0,0 +1 @@ +#include <asm-generic/div64.h> diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h new file mode 100644 index 00000000000..dd70fac57ec --- /dev/null +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2004 IBM + * + * Implements the generic device dma API for powerpc. + * the pci and vio busses + */ +#ifndef _ASM_DMA_MAPPING_H +#define _ASM_DMA_MAPPING_H +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/cache.h> +/* need struct page definitions */ +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/dma-attrs.h> +#include <linux/dma-debug.h> +#include <asm/io.h> +#include <asm/swiotlb.h> + +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + +/* Some dma direct funcs must be visible for use in other dma_ops */ +extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); +extern void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + + +#ifdef CONFIG_NOT_COHERENT_CACHE +/* + * DMA-consistent mapping functions for PowerPCs that don't support + * cache snooping. These allocate/free a region of uncached mapped + * memory space for use with DMA devices. Alternatively, you could + * allocate the space "normally" and use the cache management functions + * to ensure it is consistent. + */ +struct device; +extern void *__dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp); +extern void __dma_free_coherent(size_t size, void *vaddr); +extern void __dma_sync(void *vaddr, size_t size, int direction); +extern void __dma_sync_page(struct page *page, unsigned long offset, + size_t size, int direction); +extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); + +#else /* ! CONFIG_NOT_COHERENT_CACHE */ +/* + * Cache coherent cores. + */ + +#define __dma_alloc_coherent(dev, gfp, size, handle) NULL +#define __dma_free_coherent(size, addr) ((void)0) +#define __dma_sync(addr, size, rw) ((void)0) +#define __dma_sync_page(pg, off, sz, rw) ((void)0) + +#endif /* ! 
CONFIG_NOT_COHERENT_CACHE */ + +static inline unsigned long device_to_mask(struct device *dev) +{ + if (dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} + +/* + * Available generic sets of operations + */ +#ifdef CONFIG_PPC64 +extern struct dma_map_ops dma_iommu_ops; +#endif +extern struct dma_map_ops dma_direct_ops; + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + /* We don't handle the NULL dev case for ISA for now. We could + * do it via an out of line call but it is not needed for now. The + * only ISA DMA device we support is the floppy and we have a hack + * in the floppy driver directly to get a device for us. + */ + if (unlikely(dev == NULL)) + return NULL; + + return dev->archdata.dma_ops; +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + dev->archdata.dma_ops = ops; +} + +/* + * get_dma_offset() + * + * Get the dma offset on configurations where the dma address can be determined + * from the physical address by looking at a simple offset. Direct dma and + * swiotlb use this function, but it is typically not used by implementations + * with an iommu. + */ +static inline dma_addr_t get_dma_offset(struct device *dev) +{ + if (dev) + return dev->archdata.dma_data.dma_offset; + + return PCI_DRAM_OFFSET; +} + +static inline void set_dma_offset(struct device *dev, dma_addr_t off) +{ + if (dev) + dev->archdata.dma_data.dma_offset = off; +} + +/* this will be removed soon */ +#define flush_write_buffers() + +#include <asm-generic/dma-mapping-common.h> + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (unlikely(dma_ops == NULL)) + return 0; + if (dma_ops->dma_supported == NULL) + return 1; + return dma_ops->dma_supported(dev, mask); +} + +extern int dma_set_mask(struct device *dev, u64 dma_mask); + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!dma_ops); + + cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag); + + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + + return cpu_addr; +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + + dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (dma_ops->mapping_error) + return dma_ops->mapping_error(dev, dma_addr); + +#ifdef CONFIG_PPC64 + return (dma_addr == DMA_ERROR_CODE); +#else + return 0; +#endif +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ +#ifdef CONFIG_SWIOTLB + struct dev_archdata *sd = &dev->archdata; + + if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr) + return 0; +#endif + + if (!dev->dma_mask) + return 0; + + return addr + size - 1 <= *dev->dma_mask; +} + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr + get_dma_offset(dev); +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr - get_dma_offset(dev); +} + +#define 
dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +extern int dma_mmap_coherent(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t); +#define ARCH_HAS_DMA_MMAP_COHERENT + + +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + __dma_sync(vaddr, size, (int)direction); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h new file mode 100644 index 00000000000..a7e06e25c70 --- /dev/null +++ b/arch/powerpc/include/asm/dma.h @@ -0,0 +1,360 @@ +#ifndef _ASM_POWERPC_DMA_H +#define _ASM_POWERPC_DMA_H +#ifdef __KERNEL__ + +/* + * Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + * Changes for ppc sound by Christoph Nadig + */ + +/* + * Note: Adapted for PowerPC by Gary Thomas + * Modified by Cort Dougan <cort@cs.nmt.edu> + * + * None of this really applies for Power Macintoshes. There is + * basically just enough here to get kernel/dma.c to compile. + * + * There may be some comments or restrictions made here which are + * not valid for the PReP platform. Take what you read + * with a grain of salt. + */ + +#include <asm/io.h> +#include <linux/spinlock.h> +#include <asm/system.h> + +#ifndef MAX_DMA_CHANNELS +#define MAX_DMA_CHANNELS 8 +#endif + +/* The maximum address that we can perform a DMA transfer to on this platform */ +/* Doesn't really apply... */ +#define MAX_DMA_ADDRESS (~0UL) + +#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory. + * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... 
A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_LO_PAGE_0 0x87 /* DMA page registers */ +#define DMA_LO_PAGE_1 0x83 +#define DMA_LO_PAGE_2 0x81 +#define DMA_LO_PAGE_3 0x82 +#define DMA_LO_PAGE_5 0x8B +#define DMA_LO_PAGE_6 0x89 +#define DMA_LO_PAGE_7 0x8A + +#define DMA_HI_PAGE_0 0x487 /* DMA page registers */ +#define DMA_HI_PAGE_1 0x483 +#define DMA_HI_PAGE_2 0x481 +#define DMA_HI_PAGE_3 0x482 +#define DMA_HI_PAGE_5 0x48B +#define DMA_HI_PAGE_6 0x489 +#define DMA_HI_PAGE_7 0x48A + +#define DMA1_EXT_REG 0x40B +#define DMA2_EXT_REG 0x4D6 + +#ifndef __powerpc64__ + /* in arch/ppc/kernel/setup.c -- Cort */ + extern unsigned int DMA_MODE_WRITE; + extern unsigned int DMA_MODE_READ; + extern unsigned long ISA_DMA_THRESHOLD; +#else + #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ + #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#endif + +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + 
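	/* Pairs with claim_dma_lock(); 'flags' must be the value it returned. */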
spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + unsigned char ucDmaCmd = 0x00; + + if (dmanr != 4) { + dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */ + dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */ + } + if (dmanr <= 3) { + dma_outb(dmanr, DMA1_MASK_REG); + dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */ + } else { + dma_outb(dmanr & 3, DMA2_MASK_REG); + } +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! --- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register, but a 64k boundary + * may have been crossed. + */ +static __inline__ void set_dma_page(unsigned int dmanr, int pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_LO_PAGE_0); + dma_outb(pagenr >> 8, DMA_HI_PAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_LO_PAGE_1); + dma_outb(pagenr >> 8, DMA_HI_PAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_LO_PAGE_2); + dma_outb(pagenr >> 8, DMA_HI_PAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_LO_PAGE_3); + dma_outb(pagenr >> 8, DMA_HI_PAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5); + dma_outb(pagenr >> 8, DMA_HI_PAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6); + dma_outb(pagenr >> 8, DMA_HI_PAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7); + dma_outb(pagenr >> 8, DMA_HI_PAGE_7); + break; + } +} + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys) +{ + if (dmanr <= 3) { + dma_outb(phys & 0xff, + ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((phys >> 8) & 0xff, + ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((phys >> 1) & 0xff, + ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((phys >> 9) & 0xff, + ((dmanr & 3) << 2) + IO_DMA2_BASE); + } + set_dma_page(dmanr, phys >> 16); +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. 
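 *
 * For example, a 4096-byte transfer on channel 2 is programmed as
 * 0x0fff (4096 - 1), while the same 4096 bytes on channel 6 are
 * programmed as 0x07ff (2048 words - 1).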
+ */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr <= 3) + ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE + : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? count : (count << 1); +} + +/* These are in kernel/dma.c: */ + +/* reserve a DMA channel */ +extern int request_dma(unsigned int dmanr, const char *device_id); +/* release it again */ +extern void free_dma(unsigned int dmanr); + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_DMA_H */ diff --git a/arch/powerpc/include/asm/edac.h b/arch/powerpc/include/asm/edac.h new file mode 100644 index 00000000000..6ead88bbfbb --- /dev/null +++ b/arch/powerpc/include/asm/edac.h @@ -0,0 +1,40 @@ +/* + * PPC EDAC common defs + * + * Author: Dave Jiang <djiang@mvista.com> + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#ifndef ASM_EDAC_H +#define ASM_EDAC_H +/* + * ECC atomic, DMA, SMP and interrupt safe scrub function. + * Implements the per arch atomic_scrub() that EDAC use for software + * ECC scrubbing. It reads memory and then writes back the original + * value, allowing the hardware to detect and correct memory errors. + */ +static __inline__ void atomic_scrub(void *va, u32 size) +{ + unsigned int *virt_addr = va; + unsigned int temp; + unsigned int i; + + for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) { + /* Very carefully read and write to memory atomically + * so we are interrupt, DMA and SMP safe. + */ + __asm__ __volatile__ ("\n\ + 1: lwarx %0,0,%1\n\ + stwcx. %0,0,%1\n\ + bne- 1b\n\ + isync" + : "=&r"(temp) + : "r"(virt_addr) + : "cr0", "memory"); + } +} + +#endif diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h new file mode 100644 index 00000000000..66ea9b8b95c --- /dev/null +++ b/arch/powerpc/include/asm/eeh.h @@ -0,0 +1,213 @@ +/* + * eeh.h + * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _POWERPC_EEH_H +#define _POWERPC_EEH_H +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/list.h> +#include <linux/string.h> + +struct pci_dev; +struct pci_bus; +struct device_node; + +#ifdef CONFIG_EEH + +extern int eeh_subsystem_enabled; + +/* Values for eeh_mode bits in device_node */ +#define EEH_MODE_SUPPORTED (1<<0) +#define EEH_MODE_NOCHECK (1<<1) +#define EEH_MODE_ISOLATED (1<<2) +#define EEH_MODE_RECOVERING (1<<3) +#define EEH_MODE_IRQ_DISABLED (1<<4) + +/* Max number of EEH freezes allowed before we consider the device + * to be permanently disabled. */ +#define EEH_MAX_ALLOWED_FREEZES 5 + +void __init eeh_init(void); +unsigned long eeh_check_failure(const volatile void __iomem *token, + unsigned long val); +int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); +void __init pci_addr_cache_build(void); + +/** + * eeh_add_device_early + * eeh_add_device_late + * + * Perform eeh initialization for devices added after boot. + * Call eeh_add_device_early before doing any i/o to the + * device (including config space i/o). Call eeh_add_device_late + * to finish the eeh setup for this device. + */ +void eeh_add_device_tree_early(struct device_node *); +void eeh_add_device_tree_late(struct pci_bus *); + +/** + * eeh_remove_device_recursive - undo EEH for device & children. + * @dev: pci device to be removed + * + * As above, this removes the device; it also removes child + * pci devices as well. + */ +void eeh_remove_bus_device(struct pci_dev *); + +/** + * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. + * + * If this macro yields TRUE, the caller relays to eeh_check_failure() + * which does further tests out of line. + */ +#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) + +/* + * Reads from a device which has been isolated by EEH will return + * all 1s. This macro gives an all-1s value of the given size (in + * bytes: 1, 2, or 4) for comparing with the result of a read. + */ +#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) + +#else /* !CONFIG_EEH */ +static inline void eeh_init(void) { } + +static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) +{ + return val; +} + +static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) +{ + return 0; +} + +static inline void pci_addr_cache_build(void) { } + +static inline void eeh_add_device_tree_early(struct device_node *dn) { } + +static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } + +static inline void eeh_remove_bus_device(struct pci_dev *dev) { } +#define EEH_POSSIBLE_ERROR(val, type) (0) +#define EEH_IO_ERROR_VALUE(size) (-1UL) +#endif /* CONFIG_EEH */ + +#ifdef CONFIG_PPC64 +/* + * MMIO read/write operations with EEH support. 
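 *
 * These wrappers mirror the raw in_8()/in_le16()/in_le32()/in_be32()
 * accessors, but when a read returns all 1s (the value an EEH-isolated
 * device produces) they call eeh_check_failure() to confirm whether the
 * slot has really been frozen before handing the value back.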
+ */ +static inline u8 eeh_readb(const volatile void __iomem *addr) +{ + u8 val = in_8(addr); + if (EEH_POSSIBLE_ERROR(val, u8)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u16 eeh_readw(const volatile void __iomem *addr) +{ + u16 val = in_le16(addr); + if (EEH_POSSIBLE_ERROR(val, u16)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u32 eeh_readl(const volatile void __iomem *addr) +{ + u32 val = in_le32(addr); + if (EEH_POSSIBLE_ERROR(val, u32)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u64 eeh_readq(const volatile void __iomem *addr) +{ + u64 val = in_le64(addr); + if (EEH_POSSIBLE_ERROR(val, u64)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u16 eeh_readw_be(const volatile void __iomem *addr) +{ + u16 val = in_be16(addr); + if (EEH_POSSIBLE_ERROR(val, u16)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u32 eeh_readl_be(const volatile void __iomem *addr) +{ + u32 val = in_be32(addr); + if (EEH_POSSIBLE_ERROR(val, u32)) + return eeh_check_failure(addr, val); + return val; +} + +static inline u64 eeh_readq_be(const volatile void __iomem *addr) +{ + u64 val = in_be64(addr); + if (EEH_POSSIBLE_ERROR(val, u64)) + return eeh_check_failure(addr, val); + return val; +} + +static inline void eeh_memcpy_fromio(void *dest, const + volatile void __iomem *src, + unsigned long n) +{ + _memcpy_fromio(dest, src, n); + + /* Look for ffff's here at dest[n]. Assume that at least 4 bytes + * were copied. Check all four bytes. + */ + if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32)) + eeh_check_failure(src, *((u32 *)(dest + n - 4))); +} + +/* in-string eeh macros */ +static inline void eeh_readsb(const volatile void __iomem *addr, void * buf, + int ns) +{ + _insb(addr, buf, ns); + if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8)) + eeh_check_failure(addr, *(u8*)buf); +} + +static inline void eeh_readsw(const volatile void __iomem *addr, void * buf, + int ns) +{ + _insw(addr, buf, ns); + if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16)) + eeh_check_failure(addr, *(u16*)buf); +} + +static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, + int nl) +{ + _insl(addr, buf, nl); + if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32)) + eeh_check_failure(addr, *(u32*)buf); +} + +#endif /* CONFIG_PPC64 */ +#endif /* __KERNEL__ */ +#endif /* _POWERPC_EEH_H */ diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h new file mode 100644 index 00000000000..cc3cb04539a --- /dev/null +++ b/arch/powerpc/include/asm/eeh_event.h @@ -0,0 +1,53 @@ +/* + * eeh_event.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Copyright (c) 2005 Linas Vepstas <linas@linas.org> + */ + +#ifndef ASM_POWERPC_EEH_EVENT_H +#define ASM_POWERPC_EEH_EVENT_H +#ifdef __KERNEL__ + +/** EEH event -- structure holding pci controller data that describes + * a change in the isolation status of a PCI slot. A pointer + * to this struct is passed as the data pointer in a notify callback. + */ +struct eeh_event { + struct list_head list; + struct device_node *dn; /* struct device node */ + struct pci_dev *dev; /* affected device */ +}; + +/** + * eeh_send_failure_event - generate a PCI error event + * @dev pci device + * + * This routine builds a PCI error event which will be delivered + * to all listeners on the eeh_notifier_chain. + * + * This routine can be called within an interrupt context; + * the actual event will be delivered in a normal context + * (from a workqueue). + */ +int eeh_send_failure_event (struct device_node *dn, + struct pci_dev *dev); + +/* Main recovery function */ +struct pci_dn * handle_eeh_events (struct eeh_event *); + +#endif /* __KERNEL__ */ +#endif /* ASM_POWERPC_EEH_EVENT_H */ diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h new file mode 100644 index 00000000000..a9e1f4f796f --- /dev/null +++ b/arch/powerpc/include/asm/ehv_pic.h @@ -0,0 +1,40 @@ +/* + * EHV_PIC private definitions and structure. + * + * Copyright 2008-2010 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ +#ifndef __EHV_PIC_H__ +#define __EHV_PIC_H__ + +#include <linux/irq.h> + +#define NR_EHV_PIC_INTS 1024 + +#define EHV_PIC_INFO(name) EHV_PIC_##name + +#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0 +#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1 +#define EHV_PIC_VECPRI_SENSE_EDGE 0 +#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2 +#define EHV_PIC_VECPRI_POLARITY_MASK 0x1 +#define EHV_PIC_VECPRI_SENSE_MASK 0x2 + +struct ehv_pic { + /* The remapper for this EHV_PIC */ + struct irq_host *irqhost; + + /* The "linux" controller struct */ + struct irq_chip hc_irq; + + /* core int flag */ + int coreint_flag; +}; + +void ehv_pic_init(void); +unsigned int ehv_pic_get_irq(void); + +#endif /* __EHV_PIC_H__ */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h new file mode 100644 index 00000000000..3bf9cca3514 --- /dev/null +++ b/arch/powerpc/include/asm/elf.h @@ -0,0 +1,438 @@ +#ifndef _ASM_POWERPC_ELF_H +#define _ASM_POWERPC_ELF_H + +#ifdef __KERNEL__ +#include <linux/sched.h> /* for task_struct */ +#include <asm/page.h> +#include <asm/string.h> +#endif + +#include <linux/types.h> + +#include <asm/ptrace.h> +#include <asm/cputable.h> +#include <asm/auxvec.h> + +/* PowerPC relocations defined by the ABIs */ +#define R_PPC_NONE 0 +#define R_PPC_ADDR32 1 /* 32bit absolute address */ +#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ +#define R_PPC_ADDR16 3 /* 16bit absolute address */ +#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ +#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ +#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ +#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ +#define R_PPC_ADDR14_BRTAKEN 8 +#define R_PPC_ADDR14_BRNTAKEN 9 +#define R_PPC_REL24 10 /* PC relative 26 bit */ +#define R_PPC_REL14 11 /* PC relative 16 bit */ +#define R_PPC_REL14_BRTAKEN 12 +#define R_PPC_REL14_BRNTAKEN 13 +#define R_PPC_GOT16 14 +#define R_PPC_GOT16_LO 15 +#define R_PPC_GOT16_HI 16 +#define R_PPC_GOT16_HA 17 +#define R_PPC_PLTREL24 18 +#define R_PPC_COPY 19 +#define R_PPC_GLOB_DAT 20 +#define R_PPC_JMP_SLOT 21 +#define R_PPC_RELATIVE 22 +#define R_PPC_LOCAL24PC 23 +#define R_PPC_UADDR32 24 +#define R_PPC_UADDR16 25 +#define R_PPC_REL32 26 +#define R_PPC_PLT32 27 +#define R_PPC_PLTREL32 28 +#define R_PPC_PLT16_LO 29 +#define R_PPC_PLT16_HI 30 +#define R_PPC_PLT16_HA 31 +#define R_PPC_SDAREL16 32 +#define R_PPC_SECTOFF 33 +#define R_PPC_SECTOFF_LO 34 +#define R_PPC_SECTOFF_HI 35 +#define R_PPC_SECTOFF_HA 36 + +/* PowerPC relocations defined for the TLS access ABI. */ +#define R_PPC_TLS 67 /* none (sym+add)@tls */ +#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */ +#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */ +#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ +#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ +#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ +#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */ +#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */ +#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ +#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ +#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ +#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */ +#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ +#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ +#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ +#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ +#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ +#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ +#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ +#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ +#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */ +#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */ +#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ +#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ +#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */ +#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */ +#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ +#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ + +/* keep this the last entry. */ +#define R_PPC_NUM 95 + +/* + * ELF register definitions.. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define ELF_NGREG 48 /* includes nip, msr, lr, etc. 
*/ +#define ELF_NFPREG 33 /* includes fpscr */ + +typedef unsigned long elf_greg_t64; +typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; + +typedef unsigned int elf_greg_t32; +typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG]; +typedef elf_gregset_t32 compat_elf_gregset_t; + +/* + * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps. + */ +#ifdef __powerpc64__ +# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */ +# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */ +# define ELF_NVSRHALFREG 32 /* Half the vsx registers */ +# define ELF_GREG_TYPE elf_greg_t64 +#else +# define ELF_NEVRREG 34 /* includes acc (as 2) */ +# define ELF_NVRREG 33 /* includes vscr */ +# define ELF_GREG_TYPE elf_greg_t32 +# define ELF_ARCH EM_PPC +# define ELF_CLASS ELFCLASS32 +# define ELF_DATA ELFDATA2MSB +#endif /* __powerpc64__ */ + +#ifndef ELF_ARCH +# define ELF_ARCH EM_PPC64 +# define ELF_CLASS ELFCLASS64 +# define ELF_DATA ELFDATA2MSB + typedef elf_greg_t64 elf_greg_t; + typedef elf_gregset_t64 elf_gregset_t; +#else + /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */ + typedef elf_greg_t32 elf_greg_t; + typedef elf_gregset_t32 elf_gregset_t; +#endif /* ELF_ARCH */ + +/* Floating point registers */ +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* Altivec registers */ +/* + * The entries with indexes 0-31 contain the corresponding vector registers. + * The entry with index 32 contains the vscr as the last word (offset 12) + * within the quadword. This allows the vscr to be stored as either a + * quadword (since it must be copied via a vector register to/from storage) + * or as a word. + * + * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first + * word (offset 0) within the quadword. + * + * This definition of the VMX state is compatible with the current PPC32 + * ptrace interface. This allows signal handling and ptrace to use the same + * structures. This also simplifies the implementation of a bi-arch + * (combined (32- and 64-bit) gdb. + * + * Note that it's _not_ compatible with 32 bits ucontext which stuffs the + * vrsave along with vscr and so only uses 33 vectors for the register set + */ +typedef __vector128 elf_vrreg_t; +typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG]; +#ifdef __powerpc64__ +typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; +typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; +#endif + +#ifdef __KERNEL__ +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) +#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC) + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +extern unsigned long randomize_et_dyn(unsigned long base); +#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) + +/* + * Our registers are always unsigned longs, whether we're a 32 bit + * process or 64 bit, on either a 64 bit or 32 bit kernel. + * + * This macro relies on elf_regs[i] having the right type to truncate to, + * either u32 or u64. 
It defines the body of the elf_core_copy_regs + * function, either the native one with elf_gregset_t elf_regs or + * the 32-bit one with elf_gregset_t32 elf_regs. + */ +#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \ + int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \ + (size_t)ELF_NGREG); \ + for (i = 0; i < nregs; i++) \ + elf_regs[i] = ((unsigned long *) regs)[i]; \ + memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0])) + +/* Common routine for both 32-bit and 64-bit native processes */ +static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs, + struct pt_regs *regs) +{ + PPC_ELF_CORE_COPY_REGS(elf_regs, regs); +} +#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs); + +typedef elf_vrregset_t elf_fpxregset_t; + +/* ELF_HWCAP yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ +# define ELF_HWCAP (cur_cpu_spec->cpu_user_features) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. */ + +#define ELF_PLATFORM (cur_cpu_spec->platform) + +/* While ELF_PLATFORM indicates the ISA supported by the platform, it + * may not accurately reflect the underlying behavior of the hardware + * (as in the case of running in Power5+ compatibility mode on a + * Power6 machine). ELF_BASE_PLATFORM allows ld.so to load libraries + * that are tuned for the real hardware. + */ +#define ELF_BASE_PLATFORM (powerpc_base_platform) + +#ifdef __powerpc64__ +# define ELF_PLAT_INIT(_r, load_addr) do { \ + _r->gpr[2] = load_addr; \ +} while (0) +#endif /* __powerpc64__ */ + +#ifdef __powerpc64__ +# define SET_PERSONALITY(ex) \ +do { \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + set_thread_flag(TIF_32BIT); \ + else \ + clear_thread_flag(TIF_32BIT); \ + if (personality(current->personality) != PER_LINUX32) \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ +} while (0) +/* + * An executable for which elf_read_implies_exec() returns TRUE will + * have the READ_IMPLIES_EXEC personality flag set automatically. This + * is only required to work around bugs in old 32bit toolchains. Since + * the 64bit ABI has never had these issues dont enable the workaround + * even if we have an executable stack. + */ +# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \ + (exec_stk == EXSTACK_DEFAULT) : 0) +#else +# define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX | (current->personality & (~PER_MASK))) +# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT) +#endif /* __powerpc64__ */ + +extern int dcache_bsize; +extern int icache_bsize; +extern int ucache_bsize; + +/* vDSO has arch_setup_additional_pages */ +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b) + +/* 1GB for 64bit, 8MB for 32bit */ +#define STACK_RND_MASK (is_32bit_task() ? 
\ + (0x7ff >> (PAGE_SHIFT - 12)) : \ + (0x3ffff >> (PAGE_SHIFT - 12))) + +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk + +#endif /* __KERNEL__ */ + +/* + * The requirements here are: + * - keep the final alignment of sp (sp & 0xf) + * - make sure the 32-bit value at the first 16 byte aligned position of + * AUXV is greater than 16 for glibc compatibility. + * AT_IGNOREPPC is used for that. + * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, + * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. + * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes + */ +#define ARCH_DLINFO \ +do { \ + /* Handle glibc compatibility. */ \ + NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ + NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ + /* Cache size items */ \ + NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ + NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ + NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ + VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ +} while (0) + +/* PowerPC64 relocations defined by the ABIs */ +#define R_PPC64_NONE R_PPC_NONE +#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address. */ +#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned. */ +#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address. */ +#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of abs. address. */ +#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of abs. address. */ +#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */ +#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned. */ +#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN +#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN +#define R_PPC64_REL24 R_PPC_REL24 /* PC relative 26 bit, word aligned. */ +#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit. */ +#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN +#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN +#define R_PPC64_GOT16 R_PPC_GOT16 +#define R_PPC64_GOT16_LO R_PPC_GOT16_LO +#define R_PPC64_GOT16_HI R_PPC_GOT16_HI +#define R_PPC64_GOT16_HA R_PPC_GOT16_HA + +#define R_PPC64_COPY R_PPC_COPY +#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT +#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT +#define R_PPC64_RELATIVE R_PPC_RELATIVE + +#define R_PPC64_UADDR32 R_PPC_UADDR32 +#define R_PPC64_UADDR16 R_PPC_UADDR16 +#define R_PPC64_REL32 R_PPC_REL32 +#define R_PPC64_PLT32 R_PPC_PLT32 +#define R_PPC64_PLTREL32 R_PPC_PLTREL32 +#define R_PPC64_PLT16_LO R_PPC_PLT16_LO +#define R_PPC64_PLT16_HI R_PPC_PLT16_HI +#define R_PPC64_PLT16_HA R_PPC_PLT16_HA + +#define R_PPC64_SECTOFF R_PPC_SECTOFF +#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO +#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI +#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA +#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2. */ +#define R_PPC64_ADDR64 38 /* doubleword64 S + A. */ +#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A). */ +#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A). */ +#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A). */ +#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A). */ +#define R_PPC64_UADDR64 43 /* doubleword64 S + A. */ +#define R_PPC64_REL64 44 /* doubleword64 S + A - P. */ +#define R_PPC64_PLT64 45 /* doubleword64 L + A. */ +#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P. */ +#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */ +#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.). 
*/ +#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.). */ +#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.). */ +#define R_PPC64_TOC 51 /* doubleword64 .TOC. */ +#define R_PPC64_PLTGOT16 52 /* half16* M + A. */ +#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A). */ +#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A). */ +#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A). */ + +#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2. */ +#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2. */ +#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2. */ +#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2. */ +#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2. */ +#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2. */ +#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2. */ +#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2. */ +#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */ +#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */ +#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */ + +/* PowerPC64 relocations defined for the TLS access ABI. */ +#define R_PPC64_TLS 67 /* none (sym+add)@tls */ +#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */ +#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */ +#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ +#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ +#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ +#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */ +#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */ +#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ +#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ +#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ +#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */ +#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ +#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ +#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ +#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ +#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ +#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ +#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ +#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ +#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */ +#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */ +#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ +#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ +#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */ +#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */ +#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */ +#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */ +#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */ +#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */ +#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */ +#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */ +#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */ +#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */ +#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */ +#define 
R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */ +#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */ +#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ +#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ +#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ + +/* Keep this the last entry. */ +#define R_PPC64_NUM 107 + +/* There's actually a third entry here, but it's unused */ +struct ppc64_opd_entry +{ + unsigned long funcaddr; + unsigned long r2; +}; + +#ifdef __KERNEL__ + +#ifdef CONFIG_SPU_BASE +/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ +#define NT_SPU 1 + +#define ARCH_HAVE_EXTRA_ELF_NOTES + +#endif /* CONFIG_SPU_BASE */ + +#endif /* __KERNEL */ + +#endif /* _ASM_POWERPC_ELF_H */ diff --git a/arch/powerpc/include/asm/emergency-restart.h b/arch/powerpc/include/asm/emergency-restart.h new file mode 100644 index 00000000000..3711bd9d50b --- /dev/null +++ b/arch/powerpc/include/asm/emergency-restart.h @@ -0,0 +1 @@ +#include <asm-generic/emergency-restart.h> diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h new file mode 100644 index 00000000000..63f2a22e995 --- /dev/null +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -0,0 +1,92 @@ +/* + * Copyright 2007 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _ASM_POWERPC_EMULATED_OPS_H +#define _ASM_POWERPC_EMULATED_OPS_H + +#include <linux/atomic.h> +#include <linux/perf_event.h> + + +#ifdef CONFIG_PPC_EMULATED_STATS + +struct ppc_emulated_entry { + const char *name; + atomic_t val; +}; + +extern struct ppc_emulated { +#ifdef CONFIG_ALTIVEC + struct ppc_emulated_entry altivec; +#endif + struct ppc_emulated_entry dcba; + struct ppc_emulated_entry dcbz; + struct ppc_emulated_entry fp_pair; + struct ppc_emulated_entry isel; + struct ppc_emulated_entry mcrxr; + struct ppc_emulated_entry mfpvr; + struct ppc_emulated_entry multiple; + struct ppc_emulated_entry popcntb; + struct ppc_emulated_entry spe; + struct ppc_emulated_entry string; + struct ppc_emulated_entry unaligned; +#ifdef CONFIG_MATH_EMULATION + struct ppc_emulated_entry math; +#elif defined(CONFIG_8XX_MINIMAL_FPEMU) + struct ppc_emulated_entry 8xx; +#endif +#ifdef CONFIG_VSX + struct ppc_emulated_entry vsx; +#endif +#ifdef CONFIG_PPC64 + struct ppc_emulated_entry mfdscr; + struct ppc_emulated_entry mtdscr; +#endif +} ppc_emulated; + +extern u32 ppc_warn_emulated; + +extern void ppc_warn_emulated_print(const char *type); + +#define __PPC_WARN_EMULATED(type) \ + do { \ + atomic_inc(&ppc_emulated.type.val); \ + if (ppc_warn_emulated) \ + ppc_warn_emulated_print(ppc_emulated.type.name); \ + } while (0) + +#else /* !CONFIG_PPC_EMULATED_STATS */ + +#define __PPC_WARN_EMULATED(type) do { } while (0) + +#endif /* !CONFIG_PPC_EMULATED_STATS */ + +#define PPC_WARN_EMULATED(type, regs) \ + do { \ + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ + 1, regs, 0); \ + __PPC_WARN_EMULATED(type); \ + } while (0) + +#define PPC_WARN_ALIGNMENT(type, regs) \ + do { \ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ + 1, regs, regs->dar); \ + __PPC_WARN_EMULATED(type); \ + } while (0) + +#endif /* _ASM_POWERPC_EMULATED_OPS_H */ diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h new file mode 100644 index 00000000000..f3b0c2cc9fe --- /dev/null +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -0,0 +1,502 @@ +/* + * ePAPR hcall interface + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Author: Timur Tabi <timur@freescale.com> + * + * This file is provided under a dual BSD/GPL license. When using or + * redistributing this file, you may do so under either license. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* A "hypercall" is an "sc 1" instruction. This header file file provides C + * wrapper functions for the ePAPR hypervisor interface. It is inteded + * for use by Linux device drivers and other operating systems. + * + * The hypercalls are implemented as inline assembly, rather than assembly + * language functions in a .S file, for optimization. It allows + * the caller to issue the hypercall instruction directly, improving both + * performance and memory footprint. + */ + +#ifndef _EPAPR_HCALLS_H +#define _EPAPR_HCALLS_H + +#include <linux/types.h> +#include <linux/errno.h> +#include <asm/byteorder.h> + +#define EV_BYTE_CHANNEL_SEND 1 +#define EV_BYTE_CHANNEL_RECEIVE 2 +#define EV_BYTE_CHANNEL_POLL 3 +#define EV_INT_SET_CONFIG 4 +#define EV_INT_GET_CONFIG 5 +#define EV_INT_SET_MASK 6 +#define EV_INT_GET_MASK 7 +#define EV_INT_IACK 9 +#define EV_INT_EOI 10 +#define EV_INT_SEND_IPI 11 +#define EV_INT_SET_TASK_PRIORITY 12 +#define EV_INT_GET_TASK_PRIORITY 13 +#define EV_DOORBELL_SEND 14 +#define EV_MSGSND 15 +#define EV_IDLE 16 + +/* vendor ID: epapr */ +#define EV_LOCAL_VENDOR_ID 0 /* for private use */ +#define EV_EPAPR_VENDOR_ID 1 +#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */ +#define EV_IBM_VENDOR_ID 3 /* IBM */ +#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */ +#define EV_ENEA_VENDOR_ID 5 /* Enea */ +#define EV_WR_VENDOR_ID 6 /* Wind River Systems */ +#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */ +#define EV_KVM_VENDOR_ID 42 /* KVM */ + +/* The max number of bytes that a byte channel can send or receive per call */ +#define EV_BYTE_CHANNEL_MAX_BYTES 16 + + +#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num)) +#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num) + +/* epapr error codes */ +#define EV_EPERM 1 /* Operation not permitted */ +#define EV_ENOENT 2 /* Entry Not Found */ +#define EV_EIO 3 /* I/O error occured */ +#define EV_EAGAIN 4 /* The operation had insufficient + * resources to complete and should be + * retried + */ +#define EV_ENOMEM 5 /* There was insufficient memory to + * complete the operation */ +#define EV_EFAULT 6 /* Bad guest address */ +#define EV_ENODEV 7 /* No such device */ +#define EV_EINVAL 8 /* An argument supplied to the hcall + was out of range or invalid */ +#define EV_INTERNAL 9 /* An internal error occured */ +#define EV_CONFIG 10 /* A configuration error was detected */ +#define EV_INVALID_STATE 11 /* The object is in an invalid state */ +#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */ +#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */ + +/* + * Hypercall register clobber list + * + * These macros are used to define the list of clobbered registers during a + * hypercall. 
Technically, registers r0 and r3-r12 are always clobbered, + * but the gcc inline assembly syntax does not allow us to specify registers + * on the clobber list that are also on the input/output list. Therefore, + * the lists of clobbered registers depends on the number of register + * parmeters ("+r" and "=r") passed to the hypercall. + * + * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a + * general rule, 'x' is the number of parameters passed to the assembly + * block *except* for r11. + * + * If you're not sure, just use the smallest value of 'x' that does not + * generate a compilation error. Because these are static inline functions, + * the compiler will only check the clobber list for a function if you + * compile code that calls that function. + * + * r3 and r11 are not included in any clobbers list because they are always + * listed as output registers. + * + * XER, CTR, and LR are currently listed as clobbers because it's uncertain + * whether they will be clobbered. + * + * Note that r11 can be used as an output parameter. +*/ + +/* List of common clobbered registers. Do not use this macro. */ +#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc" + +#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS +#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10" +#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9" +#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8" +#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7" +#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6" +#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5" +#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4" + + +/* + * We use "uintptr_t" to define a register because it's guaranteed to be a + * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit + * platform. + * + * All registers are either input/output or output only. Registers that are + * initialized before making the hypercall are input/output. All + * input/output registers are represented with "+r". Output-only registers + * are represented with "=r". Do not specify any unused registers. The + * clobber list will tell the compiler that the hypercall modifies those + * registers, which is good enough. + */ + +/** + * ev_int_set_config - configure the specified interrupt + * @interrupt: the interrupt number + * @config: configuration for this interrupt + * @priority: interrupt priority + * @destination: destination CPU number + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_int_set_config(unsigned int interrupt, + uint32_t config, unsigned int priority, uint32_t destination) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + + r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG); + r3 = interrupt; + r4 = config; + r5 = priority; + r6 = destination; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6) + : : EV_HCALL_CLOBBERS4 + ); + + return r3; +} + +/** + * ev_int_get_config - return the config of the specified interrupt + * @interrupt: the interrupt number + * @config: returned configuration for this interrupt + * @priority: returned interrupt priority + * @destination: returned destination CPU number + * + * Returns 0 for success, or an error code. 
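 *
 * A minimal illustrative caller (a sketch, not part of this header; "irq"
 * is just a caller-side variable) might look like:
 *
 *	uint32_t config, destination;
 *	unsigned int priority;
 *
 *	if (ev_int_get_config(irq, &config, &priority, &destination))
 *		pr_warn("hypervisor does not know interrupt %u\n", irq);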
+ */ +static inline unsigned int ev_int_get_config(unsigned int interrupt, + uint32_t *config, unsigned int *priority, uint32_t *destination) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + + r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG); + r3 = interrupt; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6) + : : EV_HCALL_CLOBBERS4 + ); + + *config = r4; + *priority = r5; + *destination = r6; + + return r3; +} + +/** + * ev_int_set_mask - sets the mask for the specified interrupt source + * @interrupt: the interrupt number + * @mask: 0=enable interrupts, 1=disable interrupts + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_int_set_mask(unsigned int interrupt, + unsigned int mask) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK); + r3 = interrupt; + r4 = mask; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + return r3; +} + +/** + * ev_int_get_mask - returns the mask for the specified interrupt source + * @interrupt: the interrupt number + * @mask: returned mask for this interrupt (0=enabled, 1=disabled) + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_int_get_mask(unsigned int interrupt, + unsigned int *mask) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK); + r3 = interrupt; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + *mask = r4; + + return r3; +} + +/** + * ev_int_eoi - signal the end of interrupt processing + * @interrupt: the interrupt number + * + * This function signals the end of processing for the the specified + * interrupt, which must be the interrupt currently in service. By + * definition, this is also the highest-priority interrupt. + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_int_eoi(unsigned int interrupt) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = EV_HCALL_TOKEN(EV_INT_EOI); + r3 = interrupt; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/** + * ev_byte_channel_send - send characters to a byte stream + * @handle: byte stream handle + * @count: (input) num of chars to send, (output) num chars sent + * @buffer: pointer to a 16-byte buffer + * + * @buffer must be at least 16 bytes long, because all 16 bytes will be + * read from memory into registers, even if count < 16. + * + * Returns 0 for success, or an error code. 
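 *
 * An illustrative (not normative) way to push a short buffer down a byte
 * channel while honouring the fixed 16-byte staging buffer; "data", "len",
 * "handle" and "ret" are assumed caller-side variables:
 *
 *	char buf[EV_BYTE_CHANNEL_MAX_BYTES];
 *	unsigned int n = min_t(unsigned int, len, EV_BYTE_CHANNEL_MAX_BYTES);
 *
 *	memcpy(buf, data, n);
 *	ret = ev_byte_channel_send(handle, &n, buf);
 *	/* on success, n now holds the number of bytes actually sent */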
+ */ +static inline unsigned int ev_byte_channel_send(unsigned int handle, + unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES]) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r7 __asm__("r7"); + register uintptr_t r8 __asm__("r8"); + const uint32_t *p = (const uint32_t *) buffer; + + r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND); + r3 = handle; + r4 = *count; + r5 = be32_to_cpu(p[0]); + r6 = be32_to_cpu(p[1]); + r7 = be32_to_cpu(p[2]); + r8 = be32_to_cpu(p[3]); + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), + "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8) + : : EV_HCALL_CLOBBERS6 + ); + + *count = r4; + + return r3; +} + +/** + * ev_byte_channel_receive - fetch characters from a byte channel + * @handle: byte channel handle + * @count: (input) max num of chars to receive, (output) num chars received + * @buffer: pointer to a 16-byte buffer + * + * The size of @buffer must be at least 16 bytes, even if you request fewer + * than 16 characters, because we always write 16 bytes to @buffer. This is + * for performance reasons. + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_byte_channel_receive(unsigned int handle, + unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES]) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r7 __asm__("r7"); + register uintptr_t r8 __asm__("r8"); + uint32_t *p = (uint32_t *) buffer; + + r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE); + r3 = handle; + r4 = *count; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4), + "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8) + : : EV_HCALL_CLOBBERS6 + ); + + *count = r4; + p[0] = cpu_to_be32(r5); + p[1] = cpu_to_be32(r6); + p[2] = cpu_to_be32(r7); + p[3] = cpu_to_be32(r8); + + return r3; +} + +/** + * ev_byte_channel_poll - returns the status of the byte channel buffers + * @handle: byte channel handle + * @rx_count: returned count of bytes in receive queue + * @tx_count: returned count of free space in transmit queue + * + * This function reports the amount of data in the receive queue (i.e. the + * number of bytes you can read), and the amount of free space in the transmit + * queue (i.e. the number of bytes you can write). + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_byte_channel_poll(unsigned int handle, + unsigned int *rx_count, unsigned int *tx_count) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + + r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL); + r3 = handle; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5) + : : EV_HCALL_CLOBBERS3 + ); + + *rx_count = r4; + *tx_count = r5; + + return r3; +} + +/** + * ev_int_iack - acknowledge an interrupt + * @handle: handle to the target interrupt controller + * @vector: returned interrupt vector + * + * If handle is zero, the function returns the next interrupt source + * number to be handled irrespective of the hierarchy or cascading + * of interrupt controllers. If non-zero, specifies a handle to the + * interrupt controller that is the target of the acknowledge. 
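 *
 * An illustrative dispatch sequence (a sketch, not part of this header;
 * dispatch_irq() is a hypothetical stand-in for the caller's handler):
 *
 *	unsigned int vector;
 *
 *	if (!ev_int_iack(0, &vector)) {
 *		dispatch_irq(vector);
 *		ev_int_eoi(vector);
 *	}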
+ * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_int_iack(unsigned int handle, + unsigned int *vector) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = EV_HCALL_TOKEN(EV_INT_IACK); + r3 = handle; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + *vector = r4; + + return r3; +} + +/** + * ev_doorbell_send - send a doorbell to another partition + * @handle: doorbell send handle + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_doorbell_send(unsigned int handle) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND); + r3 = handle; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/** + * ev_idle -- wait for next interrupt on this core + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int ev_idle(void) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = EV_HCALL_TOKEN(EV_IDLE); + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "=r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +#endif diff --git a/arch/powerpc/include/asm/errno.h b/arch/powerpc/include/asm/errno.h new file mode 100644 index 00000000000..8c145fd17d8 --- /dev/null +++ b/arch/powerpc/include/asm/errno.h @@ -0,0 +1,11 @@ +#ifndef _ASM_POWERPC_ERRNO_H +#define _ASM_POWERPC_ERRNO_H + +#include <asm-generic/errno.h> + +#undef EDEADLOCK +#define EDEADLOCK 58 /* File locking deadlock error */ + +#define _LAST_ERRNO 516 + +#endif /* _ASM_POWERPC_ERRNO_H */ diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h new file mode 100644 index 00000000000..ac13addb849 --- /dev/null +++ b/arch/powerpc/include/asm/exception-64e.h @@ -0,0 +1,221 @@ +/* + * Definitions for use by exception code on Book3-E + * + * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_POWERPC_EXCEPTION_64E_H +#define _ASM_POWERPC_EXCEPTION_64E_H + +/* + * SPRGs usage an other considerations... + * + * Since TLB miss and other standard exceptions can be interrupted by + * critical exceptions which can themselves be interrupted by machine + * checks, and since the two later can themselves cause a TLB miss when + * hitting the linear mapping for the kernel stacks, we need to be a bit + * creative on how we use SPRGs. + * + * The base idea is that we have one SRPG reserved for critical and one + * for machine check interrupts. Those are used to save a GPR that can + * then be used to get the PACA, and store as much context as we need + * to save in there. That includes saving the SPRGs used by the TLB miss + * handler for linear mapping misses and the associated SRR0/1 due to + * the above re-entrancy issue. + * + * So here's the current usage pattern. It's done regardless of which + * SPRGs are user-readable though, thus we might have to change some of + * this later. In order to do that more easily, we use special constants + * for naming them + * + * WARNING: Some of these SPRGs are user readable. 
We need to do something + * about it as some point by making sure they can't be used to leak kernel + * critical data + */ + + +/* We are out of SPRGs so we save some things in the PACA. The normal + * exception frame is smaller than the CRIT or MC one though + */ +#define EX_R1 (0 * 8) +#define EX_CR (1 * 8) +#define EX_R10 (2 * 8) +#define EX_R11 (3 * 8) +#define EX_R14 (4 * 8) +#define EX_R15 (5 * 8) + +/* + * The TLB miss exception uses different slots. + * + * The bolted variant uses only the first six fields, + * which in combination with pgd and kernel_pgd fits in + * one 64-byte cache line. + */ + +#define EX_TLB_R10 ( 0 * 8) +#define EX_TLB_R11 ( 1 * 8) +#define EX_TLB_R14 ( 2 * 8) +#define EX_TLB_R15 ( 3 * 8) +#define EX_TLB_R16 ( 4 * 8) +#define EX_TLB_CR ( 5 * 8) +#define EX_TLB_R12 ( 6 * 8) +#define EX_TLB_R13 ( 7 * 8) +#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */ +#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */ +#define EX_TLB_SRR0 (10 * 8) +#define EX_TLB_SRR1 (11 * 8) +#ifdef CONFIG_BOOK3E_MMU_TLB_STATS +#define EX_TLB_R8 (12 * 8) +#define EX_TLB_R9 (13 * 8) +#define EX_TLB_LR (14 * 8) +#define EX_TLB_SIZE (15 * 8) +#else +#define EX_TLB_SIZE (12 * 8) +#endif + +#define START_EXCEPTION(label) \ + .globl exc_##label##_book3e; \ +exc_##label##_book3e: + +/* TLB miss exception prolog + * + * This prolog handles re-entrancy (up to 3 levels supported in the PACA + * though we currently don't test for overflow). It provides you with a + * re-entrancy safe working space of r10...r16 and CR with r12 being used + * as the exception area pointer in the PACA for that level of re-entrancy + * and r13 containing the PACA pointer. + * + * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply + * as-is for instruction exceptions. It's up to the actual exception code + * to save them as well if required. + */ +#define TLB_MISS_PROLOG \ + mtspr SPRN_SPRG_TLB_SCRATCH,r12; \ + mfspr r12,SPRN_SPRG_TLB_EXFRAME; \ + std r10,EX_TLB_R10(r12); \ + mfcr r10; \ + std r11,EX_TLB_R11(r12); \ + mfspr r11,SPRN_SPRG_TLB_SCRATCH; \ + std r13,EX_TLB_R13(r12); \ + mfspr r13,SPRN_SPRG_PACA; \ + std r14,EX_TLB_R14(r12); \ + addi r14,r12,EX_TLB_SIZE; \ + std r15,EX_TLB_R15(r12); \ + mfspr r15,SPRN_SRR1; \ + std r16,EX_TLB_R16(r12); \ + mfspr r16,SPRN_SRR0; \ + std r10,EX_TLB_CR(r12); \ + std r11,EX_TLB_R12(r12); \ + mtspr SPRN_SPRG_TLB_EXFRAME,r14; \ + std r15,EX_TLB_SRR1(r12); \ + std r16,EX_TLB_SRR0(r12); \ + TLB_MISS_PROLOG_STATS + +/* And these are the matching epilogs that restores things + * + * There are 3 epilogs: + * + * - SUCCESS : Unwinds one level + * - ERROR : restore from level 0 and reset + * - ERROR_SPECIAL : restore from current level and reset + * + * Normal errors use ERROR, that is, they restore the initial fault context + * and trigger a fault. However, there is a special case for linear mapping + * errors. Those should basically never happen, but if they do happen, we + * want the error to point out the context that did that linear mapping + * fault, not the initial level 0 (basically, we got a bogus PGF or something + * like that). 
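/*
 * Purely illustrative mirror of the EX_TLB_* save area defined above; the
 * kernel itself only ever uses the byte offsets, and this struct does not
 * exist anywhere. With CONFIG_BOOK3E_MMU_TLB_STATS three extra slots are
 * appended for r8, r9 and LR, which is why EX_TLB_SIZE changes. u64 is
 * assumed from <linux/types.h>.
 */
struct ex_tlb_frame_sketch {
	u64 r10, r11, r14, r15, r16;	/* EX_TLB_R10 .. EX_TLB_R16 */
	u64 cr;				/* EX_TLB_CR */
	u64 r12, r13;			/* EX_TLB_R12, EX_TLB_R13 */
	u64 dear, esr;			/* levels 0 and 2 only */
	u64 srr0, srr1;			/* EX_TLB_SRR0, EX_TLB_SRR1 */
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
	u64 r8, r9, lr;			/* stats-only slots */
#endif
};					/* sizeof() == EX_TLB_SIZE */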
For userland errors on the linear mapping, there is no + * difference since those are always level 0 anyway + */ + +#define TLB_MISS_RESTORE(freg) \ + ld r14,EX_TLB_CR(r12); \ + ld r10,EX_TLB_R10(r12); \ + ld r15,EX_TLB_SRR0(r12); \ + ld r16,EX_TLB_SRR1(r12); \ + mtspr SPRN_SPRG_TLB_EXFRAME,freg; \ + ld r11,EX_TLB_R11(r12); \ + mtcr r14; \ + ld r13,EX_TLB_R13(r12); \ + ld r14,EX_TLB_R14(r12); \ + mtspr SPRN_SRR0,r15; \ + ld r15,EX_TLB_R15(r12); \ + mtspr SPRN_SRR1,r16; \ + TLB_MISS_RESTORE_STATS \ + ld r16,EX_TLB_R16(r12); \ + ld r12,EX_TLB_R12(r12); \ + +#define TLB_MISS_EPILOG_SUCCESS \ + TLB_MISS_RESTORE(r12) + +#define TLB_MISS_EPILOG_ERROR \ + addi r12,r13,PACA_EXTLB; \ + TLB_MISS_RESTORE(r12) + +#define TLB_MISS_EPILOG_ERROR_SPECIAL \ + addi r11,r13,PACA_EXTLB; \ + TLB_MISS_RESTORE(r11) + +#ifdef CONFIG_BOOK3E_MMU_TLB_STATS +#define TLB_MISS_PROLOG_STATS \ + mflr r10; \ + std r8,EX_TLB_R8(r12); \ + std r9,EX_TLB_R9(r12); \ + std r10,EX_TLB_LR(r12); +#define TLB_MISS_RESTORE_STATS \ + ld r16,EX_TLB_LR(r12); \ + ld r9,EX_TLB_R9(r12); \ + ld r8,EX_TLB_R8(r12); \ + mtlr r16; +#define TLB_MISS_PROLOG_STATS_BOLTED \ + mflr r10; \ + std r8,PACA_EXTLB+EX_TLB_R8(r13); \ + std r9,PACA_EXTLB+EX_TLB_R9(r13); \ + std r10,PACA_EXTLB+EX_TLB_LR(r13); +#define TLB_MISS_RESTORE_STATS_BOLTED \ + ld r16,PACA_EXTLB+EX_TLB_LR(r13); \ + ld r9,PACA_EXTLB+EX_TLB_R9(r13); \ + ld r8,PACA_EXTLB+EX_TLB_R8(r13); \ + mtlr r16; +#define TLB_MISS_STATS_D(name) \ + addi r9,r13,MMSTAT_DSTATS+name; \ + bl .tlb_stat_inc; +#define TLB_MISS_STATS_I(name) \ + addi r9,r13,MMSTAT_ISTATS+name; \ + bl .tlb_stat_inc; +#define TLB_MISS_STATS_X(name) \ + ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \ + cmpdi cr2,r8,-1; \ + beq cr2,61f; \ + addi r9,r13,MMSTAT_DSTATS+name; \ + b 62f; \ +61: addi r9,r13,MMSTAT_ISTATS+name; \ +62: bl .tlb_stat_inc; +#define TLB_MISS_STATS_SAVE_INFO \ + std r14,EX_TLB_ESR(r12); /* save ESR */ +#define TLB_MISS_STATS_SAVE_INFO_BOLTED \ + std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */ +#else +#define TLB_MISS_PROLOG_STATS +#define TLB_MISS_RESTORE_STATS +#define TLB_MISS_PROLOG_STATS_BOLTED +#define TLB_MISS_RESTORE_STATS_BOLTED +#define TLB_MISS_STATS_D(name) +#define TLB_MISS_STATS_I(name) +#define TLB_MISS_STATS_X(name) +#define TLB_MISS_STATS_Y(name) +#define TLB_MISS_STATS_SAVE_INFO +#define TLB_MISS_STATS_SAVE_INFO_BOLTED +#endif + +#define SET_IVOR(vector_number, vector_offset) \ + li r3,vector_offset@l; \ + ori r3,r3,interrupt_base_book3e@l; \ + mtspr SPRN_IVOR##vector_number,r3; + +#endif /* _ASM_POWERPC_EXCEPTION_64E_H */ + diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h new file mode 100644 index 00000000000..8057f4f6980 --- /dev/null +++ b/arch/powerpc/include/asm/exception-64s.h @@ -0,0 +1,362 @@ +#ifndef _ASM_POWERPC_EXCEPTION_H +#define _ASM_POWERPC_EXCEPTION_H +/* + * Extracted from head_64.S + * + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP + * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> + * Adapted for Power Macintosh by Paul Mackerras. + * Low-level exception handlers and MMU support + * rewritten by Paul Mackerras. + * Copyright (C) 1996 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and + * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com + * + * This file contains the low-level support and setup for the + * PowerPC-64 platform, including trap and interrupt dispatch. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +/* + * The following macros define the code that appears as + * the prologue to each of the exception handlers. They + * are split into two parts to allow a single kernel binary + * to be used for pSeries and iSeries. + * + * We make as much of the exception code common between native + * exception handlers (including pSeries LPAR) and iSeries LPAR + * implementations as possible. + */ + +#define EX_R9 0 +#define EX_R10 8 +#define EX_R11 16 +#define EX_R12 24 +#define EX_R13 32 +#define EX_SRR0 40 +#define EX_DAR 48 +#define EX_DSISR 56 +#define EX_CCR 60 +#define EX_R3 64 +#define EX_LR 72 +#define EX_CFAR 80 + +/* + * We're short on space and time in the exception prolog, so we can't + * use the normal SET_REG_IMMEDIATE macro. Normally we just need the + * low halfword of the address, but for Kdump we need the whole low + * word. + */ +#define LOAD_HANDLER(reg, label) \ + addi reg,reg,(label)-_stext; /* virt addr of handler ... */ + +/* Exception register prefixes */ +#define EXC_HV H +#define EXC_STD + +#define __EXCEPTION_PROLOG_1(area, extra, vec) \ + GET_PACA(r13); \ + std r9,area+EX_R9(r13); /* save r9 - r12 */ \ + std r10,area+EX_R10(r13); \ + BEGIN_FTR_SECTION_NESTED(66); \ + mfspr r10,SPRN_CFAR; \ + std r10,area+EX_CFAR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + mfcr r9; \ + extra(vec); \ + std r11,area+EX_R11(r13); \ + std r12,area+EX_R12(r13); \ + GET_SCRATCH0(r10); \ + std r10,area+EX_R13(r13) +#define EXCEPTION_PROLOG_1(area, extra, vec) \ + __EXCEPTION_PROLOG_1(area, extra, vec) + +#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ + ld r12,PACAKBASE(r13); /* get high part of &label */ \ + ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ + mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ + LOAD_HANDLER(r12,label) \ + mtspr SPRN_##h##SRR0,r12; \ + mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ + mtspr SPRN_##h##SRR1,r10; \ + h##rfid; \ + b . 
/* prevent speculative execution */ +#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ + __EXCEPTION_PROLOG_PSERIES_1(label, h) + +#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \ + EXCEPTION_PROLOG_1(area, extra, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label, h); + +#define __KVMTEST(n) \ + lbz r10,HSTATE_IN_GUEST(r13); \ + cmpwi r10,0; \ + bne do_kvm_##n + +#define __KVM_HANDLER(area, h, n) \ +do_kvm_##n: \ + ld r10,area+EX_R10(r13); \ + stw r9,HSTATE_SCRATCH1(r13); \ + ld r9,area+EX_R9(r13); \ + std r12,HSTATE_SCRATCH0(r13); \ + li r12,n; \ + b kvmppc_interrupt + +#define __KVM_HANDLER_SKIP(area, h, n) \ +do_kvm_##n: \ + cmpwi r10,KVM_GUEST_MODE_SKIP; \ + ld r10,area+EX_R10(r13); \ + beq 89f; \ + stw r9,HSTATE_SCRATCH1(r13); \ + ld r9,area+EX_R9(r13); \ + std r12,HSTATE_SCRATCH0(r13); \ + li r12,n; \ + b kvmppc_interrupt; \ +89: mtocrf 0x80,r9; \ + ld r9,area+EX_R9(r13); \ + b kvmppc_skip_##h##interrupt + +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +#define KVMTEST(n) __KVMTEST(n) +#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n) +#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) + +#else +#define KVMTEST(n) +#define KVM_HANDLER(area, h, n) +#define KVM_HANDLER_SKIP(area, h, n) +#endif + +#ifdef CONFIG_KVM_BOOK3S_PR +#define KVMTEST_PR(n) __KVMTEST(n) +#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) +#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) + +#else +#define KVMTEST_PR(n) +#define KVM_HANDLER_PR(area, h, n) +#define KVM_HANDLER_PR_SKIP(area, h, n) +#endif + +#define NOTEST(n) + +/* + * The common exception prolog is used for all except a few exceptions + * such as a segment miss on a kernel address. We have to be prepared + * to take another exception from the point where we first touch the + * kernel stack onwards. + * + * On entry r13 points to the paca, r9-r13 are saved in the paca, + * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and + * SRR1, and relocation is on. + */ +#define EXCEPTION_PROLOG_COMMON(n, area) \ + andi. 
r10,r12,MSR_PR; /* See if coming from user */ \ + mr r10,r1; /* Save r1 */ \ + subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ + beq- 1f; \ + ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ +1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ + blt+ cr1,3f; /* abort if it is */ \ + li r1,(n); /* will be reloaded later */ \ + sth r1,PACA_TRAP_SAVE(r13); \ + std r3,area+EX_R3(r13); \ + addi r3,r13,area; /* r3 -> where regs are saved*/ \ + b bad_stack; \ +3: std r9,_CCR(r1); /* save CR in stackframe */ \ + std r11,_NIP(r1); /* save SRR0 in stackframe */ \ + std r12,_MSR(r1); /* save SRR1 in stackframe */ \ + std r10,0(r1); /* make stack chain pointer */ \ + std r0,GPR0(r1); /* save r0 in stackframe */ \ + std r10,GPR1(r1); /* save r1 in stackframe */ \ + ACCOUNT_CPU_USER_ENTRY(r9, r10); \ + std r2,GPR2(r1); /* save r2 in stackframe */ \ + SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ + SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ + ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ + ld r10,area+EX_R10(r13); \ + std r9,GPR9(r1); \ + std r10,GPR10(r1); \ + ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ + ld r10,area+EX_R12(r13); \ + ld r11,area+EX_R13(r13); \ + std r9,GPR11(r1); \ + std r10,GPR12(r1); \ + std r11,GPR13(r1); \ + BEGIN_FTR_SECTION_NESTED(66); \ + ld r10,area+EX_CFAR(r13); \ + std r10,ORIG_GPR3(r1); \ + END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ + mflr r9; /* save LR in stackframe */ \ + std r9,_LINK(r1); \ + mfctr r10; /* save CTR in stackframe */ \ + std r10,_CTR(r1); \ + lbz r10,PACASOFTIRQEN(r13); \ + mfspr r11,SPRN_XER; /* save XER in stackframe */ \ + std r10,SOFTE(r1); \ + std r11,_XER(r1); \ + li r9,(n)+1; \ + std r9,_TRAP(r1); /* set trap number */ \ + li r10,0; \ + ld r11,exception_marker@toc(r2); \ + std r10,RESULT(r1); /* clear regs->result */ \ + std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ + ACCOUNT_STOLEN_TIME + +/* + * Exception vectors. + */ +#define STD_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ + .globl label##_pSeries; \ +label##_pSeries: \ + HMT_MEDIUM; \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ + EXC_STD, KVMTEST_PR, vec) + +#define STD_EXCEPTION_HV(loc, vec, label) \ + . = loc; \ + .globl label##_hv; \ +label##_hv: \ + HMT_MEDIUM; \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ + EXC_HV, KVMTEST, vec) + +#define __SOFTEN_TEST(h) \ + lbz r10,PACASOFTIRQEN(r13); \ + cmpwi r10,0; \ + beq masked_##h##interrupt +#define _SOFTEN_TEST(h) __SOFTEN_TEST(h) + +#define SOFTEN_TEST_PR(vec) \ + KVMTEST_PR(vec); \ + _SOFTEN_TEST(EXC_STD) + +#define SOFTEN_TEST_HV(vec) \ + KVMTEST(vec); \ + _SOFTEN_TEST(EXC_HV) + +#define SOFTEN_TEST_HV_201(vec) \ + KVMTEST(vec); \ + _SOFTEN_TEST(EXC_STD) + +#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ + HMT_MEDIUM; \ + SET_SCRATCH0(r13); /* save r13 */ \ + __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label##_common, h); +#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ + __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) + +#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ + .globl label##_pSeries; \ +label##_pSeries: \ + _MASKABLE_EXCEPTION_PSERIES(vec, label, \ + EXC_STD, SOFTEN_TEST_PR) + +#define MASKABLE_EXCEPTION_HV(loc, vec, label) \ + . 
= loc; \ + .globl label##_hv; \ +label##_hv: \ + _MASKABLE_EXCEPTION_PSERIES(vec, label, \ + EXC_HV, SOFTEN_TEST_HV) + +#ifdef CONFIG_PPC_ISERIES +#define DISABLE_INTS \ + li r11,0; \ + stb r11,PACASOFTIRQEN(r13); \ +BEGIN_FW_FTR_SECTION; \ + stb r11,PACAHARDIRQEN(r13); \ +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ + TRACE_DISABLE_INTS; \ +BEGIN_FW_FTR_SECTION; \ + mfmsr r10; \ + ori r10,r10,MSR_EE; \ + mtmsrd r10,1; \ +END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) +#else +#define DISABLE_INTS \ + li r11,0; \ + stb r11,PACASOFTIRQEN(r13); \ + stb r11,PACAHARDIRQEN(r13); \ + TRACE_DISABLE_INTS +#endif /* CONFIG_PPC_ISERIES */ + +#define ENABLE_INTS \ + ld r12,_MSR(r1); \ + mfmsr r11; \ + rlwimi r11,r12,0,MSR_EE; \ + mtmsrd r11,1 + +#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + DISABLE_INTS; \ + bl .save_nvgprs; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except + +/* + * Like STD_EXCEPTION_COMMON, but for exceptions that can occur + * in the idle task and therefore need the special idle handling. + */ +#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + FINISH_NAP; \ + DISABLE_INTS; \ + bl .save_nvgprs; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except + +#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ + .align 7; \ + .globl label##_common; \ +label##_common: \ + EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ + FINISH_NAP; \ + DISABLE_INTS; \ +BEGIN_FTR_SECTION \ + bl .ppc64_runlatch_on; \ +END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + bl hdlr; \ + b .ret_from_except_lite + +/* + * When the idle code in power4_idle puts the CPU into NAP mode, + * it has to do so in a loop, and relies on the external interrupt + * and decrementer interrupt entry code to get it out of the loop. + * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags + * to signal that it is in the loop and needs help to get out. + */ +#ifdef CONFIG_PPC_970_NAP +#define FINISH_NAP \ +BEGIN_FTR_SECTION \ + clrrdi r11,r1,THREAD_SHIFT; \ + ld r9,TI_LOCAL_FLAGS(r11); \ + andi. 
r10,r9,_TLF_NAPPING; \ + bnel power4_fixup_nap; \ +END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) +#else +#define FINISH_NAP +#endif + +#endif /* _ASM_POWERPC_EXCEPTION_H */ diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h new file mode 100644 index 00000000000..411af8d17a6 --- /dev/null +++ b/arch/powerpc/include/asm/fb.h @@ -0,0 +1,21 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ + +#include <linux/fb.h> +#include <linux/fs.h> +#include <asm/page.h> + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/arch/powerpc/include/asm/fcntl.h b/arch/powerpc/include/asm/fcntl.h new file mode 100644 index 00000000000..ce5c4516d40 --- /dev/null +++ b/arch/powerpc/include/asm/fcntl.h @@ -0,0 +1,11 @@ +#ifndef _ASM_FCNTL_H +#define _ASM_FCNTL_H + +#define O_DIRECTORY 040000 /* must be a directory */ +#define O_NOFOLLOW 0100000 /* don't follow links */ +#define O_LARGEFILE 0200000 +#define O_DIRECT 0400000 /* direct disk access hint */ + +#include <asm-generic/fcntl.h> + +#endif /* _ASM_FCNTL_H */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h new file mode 100644 index 00000000000..9a67a38bf7b --- /dev/null +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -0,0 +1,187 @@ +#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H +#define __ASM_POWERPC_FEATURE_FIXUPS_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * Feature section common macros + * + * Note that the entries now contain offsets between the table entry + * and the code rather than absolute code pointers in order to be + * useable with the vdso shared library. There is also an assumption + * that values will be negative, that is, the fixup table has to be + * located after the code it fixes up. + */ +#if defined(CONFIG_PPC64) && !defined(__powerpc64__) +/* 64 bits kernel, 32 bits code (ie. 
vdso32) */ +#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_OFFSET .long 0xffffffff; .long +#elif defined(CONFIG_PPC64) +#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_OFFSET .llong +#else +#define FTR_ENTRY_LONG .long +#define FTR_ENTRY_OFFSET .long +#endif + +#define START_FTR_SECTION(label) label##1: + +#define FTR_SECTION_ELSE_NESTED(label) \ +label##2: \ + .pushsection __ftr_alt_##label,"a"; \ + .align 2; \ +label##3: + +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +label##4: \ + .popsection; \ + .pushsection sect,"a"; \ + .align 3; \ +label##5: \ + FTR_ENTRY_LONG msk; \ + FTR_ENTRY_LONG val; \ + FTR_ENTRY_OFFSET label##1b-label##5b; \ + FTR_ENTRY_OFFSET label##2b-label##5b; \ + FTR_ENTRY_OFFSET label##3b-label##5b; \ + FTR_ENTRY_OFFSET label##4b-label##5b; \ + .ifgt (label##4b- label##3b)-(label##2b- label##1b); \ + .error "Feature section else case larger than body"; \ + .endif; \ + .popsection; + + +/* CPU feature dependent sections */ +#define BEGIN_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_FTR_SECTION START_FTR_SECTION(97) + +#define END_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) + +#define END_FTR_SECTION(msk, val) \ + END_FTR_SECTION_NESTED(msk, val, 97) + +#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) +#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) + +/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ +#define FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) +#define ALT_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) +#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_FTR_SECTION_END(msk, val) \ + ALT_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_FTR_SECTION_END_IFSET(msk) \ + ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_FTR_SECTION_END_IFCLR(msk) \ + ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + +/* MMU feature dependent sections */ +#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97) + +#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) + +#define END_MMU_FTR_SECTION(msk, val) \ + END_MMU_FTR_SECTION_NESTED(msk, val, 97) + +#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk)) +#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0) + +/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ +#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) +#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97) +#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_MMU_FTR_SECTION_END(msk, val) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + +/* Firmware feature dependent 
sections */ +#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) + +#define END_FW_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) + +#define END_FW_FTR_SECTION(msk, val) \ + END_FW_FTR_SECTION_NESTED(msk, val, 97) + +#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) +#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) + +/* Firmware feature sections with alternatives */ +#define FW_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) +#define FW_FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) +#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) +#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_FW_FTR_SECTION_END(msk, val) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_FW_FTR_SECTION_END_IFSET(msk) \ + ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ + ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + +#ifndef __ASSEMBLY__ + +#define ASM_FTR_IF(section_if, section_else, msk, val) \ + stringify_in_c(BEGIN_FTR_SECTION) \ + section_if "; " \ + stringify_in_c(FTR_SECTION_ELSE) \ + section_else "; " \ + stringify_in_c(ALT_FTR_SECTION_END((msk), (val))) + +#define ASM_FTR_IFSET(section_if, section_else, msk) \ + ASM_FTR_IF(section_if, section_else, (msk), (msk)) + +#define ASM_FTR_IFCLR(section_if, section_else, msk) \ + ASM_FTR_IF(section_if, section_else, (msk), 0) + +#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ + stringify_in_c(BEGIN_MMU_FTR_SECTION) \ + section_if "; " \ + stringify_in_c(MMU_FTR_SECTION_ELSE) \ + section_else "; " \ + stringify_in_c(ALT_MMU_FTR_SECTION_END((msk), (val))) + +#define ASM_MMU_FTR_IFSET(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), (msk)) + +#define ASM_MMU_FTR_IFCLR(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), 0) + +#endif /* __ASSEMBLY__ */ + +/* LWSYNC feature sections */ +#define START_LWSYNC_SECTION(label) label##1: +#define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \ +label##2: \ + .pushsection sect,"a"; \ + .align 2; \ +label##3: \ + FTR_ENTRY_OFFSET label##1b-label##3b; \ + .popsection; + +#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h new file mode 100644 index 00000000000..14db29b18d0 --- /dev/null +++ b/arch/powerpc/include/asm/firmware.h @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * Modifications for ppc64: + * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
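/*
 * A hedged sketch of how the C-side ASM_FTR_* helpers from feature-fixups.h
 * above are meant to be used: they expand to a string for inline assembly,
 * and the boot-time fixup code patches in the "else" sequence when the
 * feature bit is clear. The choice of CPU_FTR_LWSYNC and the lwsync/sync
 * pair here is only an illustration, not a statement about where the
 * kernel actually uses this macro.
 */
static inline void example_feature_patched_barrier(void)
{
	__asm__ __volatile__ (ASM_FTR_IFSET("lwsync", "sync", %0)
			      : : "i" (CPU_FTR_LWSYNC) : "memory");
}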
+ */ +#ifndef __ASM_POWERPC_FIRMWARE_H +#define __ASM_POWERPC_FIRMWARE_H + +#ifdef __KERNEL__ + +#include <asm/asm-compat.h> +#include <asm/feature-fixups.h> + +/* firmware feature bitmask values */ +#define FIRMWARE_MAX_FEATURES 63 + +#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001) +#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002) +#define FW_FEATURE_SPRG0 ASM_CONST(0x0000000000000004) +#define FW_FEATURE_DABR ASM_CONST(0x0000000000000008) +#define FW_FEATURE_COPY ASM_CONST(0x0000000000000010) +#define FW_FEATURE_ASR ASM_CONST(0x0000000000000020) +#define FW_FEATURE_DEBUG ASM_CONST(0x0000000000000040) +#define FW_FEATURE_TERM ASM_CONST(0x0000000000000080) +#define FW_FEATURE_PERF ASM_CONST(0x0000000000000100) +#define FW_FEATURE_DUMP ASM_CONST(0x0000000000000200) +#define FW_FEATURE_INTERRUPT ASM_CONST(0x0000000000000400) +#define FW_FEATURE_MIGRATE ASM_CONST(0x0000000000000800) +#define FW_FEATURE_PERFMON ASM_CONST(0x0000000000001000) +#define FW_FEATURE_CRQ ASM_CONST(0x0000000000002000) +#define FW_FEATURE_VIO ASM_CONST(0x0000000000004000) +#define FW_FEATURE_RDMA ASM_CONST(0x0000000000008000) +#define FW_FEATURE_LLAN ASM_CONST(0x0000000000010000) +#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000000020000) +#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000) +#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000) +#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000) +#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000) +#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000) +#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) +#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) +#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) +#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) +#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) +#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) +#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000) + +#ifndef __ASSEMBLY__ + +enum { +#ifdef CONFIG_PPC64 + FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE | + FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY | + FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM | + FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT | + FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ | + FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | + FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | + FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | + FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, + FW_FEATURE_PSERIES_ALWAYS = 0, + FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, + FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, + FW_FEATURE_POWERNV_ALWAYS = 0, + FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, + FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, + FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT, + FW_FEATURE_CELLEB_ALWAYS = 0, + FW_FEATURE_NATIVE_POSSIBLE = 0, + FW_FEATURE_NATIVE_ALWAYS = 0, + FW_FEATURE_POSSIBLE = +#ifdef CONFIG_PPC_PSERIES + FW_FEATURE_PSERIES_POSSIBLE | +#endif +#ifdef CONFIG_PPC_ISERIES + FW_FEATURE_ISERIES_POSSIBLE | +#endif +#ifdef CONFIG_PPC_POWERNV + FW_FEATURE_POWERNV_POSSIBLE | +#endif +#ifdef CONFIG_PPC_PS3 + FW_FEATURE_PS3_POSSIBLE | +#endif +#ifdef CONFIG_PPC_CELLEB + FW_FEATURE_CELLEB_POSSIBLE | +#endif +#ifdef CONFIG_PPC_NATIVE + FW_FEATURE_NATIVE_ALWAYS | +#endif + 0, + FW_FEATURE_ALWAYS = +#ifdef CONFIG_PPC_PSERIES + FW_FEATURE_PSERIES_ALWAYS & 
+#endif +#ifdef CONFIG_PPC_ISERIES + FW_FEATURE_ISERIES_ALWAYS & +#endif +#ifdef CONFIG_PPC_POWERNV + FW_FEATURE_POWERNV_ALWAYS & +#endif +#ifdef CONFIG_PPC_PS3 + FW_FEATURE_PS3_ALWAYS & +#endif +#ifdef CONFIG_PPC_CELLEB + FW_FEATURE_CELLEB_ALWAYS & +#endif +#ifdef CONFIG_PPC_NATIVE + FW_FEATURE_NATIVE_ALWAYS & +#endif + FW_FEATURE_POSSIBLE, + +#else /* CONFIG_PPC64 */ + FW_FEATURE_POSSIBLE = 0, + FW_FEATURE_ALWAYS = 0, +#endif +}; + +/* This is used to identify firmware features which are available + * to the kernel. + */ +extern unsigned long powerpc_firmware_features; + +#define firmware_has_feature(feature) \ + ((FW_FEATURE_ALWAYS & (feature)) || \ + (FW_FEATURE_POSSIBLE & powerpc_firmware_features & (feature))) + +extern void system_reset_fwnmi(void); +extern void machine_check_fwnmi(void); + +/* This is true if we are using the firmware NMI handler (typically LPAR) */ +extern int fwnmi_active; + +extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* __ASM_POWERPC_FIRMWARE_H */ diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h new file mode 100644 index 00000000000..5c2c0233175 --- /dev/null +++ b/arch/powerpc/include/asm/fixmap.h @@ -0,0 +1,109 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Copyright 2008 Freescale Semiconductor Inc. + * Port to powerpc added by Kumar Gala + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#ifndef __ASSEMBLY__ +#include <linux/kernel.h> +#include <asm/page.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ +enum fixed_addresses { + FIX_HOLE, + /* reserve the top 128K for early debugging purposes */ + FIX_EARLY_DEBUG_TOP = FIX_HOLE, + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1, +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, +#endif + /* FIX_PCIE_MCFG, */ + __end_of_fixed_addresses +}; + +extern void __set_fixmap (enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) +/* + * Some hardware wants to get fixmapped without caching. 
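/*
 * Minimal usage sketch for firmware_has_feature() above (illustrative
 * only): guard a code path that only makes sense under a shared-processor
 * LPAR. Because FW_FEATURE_ALWAYS and FW_FEATURE_POSSIBLE are compile-time
 * constants, tests that can never be true on the configured platforms are
 * optimised away entirely.
 */
static inline int example_is_shared_lpar(void)
{
	return firmware_has_feature(FW_FEATURE_LPAR) &&
	       firmware_has_feature(FW_FEATURE_SPLPAR);
}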
+ */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NCG) + +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, __pgprot(0)) + +#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + /* + * this branch gets completely eliminated after inlining, + * except when someone tries to use fixaddr indices in an + * illegal way. (such as mixing up address types or using + * out-of-range indices). + * + * If it doesn't get removed, the linker will complain + * loudly with a reasonably clear error message.. + */ + if (idx >= __end_of_fixed_addresses) + __this_fixmap_does_not_exist(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif /* !__ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h new file mode 100644 index 00000000000..936a904ae78 --- /dev/null +++ b/arch/powerpc/include/asm/floppy.h @@ -0,0 +1,213 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
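/*
 * Illustrative fixmap sketch using the definitions above: bind a physical
 * page to a compile-time slot and get its fixed virtual address back. The
 * early-debug slot is used here only because it is a conveniently named
 * index; any valid enum fixed_addresses entry works the same way.
 */
static inline void *example_map_debug_page(phys_addr_t phys)
{
	set_fixmap(FIX_EARLY_DEBUG_BASE, phys);		/* PAGE_KERNEL mapping */

	return (void *)fix_to_virt(FIX_EARLY_DEBUG_BASE);
}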
+ * + * Copyright (C) 1995 + */ +#ifndef __ASM_POWERPC_FLOPPY_H +#define __ASM_POWERPC_FLOPPY_H +#ifdef __KERNEL__ + +#include <asm/machdep.h> + +#define fd_inb(port) inb_p(port) +#define fd_outb(value,port) outb_p(value,port) + +#define fd_enable_dma() enable_dma(FLOPPY_DMA) +#define fd_disable_dma() fd_ops->_disable_dma(FLOPPY_DMA) +#define fd_free_dma() fd_ops->_free_dma(FLOPPY_DMA) +#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) +#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode) +#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count) +#define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA) +#define fd_enable_irq() enable_irq(FLOPPY_IRQ) +#define fd_disable_irq() disable_irq(FLOPPY_IRQ) +#define fd_cacheflush(addr,size) /* nothing */ +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); + +#include <linux/pci.h> +#include <asm/ppc-pci.h> /* for isa_bridge_pcidev */ + +#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io) + +static int fd_request_dma(void); + +struct fd_dma_ops { + void (*_disable_dma)(unsigned int dmanr); + void (*_free_dma)(unsigned int dmanr); + int (*_get_dma_residue)(unsigned int dummy); + int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); +}; + +static int virtual_dma_count; +static int virtual_dma_residue; +static char *virtual_dma_addr; +static int virtual_dma_mode; +static int doing_vdma; +static struct fd_dma_ops *fd_ops; + +static irqreturn_t floppy_hardint(int irq, void *dev_id) +{ + unsigned char st; + int lcount; + char *lptr; + + if (!doing_vdma) + return floppy_interrupt(irq, dev_id); + + + st = 1; + for (lcount=virtual_dma_count, lptr=virtual_dma_addr; + lcount; lcount--, lptr++) { + st=inb(virtual_dma_port+4) & 0xa0 ; + if (st != 0xa0) + break; + if (virtual_dma_mode) + outb_p(*lptr, virtual_dma_port+5); + else + *lptr = inb_p(virtual_dma_port+5); + } + virtual_dma_count = lcount; + virtual_dma_addr = lptr; + st = inb(virtual_dma_port+4); + + if (st == 0x20) + return IRQ_HANDLED; + if (!(st & 0x20)) { + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; + doing_vdma = 0; + floppy_interrupt(irq, dev_id); + return IRQ_HANDLED; + } + return IRQ_HANDLED; +} + +static void vdma_disable_dma(unsigned int dummy) +{ + doing_vdma = 0; + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; +} + +static void vdma_nop(unsigned int dummy) +{ +} + + +static int vdma_get_dma_residue(unsigned int dummy) +{ + return virtual_dma_count + virtual_dma_residue; +} + + +static int fd_request_irq(void) +{ + if (can_use_virtual_dma) + return request_irq(FLOPPY_IRQ, floppy_hardint, + 0, "floppy", NULL); + else + return request_irq(FLOPPY_IRQ, floppy_interrupt, + 0, "floppy", NULL); +} + +static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) +{ + doing_vdma = 1; + virtual_dma_port = io; + virtual_dma_mode = (mode == DMA_MODE_WRITE); + virtual_dma_addr = addr; + virtual_dma_count = size; + virtual_dma_residue = 0; + return 0; +} + +static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) +{ + static unsigned long prev_size; + static dma_addr_t bus_addr = 0; + static char *prev_addr; + static int prev_dir; + int dir; + + doing_vdma = 0; + dir = (mode == DMA_MODE_READ) ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; + + if (bus_addr + && (addr != prev_addr || size != prev_size || dir != prev_dir)) { + /* different from last time -- unmap prev */ + pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir); + bus_addr = 0; + } + + if (!bus_addr) /* need to map it */ + bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir); + + /* remember this one as prev */ + prev_addr = addr; + prev_size = size; + prev_dir = dir; + + fd_clear_dma_ff(); + fd_cacheflush(addr, size); + fd_set_dma_mode(mode); + set_dma_addr(FLOPPY_DMA, bus_addr); + fd_set_dma_count(size); + virtual_dma_port = io; + fd_enable_dma(); + + return 0; +} + +static struct fd_dma_ops real_dma_ops = +{ + ._disable_dma = disable_dma, + ._free_dma = free_dma, + ._get_dma_residue = get_dma_residue, + ._dma_setup = hard_dma_setup +}; + +static struct fd_dma_ops virt_dma_ops = +{ + ._disable_dma = vdma_disable_dma, + ._free_dma = vdma_nop, + ._get_dma_residue = vdma_get_dma_residue, + ._dma_setup = vdma_dma_setup +}; + +static int fd_request_dma(void) +{ + if (can_use_virtual_dma & 1) { + fd_ops = &virt_dma_ops; + return 0; + } + else { + fd_ops = &real_dma_ops; + return request_dma(FLOPPY_DMA, "floppy"); + } +} + +static int FDC1 = 0x3f0; +static int FDC2 = -1; + +/* + * Again, the CMOS information not available + */ +#define FLOPPY0_TYPE 6 +#define FLOPPY1_TYPE 0 + +#define N_FDC 2 /* Don't change this! */ +#define N_DRIVE 8 + +/* + * The PowerPC has no problems with floppy DMA crossing 64k borders. + */ +#define CROSS_64KB(a,s) (0) + +#define EXTRA_FLOPPY_PARAMS + +#endif /* __KERNEL__ */ +#endif /* __ASM_POWERPC_FLOPPY_H */ diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h new file mode 100644 index 00000000000..9361cd5342c --- /dev/null +++ b/arch/powerpc/include/asm/fs_pd.h @@ -0,0 +1,50 @@ +/* + * Platform information definitions. + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef FS_PD_H +#define FS_PD_H +#include <sysdev/fsl_soc.h> +#include <asm/time.h> + +#ifdef CONFIG_CPM2 +#include <asm/cpm2.h> + +#if defined(CONFIG_8260) +#include <asm/mpc8260.h> +#endif + +#define cpm2_map(member) (&cpm2_immr->member) +#define cpm2_map_size(member, size) (&cpm2_immr->member) +#define cpm2_unmap(addr) do {} while(0) +#endif + +#ifdef CONFIG_8xx +#include <asm/8xx_immap.h> +#include <asm/mpc8xx.h> + +extern immap_t __iomem *mpc8xx_immr; + +#define immr_map(member) (&mpc8xx_immr->member) +#define immr_map_size(member, size) (&mpc8xx_immr->member) +#define immr_unmap(addr) do {} while (0) +#endif + +static inline int uart_baudrate(void) +{ + return get_baudrate(); +} + +static inline int uart_clock(void) +{ + return ppc_proc_freq; +} + +#endif diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h new file mode 100644 index 00000000000..2af2bdc37b2 --- /dev/null +++ b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h @@ -0,0 +1,48 @@ +/* + * Copyright 2009 Freescale Semiconductor, Inc. + * + * Cache SRAM handling for QorIQ platform + * + * Author: Vivek Mahajan <vivek.mahajan@freescale.com> + + * This file is derived from the original work done + * by Sylvain Munaut for the Bestcomm SRAM allocator. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ +#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ + +#include <asm/rheap.h> +#include <linux/spinlock.h> + +/* + * Cache-SRAM + */ + +struct mpc85xx_cache_sram { + phys_addr_t base_phys; + void *base_virt; + unsigned int size; + rh_info_t *rh; + spinlock_t lock; +}; + +extern void mpc85xx_cache_sram_free(void *ptr); +extern void *mpc85xx_cache_sram_alloc(unsigned int size, + phys_addr_t *phys, unsigned int align); + +#endif /* __AMS_POWERPC_FSL_85XX_CACHE_SRAM_H__ */ diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h new file mode 100644 index 00000000000..8e8c9b5032d --- /dev/null +++ b/arch/powerpc/include/asm/fsl_gtm.h @@ -0,0 +1,47 @@ +/* + * Freescale General-purpose Timers Module + * + * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Shlomi Gridish <gridish@freescale.com> + * Jerry Huang <Chang-Ming.Huang@freescale.com> + * Copyright (c) MontaVista Software, Inc. 2008. + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __ASM_FSL_GTM_H +#define __ASM_FSL_GTM_H + +#include <linux/types.h> + +struct gtm; + +struct gtm_timer { + unsigned int irq; + + struct gtm *gtm; + bool requested; + u8 __iomem *gtcfr; + __be16 __iomem *gtmdr; + __be16 __iomem *gtpsr; + __be16 __iomem *gtcnr; + __be16 __iomem *gtrfr; + __be16 __iomem *gtevr; +}; + +extern struct gtm_timer *gtm_get_timer16(void); +extern struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm, + unsigned int timer); +extern void gtm_put_timer16(struct gtm_timer *tmr); +extern int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, + bool reload); +extern int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec, + bool reload); +extern void gtm_stop_timer16(struct gtm_timer *tmr); +extern void gtm_ack_timer16(struct gtm_timer *tmr, u16 events); + +#endif /* __ASM_FSL_GTM_H */ diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h new file mode 100644 index 00000000000..bebd12463ec --- /dev/null +++ b/arch/powerpc/include/asm/fsl_guts.h @@ -0,0 +1,197 @@ +/** + * Freecale 85xx and 86xx Global Utilties register set + * + * Authors: Jeff Brown + * Timur Tabi <timur@freescale.com> + * + * Copyright 2004,2007 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
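/*
 * Minimal sketch of the Cache-SRAM allocator declared above (illustrative
 * only): carve an aligned scratch buffer out of the L2-as-SRAM pool and
 * hand back both its virtual and physical addresses. The NULL-on-failure
 * convention is an assumption about the allocator, not something this
 * header guarantees.
 */
static void *example_get_sram_buffer(unsigned int size, phys_addr_t *phys)
{
	void *virt = mpc85xx_cache_sram_alloc(size, phys, 32 /* align */);

	if (!virt)
		return NULL;	/* no SRAM carved out, or pool exhausted */

	/* ... use virt (CPU side) and *phys (device side) ... */
	return virt;		/* release later with mpc85xx_cache_sram_free(virt) */
}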
+ */ + +#ifndef __ASM_POWERPC_FSL_GUTS_H__ +#define __ASM_POWERPC_FSL_GUTS_H__ +#ifdef __KERNEL__ + +/* + * These #ifdefs are safe because it's not possible to build a kernel that + * runs on e500 and e600 cores. + */ + +#if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx) +#error Only 85xx and 86xx SOCs are supported +#endif + +/** + * Global Utility Registers. + * + * Not all registers defined in this structure are available on all chips, so + * you are expected to know whether a given register actually exists on your + * chip before you access it. + * + * Also, some registers are similar on different chips but have slightly + * different names. In these cases, one name is chosen to avoid extraneous + * #ifdefs. + */ +#ifdef CONFIG_PPC_85xx +struct ccsr_guts_85xx { +#else +struct ccsr_guts_86xx { +#endif + __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ + __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; + __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u8 res024[0x30 - 0x24]; + __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; + __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u8 res044[0x50 - 0x44]; + __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u8 res054[0x60 - 0x54]; + __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ + __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ + __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; + __be32 devdisr; /* 0x.0070 - Device Disable Control */ + __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; + __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ + __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ + __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ + __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ + __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ + __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ + __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ + __be32 autorstsr; /* 0x.009c - Automatic reset status register */ + __be32 pvr; /* 0x.00a0 - Processor Version Register */ + __be32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; + __be32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; +#ifdef CONFIG_PPC_85xx + __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register */ +#else + __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */ +#endif + u8 res0c4[0x224 - 0xc4]; + __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x800 - 0x22c]; + __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; + __be32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; + __be32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; + __be32 elbccr; /* 0x.0914 - 
eLBC Control Register */ + u8 res918[0xb20 - 0x918]; + __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; + __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; + __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; + __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u8 rese28[0xf04 - 0xe28]; + __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u8 resf0c[0xf2c - 0xf0c]; + __be32 itcr; /* 0x.0f2c - Internal transaction control register */ + u8 resf30[0xf40 - 0xf30]; + __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ +} __attribute__ ((packed)); + +#ifdef CONFIG_PPC_86xx + +#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ +#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ + +/* + * Set the DMACR register in the GUTS + * + * The DMACR register determines the source of initiated transfers for each + * channel on each DMA controller. Rather than have a bunch of repetitive + * macros for the bit patterns, we just have a function that calculates + * them. + * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) + */ +static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts, + unsigned int co, unsigned int ch, unsigned int device) +{ + unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); + + clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); +} + +#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 +#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ +#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ +#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ +#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ +#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ +#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 +#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 +#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 +#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 + +/* + * Set the DMA external control bits in the GUTS + * + * The DMA external control bits in the PMUXCR are only meaningful for + * channels 0 and 3. Any other channels are ignored. 
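/*
 * Sketch of routing a DMA channel to the SSI with the helpers documented
 * here (illustrative only; the ioremap'd guts pointer is assumed to have
 * been obtained from the device tree elsewhere): select the SSI as the
 * source for controller 1, channel 0, and turn on external control for
 * that channel via the PMUXCR helper defined just below.
 */
static void example_route_dma_to_ssi(struct ccsr_guts_86xx __iomem *guts)
{
	guts_set_dmacr(guts, 1, 0, CCSR_GUTS_DMACR_DEV_SSI);
	guts_set_pmuxcr_dma(guts, 1, 0, 1);
}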
+ * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * value: the new value for the bit (0 or 1) + */ +static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts, + unsigned int co, unsigned int ch, unsigned int value) +{ + if ((ch == 0) || (ch == 3)) { + unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; + + clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); + } +} + +#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 +#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 +#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ + (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) +#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 +#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 +#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) +#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF +#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) + +#endif + +#endif +#endif diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h new file mode 100644 index 00000000000..922d9b5fe3d --- /dev/null +++ b/arch/powerpc/include/asm/fsl_hcalls.h @@ -0,0 +1,655 @@ +/* + * Freescale hypervisor call interface + * + * Copyright 2008-2010 Freescale Semiconductor, Inc. + * + * Author: Timur Tabi <timur@freescale.com> + * + * This file is provided under a dual BSD/GPL license. When using or + * redistributing this file, you may do so under either license. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
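/*
 * Sketch of programming a pixel clock with the CLKDVDR fields above
 * (illustrative only): clear the divider and delay fields, then enable
 * the pixel clock with a new divider. clrsetbits_be32() is the same
 * accessor already used by guts_set_dmacr() above.
 */
static void example_set_pixel_clock(struct ccsr_guts_86xx __iomem *guts,
				    unsigned int divider)
{
	clrsetbits_be32(&guts->clkdvdr,
			CCSR_GUTS_CLKDVDR_PXCKDLY_MASK |
			CCSR_GUTS_CLKDVDR_PXCLK_MASK,
			CCSR_GUTS_CLKDVDR_PXCKEN |
			CCSR_GUTS_CLKDVDR_PXCLK(divider));
}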
+ */ + +#ifndef _FSL_HCALLS_H +#define _FSL_HCALLS_H + +#include <linux/types.h> +#include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/epapr_hcalls.h> + +#define FH_API_VERSION 1 + +#define FH_ERR_GET_INFO 1 +#define FH_PARTITION_GET_DTPROP 2 +#define FH_PARTITION_SET_DTPROP 3 +#define FH_PARTITION_RESTART 4 +#define FH_PARTITION_GET_STATUS 5 +#define FH_PARTITION_START 6 +#define FH_PARTITION_STOP 7 +#define FH_PARTITION_MEMCPY 8 +#define FH_DMA_ENABLE 9 +#define FH_DMA_DISABLE 10 +#define FH_SEND_NMI 11 +#define FH_VMPIC_GET_MSIR 12 +#define FH_SYSTEM_RESET 13 +#define FH_GET_CORE_STATE 14 +#define FH_ENTER_NAP 15 +#define FH_EXIT_NAP 16 +#define FH_CLAIM_DEVICE 17 +#define FH_PARTITION_STOP_DMA 18 + +/* vendor ID: Freescale Semiconductor */ +#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num) + +/* + * We use "uintptr_t" to define a register because it's guaranteed to be a + * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit + * platform. + * + * All registers are either input/output or output only. Registers that are + * initialized before making the hypercall are input/output. All + * input/output registers are represented with "+r". Output-only registers + * are represented with "=r". Do not specify any unused registers. The + * clobber list will tell the compiler that the hypercall modifies those + * registers, which is good enough. + */ + +/** + * fh_send_nmi - send NMI to virtual cpu(s). + * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask. + * + * Returns 0 for success, or EINVAL for invalid vcpu_mask. + */ +static inline unsigned int fh_send_nmi(unsigned int vcpu_mask) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_SEND_NMI); + r3 = vcpu_mask; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/* Arbitrary limits to avoid excessive memory allocation in hypervisor */ +#define FH_DTPROP_MAX_PATHLEN 4096 +#define FH_DTPROP_MAX_PROPLEN 32768 + +/** + * fh_partiton_get_dtprop - get a property from a guest device tree. + * @handle: handle of partition whose device tree is to be accessed + * @dtpath_addr: physical address of device tree path to access + * @propname_addr: physical address of name of property + * @propvalue_addr: physical address of property value buffer + * @propvalue_len: length of buffer on entry, length of property on return + * + * Returns zero on success, non-zero on error. 
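/*
 * Sketch of fetching a property from another partition's tree with
 * fh_partition_get_dtprop(), whose definition follows (illustrative only;
 * error handling trimmed). The hypercall takes guest physical addresses,
 * hence the virt_to_phys() conversions, and *len must hold the buffer
 * size on entry. The path and property names are made-up examples.
 */
static inline unsigned int example_read_guest_dtprop(int handle, void *buf,
						     uint32_t *len)
{
	static const char path[] = "/cpus/cpu@0";
	static const char prop[] = "clock-frequency";

	return fh_partition_get_dtprop(handle,
				       virt_to_phys((void *)path),
				       virt_to_phys((void *)prop),
				       virt_to_phys(buf),
				       len);
}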
+ */ +static inline unsigned int fh_partition_get_dtprop(int handle, + uint64_t dtpath_addr, + uint64_t propname_addr, + uint64_t propvalue_addr, + uint32_t *propvalue_len) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r7 __asm__("r7"); + register uintptr_t r8 __asm__("r8"); + register uintptr_t r9 __asm__("r9"); + register uintptr_t r10 __asm__("r10"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP); + r3 = handle; + +#ifdef CONFIG_PHYS_64BIT + r4 = dtpath_addr >> 32; + r6 = propname_addr >> 32; + r8 = propvalue_addr >> 32; +#else + r4 = 0; + r6 = 0; + r8 = 0; +#endif + r5 = (uint32_t)dtpath_addr; + r7 = (uint32_t)propname_addr; + r9 = (uint32_t)propvalue_addr; + r10 = *propvalue_len; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), + "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), + "+r" (r8), "+r" (r9), "+r" (r10) + : : EV_HCALL_CLOBBERS8 + ); + + *propvalue_len = r4; + return r3; +} + +/** + * Set a property in a guest device tree. + * @handle: handle of partition whose device tree is to be accessed + * @dtpath_addr: physical address of device tree path to access + * @propname_addr: physical address of name of property + * @propvalue_addr: physical address of property value + * @propvalue_len: length of property + * + * Returns zero on success, non-zero on error. + */ +static inline unsigned int fh_partition_set_dtprop(int handle, + uint64_t dtpath_addr, + uint64_t propname_addr, + uint64_t propvalue_addr, + uint32_t propvalue_len) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r8 __asm__("r8"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r7 __asm__("r7"); + register uintptr_t r9 __asm__("r9"); + register uintptr_t r10 __asm__("r10"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP); + r3 = handle; + +#ifdef CONFIG_PHYS_64BIT + r4 = dtpath_addr >> 32; + r6 = propname_addr >> 32; + r8 = propvalue_addr >> 32; +#else + r4 = 0; + r6 = 0; + r8 = 0; +#endif + r5 = (uint32_t)dtpath_addr; + r7 = (uint32_t)propname_addr; + r9 = (uint32_t)propvalue_addr; + r10 = propvalue_len; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), + "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), + "+r" (r8), "+r" (r9), "+r" (r10) + : : EV_HCALL_CLOBBERS8 + ); + + return r3; +} + +/** + * fh_partition_restart - reboot the current partition + * @partition: partition ID + * + * Returns an error code if reboot failed. Does not return if it succeeds. + */ +static inline unsigned int fh_partition_restart(unsigned int partition) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART); + r3 = partition; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +#define FH_PARTITION_STOPPED 0 +#define FH_PARTITION_RUNNING 1 +#define FH_PARTITION_STARTING 2 +#define FH_PARTITION_STOPPING 3 +#define FH_PARTITION_PAUSING 4 +#define FH_PARTITION_PAUSED 5 +#define FH_PARTITION_RESUMING 6 + +/** + * fh_partition_get_status - gets the status of a partition + * @partition: partition ID + * @status: returned status code + * + * Returns 0 for success, or an error code. 
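+ *
+ * Illustrative sketch only (not part of the original header), polling a
+ * hypothetical partition handle until it reports FH_PARTITION_STOPPED:
+ *
+ *	unsigned int status;
+ *
+ *	do {
+ *		if (fh_partition_get_status(part, &status))
+ *			break;
+ *	} while (status != FH_PARTITION_STOPPED);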
+ */ +static inline unsigned int fh_partition_get_status(unsigned int partition, + unsigned int *status) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS); + r3 = partition; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + *status = r4; + + return r3; +} + +/** + * fh_partition_start - boots and starts execution of the specified partition + * @partition: partition ID + * @entry_point: guest physical address to start execution + * + * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot + * time, guest physical address are the same as guest virtual addresses. + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_partition_start(unsigned int partition, + uint32_t entry_point, int load) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_START); + r3 = partition; + r4 = entry_point; + r5 = load; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5) + : : EV_HCALL_CLOBBERS3 + ); + + return r3; +} + +/** + * fh_partition_stop - stops another partition + * @partition: partition ID + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_partition_stop(unsigned int partition) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP); + r3 = partition; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/** + * struct fh_sg_list: definition of the fh_partition_memcpy S/G list + * @source: guest physical address to copy from + * @target: guest physical address to copy to + * @size: number of bytes to copy + * @reserved: reserved, must be zero + * + * The scatter/gather list for fh_partition_memcpy() is an array of these + * structures. The array must be guest physically contiguous. + * + * This structure must be aligned on 32-byte boundary, so that no single + * strucuture can span two pages. + */ +struct fh_sg_list { + uint64_t source; /**< guest physical address to copy from */ + uint64_t target; /**< guest physical address to copy to */ + uint64_t size; /**< number of bytes to copy */ + uint64_t reserved; /**< reserved, must be zero */ +} __attribute__ ((aligned(32))); + +/** + * fh_partition_memcpy - copies data from one guest to another + * @source: the ID of the partition to copy from + * @target: the ID of the partition to copy to + * @sg_list: guest physical address of an array of &fh_sg_list structures + * @count: the number of entries in @sg_list + * + * Returns 0 for success, or an error code. 
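+ *
+ * Illustrative sketch only (not part of the original header); the partition
+ * handles and guest physical addresses are invented, and the single S/G
+ * entry is 32-byte aligned as required:
+ *
+ *	static struct fh_sg_list sg __aligned(32);
+ *
+ *	sg.source = src_gpa;
+ *	sg.target = dst_gpa;
+ *	sg.size = len;
+ *	sg.reserved = 0;
+ *	rc = fh_partition_memcpy(src_part, dst_part, virt_to_phys(&sg), 1);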
+ */ +static inline unsigned int fh_partition_memcpy(unsigned int source, + unsigned int target, phys_addr_t sg_list, unsigned int count) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r7 __asm__("r7"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY); + r3 = source; + r4 = target; + r5 = (uint32_t) sg_list; + +#ifdef CONFIG_PHYS_64BIT + r6 = sg_list >> 32; +#else + r6 = 0; +#endif + r7 = count; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), + "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7) + : : EV_HCALL_CLOBBERS5 + ); + + return r3; +} + +/** + * fh_dma_enable - enable DMA for the specified device + * @liodn: the LIODN of the I/O device for which to enable DMA + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_dma_enable(unsigned int liodn) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE); + r3 = liodn; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/** + * fh_dma_disable - disable DMA for the specified device + * @liodn: the LIODN of the I/O device for which to disable DMA + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_dma_disable(unsigned int liodn) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE); + r3 = liodn; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + + +/** + * fh_vmpic_get_msir - returns the MPIC-MSI register value + * @interrupt: the interrupt number + * @msir_val: returned MPIC-MSI register value + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt, + unsigned int *msir_val) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR); + r3 = interrupt; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "=r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + *msir_val = r4; + + return r3; +} + +/** + * fh_system_reset - reset the system + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_system_reset(void) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET); + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "=r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + + +/** + * fh_err_get_info - get platform error information + * @queue id: + * 0 for guest error event queue + * 1 for global error event queue + * + * @pointer to store the platform error data: + * platform error data is returned in registers r4 - r11 + * + * Returns 0 for success, or an error code. 
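+ *
+ * Illustrative sketch only (not part of the original header); err_buf is an
+ * invented, guest physically contiguous buffer whose address is split into
+ * the addr_hi/addr_lo arguments; queue 0 (guest error queue) and peek 0 are
+ * assumed:
+ *
+ *	uint32_t size = sizeof(err_buf);
+ *	phys_addr_t p = virt_to_phys(err_buf);
+ *
+ *	rc = fh_err_get_info(0, &size, upper_32_bits(p), lower_32_bits(p), 0);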
+ */ +static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize, + uint32_t addr_hi, uint32_t addr_lo, int peek) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + register uintptr_t r5 __asm__("r5"); + register uintptr_t r6 __asm__("r6"); + register uintptr_t r7 __asm__("r7"); + + r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO); + r3 = queue; + r4 = *bufsize; + r5 = addr_hi; + r6 = addr_lo; + r7 = peek; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), + "+r" (r7) + : : EV_HCALL_CLOBBERS5 + ); + + *bufsize = r4; + + return r3; +} + + +#define FH_VCPU_RUN 0 +#define FH_VCPU_IDLE 1 +#define FH_VCPU_NAP 2 + +/** + * fh_get_core_state - get the state of a vcpu + * + * @handle: handle of partition containing the vcpu + * @vcpu: vcpu number within the partition + * @state:the current state of the vcpu, see FH_VCPU_* + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_get_core_state(unsigned int handle, + unsigned int vcpu, unsigned int *state) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE); + r3 = handle; + r4 = vcpu; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + *state = r4; + return r3; +} + +/** + * fh_enter_nap - enter nap on a vcpu + * + * Note that though the API supports entering nap on a vcpu other + * than the caller, this may not be implmented and may return EINVAL. + * + * @handle: handle of partition containing the vcpu + * @vcpu: vcpu number within the partition + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = FH_HCALL_TOKEN(FH_ENTER_NAP); + r3 = handle; + r4 = vcpu; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + return r3; +} + +/** + * fh_exit_nap - exit nap on a vcpu + * @handle: handle of partition containing the vcpu + * @vcpu: vcpu number within the partition + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + register uintptr_t r4 __asm__("r4"); + + r11 = FH_HCALL_TOKEN(FH_EXIT_NAP); + r3 = handle; + r4 = vcpu; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3), "+r" (r4) + : : EV_HCALL_CLOBBERS2 + ); + + return r3; +} +/** + * fh_claim_device - claim a "claimable" shared device + * @handle: fsl,hv-device-handle of node to claim + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_claim_device(unsigned int handle) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE); + r3 = handle; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} + +/** + * Run deferred DMA disabling on a partition's private devices + * + * This applies to devices which a partition owns either privately, + * or which are claimable and still actively owned by that partition, + * and which do not have the no-dma-disable property. 
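+ *
+ * Illustrative sketch only (not part of the original header); the partition
+ * handle is invented and the partition is stopped before its DMA is torn
+ * down:
+ *
+ *	if (!fh_partition_stop(part))
+ *		rc = fh_partition_stop_dma(part);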
+ * + * @handle: partition (must be stopped) whose DMA is to be disabled + * + * Returns 0 for success, or an error code. + */ +static inline unsigned int fh_partition_stop_dma(unsigned int handle) +{ + register uintptr_t r11 __asm__("r11"); + register uintptr_t r3 __asm__("r3"); + + r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA); + r3 = handle; + + __asm__ __volatile__ ("sc 1" + : "+r" (r11), "+r" (r3) + : : EV_HCALL_CLOBBERS1 + ); + + return r3; +} +#endif diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h new file mode 100644 index 00000000000..b955012939a --- /dev/null +++ b/arch/powerpc/include/asm/fsl_ifc.h @@ -0,0 +1,834 @@ +/* Freescale Integrated Flash Controller + * + * Copyright 2011 Freescale Semiconductor, Inc + * + * Author: Dipen Dudhat <dipen.dudhat@freescale.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_FSL_IFC_H +#define __ASM_FSL_IFC_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/io.h> + +#include <linux/of_platform.h> +#include <linux/interrupt.h> + +#define FSL_IFC_BANK_COUNT 4 + +/* + * CSPR - Chip Select Property Register + */ +#define CSPR_BA 0xFFFF0000 +#define CSPR_BA_SHIFT 16 +#define CSPR_PORT_SIZE 0x00000180 +#define CSPR_PORT_SIZE_SHIFT 7 +/* Port Size 8 bit */ +#define CSPR_PORT_SIZE_8 0x00000080 +/* Port Size 16 bit */ +#define CSPR_PORT_SIZE_16 0x00000100 +/* Port Size 32 bit */ +#define CSPR_PORT_SIZE_32 0x00000180 +/* Write Protect */ +#define CSPR_WP 0x00000040 +#define CSPR_WP_SHIFT 6 +/* Machine Select */ +#define CSPR_MSEL 0x00000006 +#define CSPR_MSEL_SHIFT 1 +/* NOR */ +#define CSPR_MSEL_NOR 0x00000000 +/* NAND */ +#define CSPR_MSEL_NAND 0x00000002 +/* GPCM */ +#define CSPR_MSEL_GPCM 0x00000004 +/* Bank Valid */ +#define CSPR_V 0x00000001 +#define CSPR_V_SHIFT 0 + +/* + * Address Mask Register + */ +#define IFC_AMASK_MASK 0xFFFF0000 +#define IFC_AMASK_SHIFT 16 +#define IFC_AMASK(n) (IFC_AMASK_MASK << \ + (__ilog2(n) - IFC_AMASK_SHIFT)) + +/* + * Chip Select Option Register IFC_NAND Machine + */ +/* Enable ECC Encoder */ +#define CSOR_NAND_ECC_ENC_EN 0x80000000 +#define CSOR_NAND_ECC_MODE_MASK 0x30000000 +/* 4 bit correction per 520 Byte sector */ +#define CSOR_NAND_ECC_MODE_4 0x00000000 +/* 8 bit correction per 528 Byte sector */ +#define CSOR_NAND_ECC_MODE_8 0x10000000 +/* Enable ECC Decoder */ +#define CSOR_NAND_ECC_DEC_EN 0x04000000 +/* Row Address Length */ +#define CSOR_NAND_RAL_MASK 0x01800000 +#define CSOR_NAND_RAL_SHIFT 20 +#define CSOR_NAND_RAL_1 0x00000000 +#define CSOR_NAND_RAL_2 0x00800000 +#define CSOR_NAND_RAL_3 0x01000000 +#define CSOR_NAND_RAL_4 0x01800000 +/* Page Size 512b, 2k, 4k */ +#define CSOR_NAND_PGS_MASK 0x00180000 +#define CSOR_NAND_PGS_SHIFT 16 +#define CSOR_NAND_PGS_512 0x00000000 +#define CSOR_NAND_PGS_2K 0x00080000 +#define CSOR_NAND_PGS_4K 0x00100000 +/* Spare 
region Size */ +#define CSOR_NAND_SPRZ_MASK 0x0000E000 +#define CSOR_NAND_SPRZ_SHIFT 13 +#define CSOR_NAND_SPRZ_16 0x00000000 +#define CSOR_NAND_SPRZ_64 0x00002000 +#define CSOR_NAND_SPRZ_128 0x00004000 +#define CSOR_NAND_SPRZ_210 0x00006000 +#define CSOR_NAND_SPRZ_218 0x00008000 +#define CSOR_NAND_SPRZ_224 0x0000A000 +/* Pages Per Block */ +#define CSOR_NAND_PB_MASK 0x00000700 +#define CSOR_NAND_PB_SHIFT 8 +#define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_NAND_TRHZ_MASK 0x0000001C +#define CSOR_NAND_TRHZ_SHIFT 2 +#define CSOR_NAND_TRHZ_20 0x00000000 +#define CSOR_NAND_TRHZ_40 0x00000004 +#define CSOR_NAND_TRHZ_60 0x00000008 +#define CSOR_NAND_TRHZ_80 0x0000000C +#define CSOR_NAND_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_NAND_BCTLD 0x00000001 + +/* + * Chip Select Option Register - NOR Flash Mode + */ +/* Enable Address shift Mode */ +#define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000 +/* Page Read Enable from NOR device */ +#define CSOR_NOR_PGRD_EN 0x10000000 +/* AVD Toggle Enable during Burst Program */ +#define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000 +/* Address Data Multiplexing Shift */ +#define CSOR_NOR_ADM_MASK 0x0003E000 +#define CSOR_NOR_ADM_SHIFT_SHIFT 13 +#define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT) +/* Type of the NOR device hooked */ +#define CSOR_NOR_NOR_MODE_AYSNC_NOR 0x00000000 +#define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020 +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_NOR_TRHZ_MASK 0x0000001C +#define CSOR_NOR_TRHZ_SHIFT 2 +#define CSOR_NOR_TRHZ_20 0x00000000 +#define CSOR_NOR_TRHZ_40 0x00000004 +#define CSOR_NOR_TRHZ_60 0x00000008 +#define CSOR_NOR_TRHZ_80 0x0000000C +#define CSOR_NOR_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_NOR_BCTLD 0x00000001 + +/* + * Chip Select Option Register - GPCM Mode + */ +/* GPCM Mode - Normal */ +#define CSOR_GPCM_GPMODE_NORMAL 0x00000000 +/* GPCM Mode - GenericASIC */ +#define CSOR_GPCM_GPMODE_ASIC 0x80000000 +/* Parity Mode odd/even */ +#define CSOR_GPCM_PARITY_EVEN 0x40000000 +/* Parity Checking enable/disable */ +#define CSOR_GPCM_PAR_EN 0x20000000 +/* GPCM Timeout Count */ +#define CSOR_GPCM_GPTO_MASK 0x0F000000 +#define CSOR_GPCM_GPTO_SHIFT 24 +#define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) +/* GPCM External Access Termination mode for read access */ +#define CSOR_GPCM_RGETA_EXT 0x00080000 +/* GPCM External Access Termination mode for write access */ +#define CSOR_GPCM_WGETA_EXT 0x00040000 +/* Address Data Multiplexing Shift */ +#define CSOR_GPCM_ADM_MASK 0x0003E000 +#define CSOR_GPCM_ADM_SHIFT_SHIFT 13 +#define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) +/* Generic ASIC Parity error indication delay */ +#define CSOR_GPCM_GAPERRD_MASK 0x00000180 +#define CSOR_GPCM_GAPERRD_SHIFT 7 +#define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_GPCM_TRHZ_MASK 0x0000001C +#define CSOR_GPCM_TRHZ_20 0x00000000 +#define CSOR_GPCM_TRHZ_40 0x00000004 +#define CSOR_GPCM_TRHZ_60 0x00000008 +#define CSOR_GPCM_TRHZ_80 0x0000000C +#define CSOR_GPCM_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_GPCM_BCTLD 0x00000001 + +/* + * Ready Busy Status Register (RB_STAT) + */ +/* CSn is READY */ +#define IFC_RB_STAT_READY_CS0 0x80000000 +#define IFC_RB_STAT_READY_CS1 0x40000000 +#define IFC_RB_STAT_READY_CS2 0x20000000 +#define IFC_RB_STAT_READY_CS3 0x10000000 + +/* 
+ * General Control Register (GCR) + */ +#define IFC_GCR_MASK 0x8000F800 +/* reset all IFC hardware */ +#define IFC_GCR_SOFT_RST_ALL 0x80000000 +/* Turnaroud Time of external buffer */ +#define IFC_GCR_TBCTL_TRN_TIME 0x0000F800 +#define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11 + +/* + * Common Event and Error Status Register (CM_EVTER_STAT) + */ +/* Chip select error */ +#define IFC_CM_EVTER_STAT_CSER 0x80000000 + +/* + * Common Event and Error Enable Register (CM_EVTER_EN) + */ +/* Chip select error checking enable */ +#define IFC_CM_EVTER_EN_CSEREN 0x80000000 + +/* + * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN) + */ +/* Chip select error interrupt enable */ +#define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000 + +/* + * Common Transfer Error Attribute Register-0 (CM_ERATTR0) + */ +/* transaction type of error Read/Write */ +#define IFC_CM_ERATTR0_ERTYP_READ 0x80000000 +#define IFC_CM_ERATTR0_ERAID 0x0FF00000 +#define IFC_CM_ERATTR0_ERAID_SHIFT 20 +#define IFC_CM_ERATTR0_ESRCID 0x0000FF00 +#define IFC_CM_ERATTR0_ESRCID_SHIFT 8 + +/* + * Clock Control Register (CCR) + */ +#define IFC_CCR_MASK 0x0F0F8800 +/* Clock division ratio */ +#define IFC_CCR_CLK_DIV_MASK 0x0F000000 +#define IFC_CCR_CLK_DIV_SHIFT 24 +#define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT) +/* IFC Clock Delay */ +#define IFC_CCR_CLK_DLY_MASK 0x000F0000 +#define IFC_CCR_CLK_DLY_SHIFT 16 +#define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT) +/* Invert IFC clock before sending out */ +#define IFC_CCR_INV_CLK_EN 0x00008000 +/* Fedback IFC Clock */ +#define IFC_CCR_FB_IFC_CLK_SEL 0x00000800 + +/* + * Clock Status Register (CSR) + */ +/* Clk is stable */ +#define IFC_CSR_CLK_STAT_STABLE 0x80000000 + +/* + * IFC_NAND Machine Specific Registers + */ +/* + * NAND Configuration Register (NCFGR) + */ +/* Auto Boot Mode */ +#define IFC_NAND_NCFGR_BOOT 0x80000000 +/* Addressing Mode-ROW0+n/COL0 */ +#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 +/* Addressing Mode-ROW0+n/COL0+n */ +#define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000 +/* Number of loop iterations of FIR sequences for multi page operations */ +#define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000 +#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12 +#define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) +/* Number of wait cycles */ +#define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF +#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0 + +/* + * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1) + */ +/* General purpose FCM flash command bytes CMD0-CMD7 */ +#define IFC_NAND_FCR0_CMD0 0xFF000000 +#define IFC_NAND_FCR0_CMD0_SHIFT 24 +#define IFC_NAND_FCR0_CMD1 0x00FF0000 +#define IFC_NAND_FCR0_CMD1_SHIFT 16 +#define IFC_NAND_FCR0_CMD2 0x0000FF00 +#define IFC_NAND_FCR0_CMD2_SHIFT 8 +#define IFC_NAND_FCR0_CMD3 0x000000FF +#define IFC_NAND_FCR0_CMD3_SHIFT 0 +#define IFC_NAND_FCR1_CMD4 0xFF000000 +#define IFC_NAND_FCR1_CMD4_SHIFT 24 +#define IFC_NAND_FCR1_CMD5 0x00FF0000 +#define IFC_NAND_FCR1_CMD5_SHIFT 16 +#define IFC_NAND_FCR1_CMD6 0x0000FF00 +#define IFC_NAND_FCR1_CMD6_SHIFT 8 +#define IFC_NAND_FCR1_CMD7 0x000000FF +#define IFC_NAND_FCR1_CMD7_SHIFT 0 + +/* + * Flash ROW and COL Address Register (ROWn, COLn) + */ +/* Main/spare region locator */ +#define IFC_NAND_COL_MS 0x80000000 +/* Column Address */ +#define IFC_NAND_COL_CA_MASK 0x00000FFF + +/* + * NAND Flash Byte Count Register (NAND_BC) + */ +/* Byte Count for read/Write */ +#define IFC_NAND_BC 0x000001FF + +/* + * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2) + */ +/* NAND Machine specific 
opcodes OP0-OP14*/ +#define IFC_NAND_FIR0_OP0 0xFC000000 +#define IFC_NAND_FIR0_OP0_SHIFT 26 +#define IFC_NAND_FIR0_OP1 0x03F00000 +#define IFC_NAND_FIR0_OP1_SHIFT 20 +#define IFC_NAND_FIR0_OP2 0x000FC000 +#define IFC_NAND_FIR0_OP2_SHIFT 14 +#define IFC_NAND_FIR0_OP3 0x00003F00 +#define IFC_NAND_FIR0_OP3_SHIFT 8 +#define IFC_NAND_FIR0_OP4 0x000000FC +#define IFC_NAND_FIR0_OP4_SHIFT 2 +#define IFC_NAND_FIR1_OP5 0xFC000000 +#define IFC_NAND_FIR1_OP5_SHIFT 26 +#define IFC_NAND_FIR1_OP6 0x03F00000 +#define IFC_NAND_FIR1_OP6_SHIFT 20 +#define IFC_NAND_FIR1_OP7 0x000FC000 +#define IFC_NAND_FIR1_OP7_SHIFT 14 +#define IFC_NAND_FIR1_OP8 0x00003F00 +#define IFC_NAND_FIR1_OP8_SHIFT 8 +#define IFC_NAND_FIR1_OP9 0x000000FC +#define IFC_NAND_FIR1_OP9_SHIFT 2 +#define IFC_NAND_FIR2_OP10 0xFC000000 +#define IFC_NAND_FIR2_OP10_SHIFT 26 +#define IFC_NAND_FIR2_OP11 0x03F00000 +#define IFC_NAND_FIR2_OP11_SHIFT 20 +#define IFC_NAND_FIR2_OP12 0x000FC000 +#define IFC_NAND_FIR2_OP12_SHIFT 14 +#define IFC_NAND_FIR2_OP13 0x00003F00 +#define IFC_NAND_FIR2_OP13_SHIFT 8 +#define IFC_NAND_FIR2_OP14 0x000000FC +#define IFC_NAND_FIR2_OP14_SHIFT 2 + +/* + * Instruction opcodes to be programmed + * in FIR registers- 6bits + */ +enum ifc_nand_fir_opcodes { + IFC_FIR_OP_NOP, + IFC_FIR_OP_CA0, + IFC_FIR_OP_CA1, + IFC_FIR_OP_CA2, + IFC_FIR_OP_CA3, + IFC_FIR_OP_RA0, + IFC_FIR_OP_RA1, + IFC_FIR_OP_RA2, + IFC_FIR_OP_RA3, + IFC_FIR_OP_CMD0, + IFC_FIR_OP_CMD1, + IFC_FIR_OP_CMD2, + IFC_FIR_OP_CMD3, + IFC_FIR_OP_CMD4, + IFC_FIR_OP_CMD5, + IFC_FIR_OP_CMD6, + IFC_FIR_OP_CMD7, + IFC_FIR_OP_CW0, + IFC_FIR_OP_CW1, + IFC_FIR_OP_CW2, + IFC_FIR_OP_CW3, + IFC_FIR_OP_CW4, + IFC_FIR_OP_CW5, + IFC_FIR_OP_CW6, + IFC_FIR_OP_CW7, + IFC_FIR_OP_WBCD, + IFC_FIR_OP_RBCD, + IFC_FIR_OP_BTRD, + IFC_FIR_OP_RDSTAT, + IFC_FIR_OP_NWAIT, + IFC_FIR_OP_WFR, + IFC_FIR_OP_SBRD, + IFC_FIR_OP_UA, + IFC_FIR_OP_RB, +}; + +/* + * NAND Chip Select Register (NAND_CSEL) + */ +#define IFC_NAND_CSEL 0x0C000000 +#define IFC_NAND_CSEL_SHIFT 26 +#define IFC_NAND_CSEL_CS0 0x00000000 +#define IFC_NAND_CSEL_CS1 0x04000000 +#define IFC_NAND_CSEL_CS2 0x08000000 +#define IFC_NAND_CSEL_CS3 0x0C000000 + +/* + * NAND Operation Sequence Start (NANDSEQ_STRT) + */ +/* NAND Flash Operation Start */ +#define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000 +/* Automatic Erase */ +#define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000 +/* Automatic Program */ +#define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000 +/* Automatic Copyback */ +#define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000 +/* Automatic Read Operation */ +#define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000 +/* Automatic Status Read */ +#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800 + +/* + * NAND Event and Error Status Register (NAND_EVTER_STAT) + */ +/* Operation Complete */ +#define IFC_NAND_EVTER_STAT_OPC 0x80000000 +/* Flash Timeout Error */ +#define IFC_NAND_EVTER_STAT_FTOER 0x08000000 +/* Write Protect Error */ +#define IFC_NAND_EVTER_STAT_WPER 0x04000000 +/* ECC Error */ +#define IFC_NAND_EVTER_STAT_ECCER 0x02000000 +/* RCW Load Done */ +#define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000 +/* Boot Loadr Done */ +#define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000 +/* Bad Block Indicator search select */ +#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800 + +/* + * NAND Flash Page Read Completion Event Status Register + * (PGRDCMPL_EVT_STAT) + */ +#define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000 +/* Small Page 0-15 Done */ +#define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n))) +/* Large Page(2K) 0-3 Done */ +#define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4)) +/* 
Large Page(4K) 0-1 Done */ +#define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8)) + +/* + * NAND Event and Error Enable Register (NAND_EVTER_EN) + */ +/* Operation complete event enable */ +#define IFC_NAND_EVTER_EN_OPC_EN 0x80000000 +/* Page read complete event enable */ +#define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000 +/* Flash Timeout error enable */ +#define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000 +/* Write Protect error enable */ +#define IFC_NAND_EVTER_EN_WPER_EN 0x04000000 +/* ECC error logging enable */ +#define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000 + +/* + * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN) + */ +/* Enable interrupt for operation complete */ +#define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000 +/* Enable interrupt for Page read complete */ +#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000 +/* Enable interrupt for Flash timeout error */ +#define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000 +/* Enable interrupt for Write protect error */ +#define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000 +/* Enable interrupt for ECC error*/ +#define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000 + +/* + * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0) + */ +#define IFC_NAND_ERATTR0_MASK 0x0C080000 +/* Error on CS0-3 for NAND */ +#define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000 +#define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000 +#define IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000 +/* Transaction type of error Read/Write */ +#define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000 + +/* + * NAND Flash Status Register (NAND_FSR) + */ +/* First byte of data read from read status op */ +#define IFC_NAND_NFSR_RS0 0xFF000000 +/* Second byte of data read from read status op */ +#define IFC_NAND_NFSR_RS1 0x00FF0000 + +/* + * ECC Error Status Registers (ECCSTAT0-ECCSTAT3) + */ +/* Number of ECC errors on sector n (n = 0-15) */ +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8 +#define 
IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0 + +/* + * NAND Control Register (NANDCR) + */ +#define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000 +#define IFC_NAND_NCR_FTOCNT_SHIFT 25 +#define IFC_NAND_NCR_FTOCNT(n) ((_ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT) + +/* + * NAND_AUTOBOOT_TRGR + */ +/* Trigger RCW load */ +#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000 +/* Trigget Auto Boot */ +#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000 + +/* + * NAND_MDR + */ +/* 1st read data byte when opcode SBRD */ +#define IFC_NAND_MDR_RDATA0 0xFF000000 +/* 2nd read data byte when opcode SBRD */ +#define IFC_NAND_MDR_RDATA1 0x00FF0000 + +/* + * NOR Machine Specific Registers + */ +/* + * NOR Event and Error Status Register (NOR_EVTER_STAT) + */ +/* NOR Command Sequence Operation Complete */ +#define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000 +/* Write Protect Error */ +#define IFC_NOR_EVTER_STAT_WPER 0x04000000 +/* Command Sequence Timeout Error */ +#define IFC_NOR_EVTER_STAT_STOER 0x01000000 + +/* + * NOR Event and Error Enable Register (NOR_EVTER_EN) + */ +/* NOR Command Seq complete event enable */ +#define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000 +/* Write Protect Error Checking Enable */ +#define IFC_NOR_EVTER_EN_WPEREN 0x04000000 +/* Timeout Error Enable */ +#define IFC_NOR_EVTER_EN_STOEREN 0x01000000 + +/* + * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN) + */ +/* Enable interrupt for OPC complete */ +#define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000 +/* Enable interrupt for write protect error */ +#define IFC_NOR_EVTER_INTR_WPEREN 0x04000000 +/* Enable interrupt for timeout error */ +#define IFC_NOR_EVTER_INTR_STOEREN 0x01000000 + +/* + * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0) + */ +/* Source ID for error transaction */ +#define IFC_NOR_ERATTR0_ERSRCID 0xFF000000 +/* AXI ID for error transation */ +#define IFC_NOR_ERATTR0_ERAID 0x000FF000 +/* Chip select corresponds to NOR error */ +#define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_NOR_ERATTR0_ERCS_CS1 0x00000010 +#define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020 +#define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030 +/* Type of transaction read/write */ +#define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001 + +/* + * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2) + */ +#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000 +#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00 + +/* + * NOR Control Register (NORCR) + */ +#define IFC_NORCR_MASK 0x0F0F0000 +/* No. 
of Address/Data Phase */ +#define IFC_NORCR_NUM_PHASE_MASK 0x0F000000 +#define IFC_NORCR_NUM_PHASE_SHIFT 24 +#define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) +/* Sequence Timeout Count */ +#define IFC_NORCR_STOCNT_MASK 0x000F0000 +#define IFC_NORCR_STOCNT_SHIFT 16 +#define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) + +/* + * GPCM Machine specific registers + */ +/* + * GPCM Event and Error Status Register (GPCM_EVTER_STAT) + */ +/* Timeout error */ +#define IFC_GPCM_EVTER_STAT_TOER 0x04000000 +/* Parity error */ +#define IFC_GPCM_EVTER_STAT_PER 0x01000000 + +/* + * GPCM Event and Error Enable Register (GPCM_EVTER_EN) + */ +/* Timeout error enable */ +#define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000 +/* Parity error enable */ +#define IFC_GPCM_EVTER_EN_PER_EN 0x01000000 + +/* + * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN) + */ +/* Enable Interrupt for timeout error */ +#define IFC_GPCM_EEIER_TOERIR_EN 0x04000000 +/* Enable Interrupt for Parity error */ +#define IFC_GPCM_EEIER_PERIR_EN 0x01000000 + +/* + * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0) + */ +/* Source ID for error transaction */ +#define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000 +/* AXI ID for error transaction */ +#define IFC_GPCM_ERATTR0_ERAID 0x000FF000 +/* Chip select corresponds to GPCM error */ +#define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040 +#define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080 +#define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0 +/* Type of transaction read/Write */ +#define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001 + +/* + * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2) + */ +/* On which beat of address/data parity error is observed */ +#define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00 +/* Parity Error on byte */ +#define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0 +/* Parity Error reported in addr or data phase */ +#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001 + +/* + * GPCM Status Register (GPCM_STAT) + */ +#define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */ + +/* + * IFC Controller NAND Machine registers + */ +struct fsl_ifc_nand { + __be32 ncfgr; + u32 res1[0x4]; + __be32 nand_fcr0; + __be32 nand_fcr1; + u32 res2[0x8]; + __be32 row0; + u32 res3; + __be32 col0; + u32 res4; + __be32 row1; + u32 res5; + __be32 col1; + u32 res6; + __be32 row2; + u32 res7; + __be32 col2; + u32 res8; + __be32 row3; + u32 res9; + __be32 col3; + u32 res10[0x24]; + __be32 nand_fbcr; + u32 res11; + __be32 nand_fir0; + __be32 nand_fir1; + __be32 nand_fir2; + u32 res12[0x10]; + __be32 nand_csel; + u32 res13; + __be32 nandseq_strt; + u32 res14; + __be32 nand_evter_stat; + u32 res15; + __be32 pgrdcmpl_evt_stat; + u32 res16[0x2]; + __be32 nand_evter_en; + u32 res17[0x2]; + __be32 nand_evter_intr_en; + u32 res18[0x2]; + __be32 nand_erattr0; + __be32 nand_erattr1; + u32 res19[0x10]; + __be32 nand_fsr; + u32 res20; + __be32 nand_eccstat[4]; + u32 res21[0x20]; + __be32 nanndcr; + u32 res22[0x2]; + __be32 nand_autoboot_trgr; + u32 res23; + __be32 nand_mdr; + u32 res24[0x5C]; +}; + +/* + * IFC controller NOR Machine registers + */ +struct fsl_ifc_nor { + __be32 nor_evter_stat; + u32 res1[0x2]; + __be32 nor_evter_en; + u32 res2[0x2]; + __be32 nor_evter_intr_en; + u32 res3[0x2]; + __be32 nor_erattr0; + __be32 nor_erattr1; + __be32 nor_erattr2; + u32 res4[0x4]; + __be32 norcr; + u32 res5[0xEF]; +}; + +/* + * IFC controller GPCM Machine registers + */ +struct fsl_ifc_gpcm { + __be32 gpcm_evter_stat; + u32 res1[0x2]; + __be32 
gpcm_evter_en; + u32 res2[0x2]; + __be32 gpcm_evter_intr_en; + u32 res3[0x2]; + __be32 gpcm_erattr0; + __be32 gpcm_erattr1; + __be32 gpcm_erattr2; + __be32 gpcm_stat; + u32 res4[0x1F3]; +}; + +/* + * IFC Controller Registers + */ +struct fsl_ifc_regs { + __be32 ifc_rev; + u32 res1[0x3]; + struct { + __be32 cspr; + u32 res2[0x2]; + } cspr_cs[FSL_IFC_BANK_COUNT]; + u32 res3[0x18]; + struct { + __be32 amask; + u32 res4[0x2]; + } amask_cs[FSL_IFC_BANK_COUNT]; + u32 res5[0x18]; + struct { + __be32 csor; + u32 res6[0x2]; + } csor_cs[FSL_IFC_BANK_COUNT]; + u32 res7[0x18]; + struct { + __be32 ftim[4]; + u32 res8[0x8]; + } ftim_cs[FSL_IFC_BANK_COUNT]; + u32 res9[0x60]; + __be32 rb_stat; + u32 res10[0x2]; + __be32 ifc_gcr; + u32 res11[0x2]; + __be32 cm_evter_stat; + u32 res12[0x2]; + __be32 cm_evter_en; + u32 res13[0x2]; + __be32 cm_evter_intr_en; + u32 res14[0x2]; + __be32 cm_erattr0; + __be32 cm_erattr1; + u32 res15[0x2]; + __be32 ifc_ccr; + __be32 ifc_csr; + u32 res16[0x2EB]; + struct fsl_ifc_nand ifc_nand; + struct fsl_ifc_nor ifc_nor; + struct fsl_ifc_gpcm ifc_gpcm; +}; + +extern unsigned int convert_ifc_address(phys_addr_t addr_base); +extern int fsl_ifc_find(phys_addr_t addr_base); + +/* overview of the fsl ifc controller */ + +struct fsl_ifc_ctrl { + /* device info */ + struct device *dev; + struct fsl_ifc_regs __iomem *regs; + int irq; + int nand_irq; + spinlock_t lock; + void *nand; + + u32 nand_stat; + wait_queue_head_t nand_wait; +}; + +extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + + +#endif /* __ASM_FSL_IFC_H */ diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h new file mode 100644 index 00000000000..420b45368fc --- /dev/null +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -0,0 +1,306 @@ +/* Freescale Local Bus Controller + * + * Copyright © 2006-2007, 2010 Freescale Semiconductor + * + * Authors: Nick Spence <nick.spence@freescale.com>, + * Scott Wood <scottwood@freescale.com> + * Jack Lan <jack.lan@freescale.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_FSL_LBC_H +#define __ASM_FSL_LBC_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/spinlock.h> + +struct fsl_lbc_bank { + __be32 br; /**< Base Register */ +#define BR_BA 0xFFFF8000 +#define BR_BA_SHIFT 15 +#define BR_PS 0x00001800 +#define BR_PS_SHIFT 11 +#define BR_PS_8 0x00000800 /* Port Size 8 bit */ +#define BR_PS_16 0x00001000 /* Port Size 16 bit */ +#define BR_PS_32 0x00001800 /* Port Size 32 bit */ +#define BR_DECC 0x00000600 +#define BR_DECC_SHIFT 9 +#define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */ +#define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */ +#define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */ +#define BR_WP 0x00000100 +#define BR_WP_SHIFT 8 +#define BR_MSEL 0x000000E0 +#define BR_MSEL_SHIFT 5 +#define BR_MS_GPCM 0x00000000 /* GPCM */ +#define BR_MS_FCM 0x00000020 /* FCM */ +#define BR_MS_SDRAM 0x00000060 /* SDRAM */ +#define BR_MS_UPMA 0x00000080 /* UPMA */ +#define BR_MS_UPMB 0x000000A0 /* UPMB */ +#define BR_MS_UPMC 0x000000C0 /* UPMC */ +#define BR_V 0x00000001 +#define BR_V_SHIFT 0 +#define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V) + + __be32 or; /**< Base Register */ +#define OR0 0x5004 +#define OR1 0x500C +#define OR2 0x5014 +#define OR3 0x501C +#define OR4 0x5024 +#define OR5 0x502C +#define OR6 0x5034 +#define OR7 0x503C + +#define OR_FCM_AM 0xFFFF8000 +#define OR_FCM_AM_SHIFT 15 +#define OR_FCM_BCTLD 0x00001000 +#define OR_FCM_BCTLD_SHIFT 12 +#define OR_FCM_PGS 0x00000400 +#define OR_FCM_PGS_SHIFT 10 +#define OR_FCM_CSCT 0x00000200 +#define OR_FCM_CSCT_SHIFT 9 +#define OR_FCM_CST 0x00000100 +#define OR_FCM_CST_SHIFT 8 +#define OR_FCM_CHT 0x00000080 +#define OR_FCM_CHT_SHIFT 7 +#define OR_FCM_SCY 0x00000070 +#define OR_FCM_SCY_SHIFT 4 +#define OR_FCM_SCY_1 0x00000010 +#define OR_FCM_SCY_2 0x00000020 +#define OR_FCM_SCY_3 0x00000030 +#define OR_FCM_SCY_4 0x00000040 +#define OR_FCM_SCY_5 0x00000050 +#define OR_FCM_SCY_6 0x00000060 +#define OR_FCM_SCY_7 0x00000070 +#define OR_FCM_RST 0x00000008 +#define OR_FCM_RST_SHIFT 3 +#define OR_FCM_TRLX 0x00000004 +#define OR_FCM_TRLX_SHIFT 2 +#define OR_FCM_EHTR 0x00000002 +#define OR_FCM_EHTR_SHIFT 1 +}; + +struct fsl_lbc_regs { + struct fsl_lbc_bank bank[12]; + u8 res0[0x8]; + __be32 mar; /**< UPM Address Register */ + u8 res1[0x4]; + __be32 mamr; /**< UPMA Mode Register */ +#define MxMR_OP_NO (0 << 28) /**< normal operation */ +#define MxMR_OP_WA (1 << 28) /**< write array */ +#define MxMR_OP_RA (2 << 28) /**< read array */ +#define MxMR_OP_RP (3 << 28) /**< run pattern */ +#define MxMR_MAD 0x3f /**< machine address */ + __be32 mbmr; /**< UPMB Mode Register */ + __be32 mcmr; /**< UPMC Mode Register */ + u8 res2[0x8]; + __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */ + __be32 mdr; /**< UPM Data Register */ + u8 res3[0x4]; + __be32 lsor; /**< Special Operation Initiation Register */ + __be32 lsdmr; /**< SDRAM Mode Register */ + u8 res4[0x8]; + __be32 lurt; /**< UPM Refresh Timer */ + __be32 lsrt; /**< SDRAM Refresh Timer */ + u8 res5[0x8]; + __be32 ltesr; /**< Transfer Error Status Register */ +#define LTESR_BM 0x80000000 +#define LTESR_FCT 0x40000000 +#define LTESR_PAR 0x20000000 +#define LTESR_WP 0x04000000 +#define LTESR_ATMW 
0x00800000 +#define LTESR_ATMR 0x00400000 +#define LTESR_CS 0x00080000 +#define LTESR_UPM 0x00000002 +#define LTESR_CC 0x00000001 +#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) +#define LTESR_MASK (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \ + | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \ + | LTESR_CC) +#define LTESR_CLEAR 0xFFFFFFFF +#define LTECCR_CLEAR 0xFFFFFFFF +#define LTESR_STATUS LTESR_MASK +#define LTEIR_ENABLE LTESR_MASK +#define LTEDR_ENABLE 0x00000000 + __be32 ltedr; /**< Transfer Error Disable Register */ + __be32 lteir; /**< Transfer Error Interrupt Register */ + __be32 lteatr; /**< Transfer Error Attributes Register */ + __be32 ltear; /**< Transfer Error Address Register */ + __be32 lteccr; /**< Transfer Error ECC Register */ + u8 res6[0x8]; + __be32 lbcr; /**< Configuration Register */ +#define LBCR_LDIS 0x80000000 +#define LBCR_LDIS_SHIFT 31 +#define LBCR_BCTLC 0x00C00000 +#define LBCR_BCTLC_SHIFT 22 +#define LBCR_AHD 0x00200000 +#define LBCR_LPBSE 0x00020000 +#define LBCR_LPBSE_SHIFT 17 +#define LBCR_EPAR 0x00010000 +#define LBCR_EPAR_SHIFT 16 +#define LBCR_BMT 0x0000FF00 +#define LBCR_BMT_SHIFT 8 +#define LBCR_BMTPS 0x0000000F +#define LBCR_BMTPS_SHIFT 0 +#define LBCR_INIT 0x00040000 + __be32 lcrr; /**< Clock Ratio Register */ +#define LCRR_DBYP 0x80000000 +#define LCRR_DBYP_SHIFT 31 +#define LCRR_BUFCMDC 0x30000000 +#define LCRR_BUFCMDC_SHIFT 28 +#define LCRR_ECL 0x03000000 +#define LCRR_ECL_SHIFT 24 +#define LCRR_EADC 0x00030000 +#define LCRR_EADC_SHIFT 16 +#define LCRR_CLKDIV 0x0000000F +#define LCRR_CLKDIV_SHIFT 0 + u8 res7[0x8]; + __be32 fmr; /**< Flash Mode Register */ +#define FMR_CWTO 0x0000F000 +#define FMR_CWTO_SHIFT 12 +#define FMR_BOOT 0x00000800 +#define FMR_ECCM 0x00000100 +#define FMR_AL 0x00000030 +#define FMR_AL_SHIFT 4 +#define FMR_OP 0x00000003 +#define FMR_OP_SHIFT 0 + __be32 fir; /**< Flash Instruction Register */ +#define FIR_OP0 0xF0000000 +#define FIR_OP0_SHIFT 28 +#define FIR_OP1 0x0F000000 +#define FIR_OP1_SHIFT 24 +#define FIR_OP2 0x00F00000 +#define FIR_OP2_SHIFT 20 +#define FIR_OP3 0x000F0000 +#define FIR_OP3_SHIFT 16 +#define FIR_OP4 0x0000F000 +#define FIR_OP4_SHIFT 12 +#define FIR_OP5 0x00000F00 +#define FIR_OP5_SHIFT 8 +#define FIR_OP6 0x000000F0 +#define FIR_OP6_SHIFT 4 +#define FIR_OP7 0x0000000F +#define FIR_OP7_SHIFT 0 +#define FIR_OP_NOP 0x0 /* No operation and end of sequence */ +#define FIR_OP_CA 0x1 /* Issue current column address */ +#define FIR_OP_PA 0x2 /* Issue current block+page address */ +#define FIR_OP_UA 0x3 /* Issue user defined address */ +#define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */ +#define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */ +#define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */ +#define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */ +#define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */ +#define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */ +#define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */ +#define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */ +#define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */ +#define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */ +#define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */ +#define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */ + __be32 fcr; /**< Flash Command Register */ +#define FCR_CMD0 0xFF000000 +#define FCR_CMD0_SHIFT 24 +#define FCR_CMD1 0x00FF0000 +#define FCR_CMD1_SHIFT 16 +#define FCR_CMD2 0x0000FF00 +#define FCR_CMD2_SHIFT 8 +#define FCR_CMD3 0x000000FF +#define FCR_CMD3_SHIFT 0 + __be32 
fbar; /**< Flash Block Address Register */ +#define FBAR_BLK 0x00FFFFFF + __be32 fpar; /**< Flash Page Address Register */ +#define FPAR_SP_PI 0x00007C00 +#define FPAR_SP_PI_SHIFT 10 +#define FPAR_SP_MS 0x00000200 +#define FPAR_SP_CI 0x000001FF +#define FPAR_SP_CI_SHIFT 0 +#define FPAR_LP_PI 0x0003F000 +#define FPAR_LP_PI_SHIFT 12 +#define FPAR_LP_MS 0x00000800 +#define FPAR_LP_CI 0x000007FF +#define FPAR_LP_CI_SHIFT 0 + __be32 fbcr; /**< Flash Byte Count Register */ +#define FBCR_BC 0x00000FFF +}; + +/* + * FSL UPM routines + */ +struct fsl_upm { + __be32 __iomem *mxmr; + int width; +}; + +extern u32 fsl_lbc_addr(phys_addr_t addr_base); +extern int fsl_lbc_find(phys_addr_t addr_base); +extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm); + +/** + * fsl_upm_start_pattern - start UPM patterns execution + * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find + * @pat_offset: UPM pattern offset for the command to be executed + * + * This routine programmes UPM so the next memory access that hits an UPM + * will trigger pattern execution, starting at pat_offset. + */ +static inline void fsl_upm_start_pattern(struct fsl_upm *upm, u8 pat_offset) +{ + clrsetbits_be32(upm->mxmr, MxMR_MAD, MxMR_OP_RP | pat_offset); +} + +/** + * fsl_upm_end_pattern - end UPM patterns execution + * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find + * + * This routine reverts UPM to normal operation mode. + */ +static inline void fsl_upm_end_pattern(struct fsl_upm *upm) +{ + clrbits32(upm->mxmr, MxMR_OP_RP); + + while (in_be32(upm->mxmr) & MxMR_OP_RP) + cpu_relax(); +} + +/* overview of the fsl lbc controller */ + +struct fsl_lbc_ctrl { + /* device info */ + struct device *dev; + struct fsl_lbc_regs __iomem *regs; + int irq; + wait_queue_head_t irq_wait; + spinlock_t lock; + void *nand; + + /* status read from LTESR by irq handler */ + unsigned int irq_status; + +#ifdef CONFIG_SUSPEND + /* save regs when system go to deep-sleep */ + struct fsl_lbc_regs *saved_regs; +#endif +}; + +extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, + u32 mar); +extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; + +#endif /* __ASM_FSL_LBC_H */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h new file mode 100644 index 00000000000..169d039ed40 --- /dev/null +++ b/arch/powerpc/include/asm/ftrace.h @@ -0,0 +1,77 @@ +#ifndef _ASM_POWERPC_FTRACE +#define _ASM_POWERPC_FTRACE + +#ifdef CONFIG_FUNCTION_TRACER +#define MCOUNT_ADDR ((long)(_mcount)) +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifdef __ASSEMBLY__ + +/* Based off of objdump optput from glibc */ + +#define MCOUNT_SAVE_FRAME \ + stwu r1,-48(r1); \ + stw r3, 12(r1); \ + stw r4, 16(r1); \ + stw r5, 20(r1); \ + stw r6, 24(r1); \ + mflr r3; \ + lwz r4, 52(r1); \ + mfcr r5; \ + stw r7, 28(r1); \ + stw r8, 32(r1); \ + stw r9, 36(r1); \ + stw r10,40(r1); \ + stw r3, 44(r1); \ + stw r5, 8(r1) + +#define MCOUNT_RESTORE_FRAME \ + lwz r6, 8(r1); \ + lwz r0, 44(r1); \ + lwz r3, 12(r1); \ + mtctr r0; \ + lwz r4, 16(r1); \ + mtcr r6; \ + lwz r5, 20(r1); \ + lwz r6, 24(r1); \ + lwz r0, 52(r1); \ + lwz r7, 28(r1); \ + lwz r8, 32(r1); \ + mtlr r0; \ + lwz r9, 36(r1); \ + lwz r10,40(r1); \ + addi r1, r1, 48 + +#else /* !__ASSEMBLY__ */ +extern void _mcount(void); + +#ifdef CONFIG_DYNAMIC_FTRACE +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* reloction of mcount call site is the same as the address */ + return addr; +} + +struct dyn_arch_ftrace { + struct module 
*mod; +}; +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* __ASSEMBLY__ */ + +#endif + +#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) +{ + /* + * Compare the symbol name with the system call name. Skip the .sys or .SyS + * prefix from the symbol name and the sys prefix from the system call name and + * just match the rest. This is only needed on ppc64 since symbol names on + * 32bit do not start with a period so the generic function will work. + */ + return !strcmp(sym + 4, name + 3); +} +#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h new file mode 100644 index 00000000000..2a9cf845473 --- /dev/null +++ b/arch/powerpc/include/asm/futex.h @@ -0,0 +1,121 @@ +#ifndef _ASM_POWERPC_FUTEX_H +#define _ASM_POWERPC_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> +#include <asm/synch.h> +#include <asm/asm-compat.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile ( \ + PPC_ATOMIC_ENTRY_BARRIER \ +"1: lwarx %0,0,%2\n" \ + insn \ + PPC405_ERR77(0, %2) \ +"2: stwcx. %1,0,%2\n" \ + "bne- 1b\n" \ + PPC_ATOMIC_EXIT_BARRIER \ + "li %1,0\n" \ +"3: .section .fixup,\"ax\"\n" \ +"4: li %1,%3\n" \ + "b 3b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".align 3\n" \ + PPC_LONG "1b,4b,2b,4b\n" \ + ".previous" \ + : "=&r" (oldval), "=&r" (ret) \ + : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ + : "cr0", "memory") + +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 prev; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER +"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%1,%4\n\ + bne- 3f\n" + PPC405_ERR77(0,%3) +"2: stwcx. 
%5,0,%3\n\ + bne- 1b\n" + PPC_ATOMIC_EXIT_BARRIER +"3: .section .fixup,\"ax\"\n\ +4: li %0,%6\n\ + b 3b\n\ + .previous\n\ + .section __ex_table,\"a\"\n\ + .align 3\n\ + " PPC_LONG "1b,4b,2b,4b\n\ + .previous" \ + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) + : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) + : "cc", "memory"); + + *uval = prev; + return ret; +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_FUTEX_H */ diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h new file mode 100644 index 00000000000..38762edb5e5 --- /dev/null +++ b/arch/powerpc/include/asm/gpio.h @@ -0,0 +1,53 @@ +/* + * Generic GPIO API implementation for PowerPC. + * + * Copyright (c) 2007-2008 MontaVista Software, Inc. + * + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __ASM_POWERPC_GPIO_H +#define __ASM_POWERPC_GPIO_H + +#include <linux/errno.h> +#include <asm-generic/gpio.h> + +#ifdef CONFIG_GPIOLIB + +/* + * We don't (yet) implement inlined/rapid versions for on-chip gpios. + * Just call gpiolib. + */ +static inline int gpio_get_value(unsigned int gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned int gpio, int value) +{ + __gpio_set_value(gpio, value); +} + +static inline int gpio_cansleep(unsigned int gpio) +{ + return __gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(unsigned int gpio) +{ + return __gpio_to_irq(gpio); +} + +static inline int irq_to_gpio(unsigned int irq) +{ + return -EINVAL; +} + +#endif /* CONFIG_GPIOLIB */ + +#endif /* __ASM_POWERPC_GPIO_H */ diff --git a/arch/powerpc/include/asm/grackle.h b/arch/powerpc/include/asm/grackle.h new file mode 100644 index 00000000000..bd7812a519d --- /dev/null +++ b/arch/powerpc/include/asm/grackle.h @@ -0,0 +1,12 @@ +#ifndef _ASM_POWERPC_GRACKLE_H +#define _ASM_POWERPC_GRACKLE_H +#ifdef __KERNEL__ +/* + * Functions for setting up and using a MPC106 northbridge + */ + +#include <asm/pci-bridge.h> + +extern void setup_grackle(struct pci_controller *hose); +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_GRACKLE_H */ diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h new file mode 100644 index 00000000000..3147a297012 --- /dev/null +++ b/arch/powerpc/include/asm/hardirq.h @@ -0,0 +1,29 @@ +#ifndef _ASM_POWERPC_HARDIRQ_H +#define _ASM_POWERPC_HARDIRQ_H + +#include <linux/threads.h> +#include <linux/irq.h> + +typedef struct { + unsigned int __softirq_pending; + unsigned int timer_irqs; + unsigned int pmu_irqs; + unsigned int mce_exceptions; + unsigned int spurious_irqs; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define __ARCH_IRQ_STAT + +#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending + +static inline void ack_bad_irq(unsigned int irq) +{ + printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); +} + +extern u64 arch_irq_stat_cpu(unsigned int cpu); +#define arch_irq_stat_cpu arch_irq_stat_cpu + +#endif /* _ASM_POWERPC_HARDIRQ_H */ diff --git a/arch/powerpc/include/asm/heathrow.h b/arch/powerpc/include/asm/heathrow.h new file mode 100644 index 00000000000..93f54958a9d --- /dev/null +++ b/arch/powerpc/include/asm/heathrow.h @@ -0,0 +1,67 @@ +#ifndef _ASM_POWERPC_HEATHROW_H 
+#define _ASM_POWERPC_HEATHROW_H +#ifdef __KERNEL__ +/* + * heathrow.h: definitions for using the "Heathrow" I/O controller chip. + * + * Grabbed from Open Firmware definitions on a PowerBook G3 Series + * + * Copyright (C) 1997 Paul Mackerras. + */ + +/* Front light color on Yikes/B&W G3. 32 bits */ +#define HEATHROW_FRONT_LIGHT 0x32 /* (set to 0 or 0xffffffff) */ + +/* Brightness/contrast (gossamer iMac ?). 8 bits */ +#define HEATHROW_BRIGHTNESS_CNTL 0x32 +#define HEATHROW_CONTRAST_CNTL 0x33 + +/* offset from ohare base for feature control register */ +#define HEATHROW_MBCR 0x34 /* Media bay control */ +#define HEATHROW_FCR 0x38 /* Feature control */ +#define HEATHROW_AUX_CNTL_REG 0x3c /* Aux control */ + +/* + * Bits in feature control register. + * Bits postfixed with a _N are in inverse logic + */ +#define HRW_SCC_TRANS_EN_N 0x00000001 /* Also controls modem power */ +#define HRW_BAY_POWER_N 0x00000002 +#define HRW_BAY_PCI_ENABLE 0x00000004 +#define HRW_BAY_IDE_ENABLE 0x00000008 +#define HRW_BAY_FLOPPY_ENABLE 0x00000010 +#define HRW_IDE0_ENABLE 0x00000020 +#define HRW_IDE0_RESET_N 0x00000040 +#define HRW_BAY_DEV_MASK 0x0000001c +#define HRW_BAY_RESET_N 0x00000080 +#define HRW_IOBUS_ENABLE 0x00000100 /* Internal IDE ? */ +#define HRW_SCC_ENABLE 0x00000200 +#define HRW_MESH_ENABLE 0x00000400 +#define HRW_SWIM_ENABLE 0x00000800 +#define HRW_SOUND_POWER_N 0x00001000 +#define HRW_SOUND_CLK_ENABLE 0x00002000 +#define HRW_SCCA_IO 0x00004000 +#define HRW_SCCB_IO 0x00008000 +#define HRW_PORT_OR_DESK_VIA_N 0x00010000 /* This one is 0 on PowerBook */ +#define HRW_PWM_MON_ID_N 0x00020000 /* ??? (0) */ +#define HRW_HOOK_MB_CNT_N 0x00040000 /* ??? (0) */ +#define HRW_SWIM_CLONE_FLOPPY 0x00080000 /* ??? (0) */ +#define HRW_AUD_RUN22 0x00100000 /* ??? (1) */ +#define HRW_SCSI_LINK_MODE 0x00200000 /* Read ??? (1) */ +#define HRW_ARB_BYPASS 0x00400000 /* Disable internal PCI arbitrer */ +#define HRW_IDE1_RESET_N 0x00800000 /* Media bay */ +#define HRW_SLOW_SCC_PCLK 0x01000000 /* ??? (0) */ +#define HRW_RESET_SCC 0x02000000 +#define HRW_MFDC_CELL_ENABLE 0x04000000 /* ??? (0) */ +#define HRW_USE_MFDC 0x08000000 /* ??? (0) */ +#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */ +#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */ + +/* We OR those features at boot on desktop G3s */ +#define HRW_DEFAULTS (HRW_SCCA_IO | HRW_SCCB_IO | HRW_SCC_ENABLE) + +/* Looks like Heathrow has some sort of GPIOs as well... */ +#define HRW_GPIO_MODEM_RESET 0x6d + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_HEATHROW_H */ diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h new file mode 100644 index 00000000000..dbc264010d0 --- /dev/null +++ b/arch/powerpc/include/asm/highmem.h @@ -0,0 +1,105 @@ +/* + * highmem.h: virtual kernel memory mappings for high memory + * + * PowerPC version, stolen from the i386 version. + * + * Used in CONFIG_HIGHMEM systems for memory pages which + * are not addressable by direct kernel virtual addresses. + * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * up to 16 Terrabyte physical memory. With current x86 CPUs + * we now support up to 64 Gigabytes physical RAM. 
+ * + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> + */ + +#ifndef _ASM_HIGHMEM_H +#define _ASM_HIGHMEM_H + +#ifdef __KERNEL__ + +#include <linux/interrupt.h> +#include <asm/kmap_types.h> +#include <asm/tlbflush.h> +#include <asm/page.h> +#include <asm/fixmap.h> + +extern pte_t *kmap_pte; +extern pgprot_t kmap_prot; +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +/* + * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte + * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP + * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP + * in case of 16K/64K/256K page sizes. + */ +#ifdef CONFIG_PPC_4K_PAGES +#define PKMAP_ORDER PTE_SHIFT +#else +#define PKMAP_ORDER 9 +#endif +#define LAST_PKMAP (1 << PKMAP_ORDER) +#ifndef CONFIG_PPC_4K_PAGES +#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) +#else +#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) +#endif +#define LAST_PKMAP_MASK (LAST_PKMAP-1) +#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); +extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); +extern void __kunmap_atomic(void *kvaddr); + +static inline void *kmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_high(page); +} + +static inline void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +static inline void *__kmap_atomic(struct page *page) +{ + return kmap_atomic_prot(page, kmap_prot); +} + +static inline struct page *kmap_atomic_to_page(void *ptr) +{ + unsigned long idx, vaddr = (unsigned long) ptr; + pte_t *pte; + + if (vaddr < FIXADDR_START) + return virt_to_page(ptr); + + idx = virt_to_fix(vaddr); + pte = kmap_pte - (idx - FIX_KMAP_BEGIN); + return pte_page(*pte); +} + + +#define flush_cache_kmaps() flush_cache_all() + +#endif /* __KERNEL__ */ + +#endif /* _ASM_HIGHMEM_H */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h new file mode 100644 index 00000000000..dfdb95bc59a --- /dev/null +++ b/arch/powerpc/include/asm/hugetlb.h @@ -0,0 +1,175 @@ +#ifndef _ASM_POWERPC_HUGETLB_H +#define _ASM_POWERPC_HUGETLB_H + +#ifdef CONFIG_HUGETLB_PAGE +#include <asm/page.h> + +extern struct kmem_cache *hugepte_cache; + +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + BUG_ON(!hugepd_ok(hpd)); + return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return hpd.pd & HUGEPD_SHIFT_MASK; +} + +static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, + unsigned pdshift) +{ + /* + * On FSL BookE, we have multiple higher-level table entries that + * point to the same hugepte. Just use the first one since they're all + * identical. So for that case, idx=0. 
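+ *
+ * On other platforms idx selects one huge PTE out of the directory.  As an
+ * illustration only (the numbers are made up for the example): with
+ * pdshift = 28 and a huge-page shift of 24 (16MB pages), bits 27..24 of the
+ * address pick the entry, i.e. idx = (addr & ((1UL << 28) - 1)) >> 24,
+ * which ranges over 0..15.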
+ */ + unsigned long idx = 0; + + pte_t *dir = hugepd_page(*hpdp); +#ifndef CONFIG_PPC_FSL_BOOK3E + idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp); +#endif + + return dir + idx; +} + +pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, + unsigned long addr, unsigned *shift); + +void flush_dcache_icache_hugepage(struct page *page); + +#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT) +int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, + unsigned long len); +#else +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + return 0; +} +#endif + +void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, + pte_t pte); +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling); + +/* + * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs + * to override the version in mm/hugetlb.c + */ +#define vma_mmu_pagesize vma_mmu_pagesize + +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. + */ +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ +#ifdef CONFIG_PPC64 + return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); +#else + return __pte(pte_update(ptep, ~0UL, 0)); +#endif +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t pte; + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + flush_tlb_page(vma, addr); +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ +#ifdef HUGETLB_NEED_PRELOAD + /* + * The "return 1" forces a call of update_mmu_cache, which will write a + * TLB entry. Without this, platforms that don't do a write of the TLB + * entry in the TLB miss handler asm will fault ad infinitum. + */ + ptep_set_access_flags(vma, addr, ptep, pte, dirty); + return 1; +#else + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +#endif +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#else /* ! CONFIG_HUGETLB_PAGE */ +static inline void flush_hugetlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} +#endif /* CONFIG_HUGETLB_PAGE */ + + +/* + * FSL Book3E platforms require special gpage handling - the gpages + * are reserved early in the boot process by memblock instead of via + * the .dts as on IBM platforms. 
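+ *
+ * A minimal sketch of the expected call site (the board function name is
+ * hypothetical; the only requirement is that it runs early enough in boot
+ * for the memblock reservation to be honoured):
+ *
+ *	void __init my_fsl_board_setup_arch(void)
+ *	{
+ *		...
+ *		reserve_hugetlb_gpages();
+ *		...
+ *	}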
+ */ +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E) +extern void __init reserve_hugetlb_gpages(void); +#else +static inline void reserve_hugetlb_gpages(void) +{ +} +#endif + +#endif /* _ASM_POWERPC_HUGETLB_H */ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h new file mode 100644 index 00000000000..1c324ff55ea --- /dev/null +++ b/arch/powerpc/include/asm/hvcall.h @@ -0,0 +1,355 @@ +#ifndef _ASM_POWERPC_HVCALL_H +#define _ASM_POWERPC_HVCALL_H +#ifdef __KERNEL__ + +#define HVSC .long 0x44000022 + +#define H_SUCCESS 0 +#define H_BUSY 1 /* Hardware busy -- retry later */ +#define H_CLOSED 2 /* Resource closed */ +#define H_NOT_AVAILABLE 3 +#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */ +#define H_PARTIAL 5 +#define H_IN_PROGRESS 14 /* Kind of like busy */ +#define H_PAGE_REGISTERED 15 +#define H_PARTIAL_STORE 16 +#define H_PENDING 17 /* returned from H_POLL_PENDING */ +#define H_CONTINUE 18 /* Returned from H_Join on success */ +#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */ +#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \ + is a good time to retry */ +#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \ + is a good time to retry */ +#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \ + is a good time to retry */ +#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \ + is a good time to retry */ +#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \ + is a good time to retry */ +#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ + is a good time to retry */ +#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ + +/* Internal value used in book3s_hv kvm support; not returned to guests */ +#define H_TOO_HARD 9999 + +#define H_HARDWARE -1 /* Hardware error */ +#define H_FUNCTION -2 /* Function not supported */ +#define H_PRIVILEGE -3 /* Caller not privileged */ +#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */ +#define H_BAD_MODE -5 /* Illegal msr value */ +#define H_PTEG_FULL -6 /* PTEG is full */ +#define H_NOT_FOUND -7 /* PTE was not found" */ +#define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor" */ +#define H_NO_MEM -9 +#define H_AUTHORITY -10 +#define H_PERMISSION -11 +#define H_DROPPED -12 +#define H_SOURCE_PARM -13 +#define H_DEST_PARM -14 +#define H_REMOTE_PARM -15 +#define H_RESOURCE -16 +#define H_ADAPTER_PARM -17 +#define H_RH_PARM -18 +#define H_RCQ_PARM -19 +#define H_SCQ_PARM -20 +#define H_EQ_PARM -21 +#define H_RT_PARM -22 +#define H_ST_PARM -23 +#define H_SIGT_PARM -24 +#define H_TOKEN_PARM -25 +#define H_MLENGTH_PARM -27 +#define H_MEM_PARM -28 +#define H_MEM_ACCESS_PARM -29 +#define H_ATTR_PARM -30 +#define H_PORT_PARM -31 +#define H_MCG_PARM -32 +#define H_VL_PARM -33 +#define H_TSIZE_PARM -34 +#define H_TRACE_PARM -35 + +#define H_MASK_PARM -37 +#define H_MCG_FULL -38 +#define H_ALIAS_EXIST -39 +#define H_P_COUNTER -40 +#define H_TABLE_FULL -41 +#define H_ALT_TABLE -42 +#define H_MR_CONDITION -43 +#define H_NOT_ENOUGH_RESOURCES -44 +#define H_R_STATE -45 +#define H_RESCINDEND -46 +#define H_MULTI_THREADS_ACTIVE -9005 + + +/* Long Busy is a condition that can be returned by the firmware + * when a call cannot be completed now, but the identical call + * should be retried later. This prevents calls blocking in the + * firmware for long periods of time. 
Annoyingly the firmware can return + * a range of return codes, hinting at how long we should wait before + * retrying. If you don't care for the hint, the macro below is a good + * way to check for the long_busy return codes + */ +#define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \ + && (x <= H_LONG_BUSY_END_RANGE)) + +/* Flags */ +#define H_LARGE_PAGE (1UL<<(63-16)) +#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */ +#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */ +#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */ +#define H_PAGE_STATE_CHANGE (1UL<<(63-28)) +#define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30))) +#define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED) +#define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31))) +#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE +#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ +#define H_ANDCOND (1UL<<(63-33)) +#define H_LOCAL (1UL<<(63-35)) +#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ +#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ +#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ +#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ +#define H_COPY_PAGE (1UL<<(63-49)) +#define H_N (1UL<<(63-61)) +#define H_PP1 (1UL<<(63-62)) +#define H_PP2 (1UL<<(63-63)) + +/* VASI States */ +#define H_VASI_INVALID 0 +#define H_VASI_ENABLED 1 +#define H_VASI_ABORTED 2 +#define H_VASI_SUSPENDING 3 +#define H_VASI_SUSPENDED 4 +#define H_VASI_RESUMED 5 +#define H_VASI_COMPLETED 6 + +/* DABRX flags */ +#define H_DABRX_HYPERVISOR (1UL<<(63-61)) +#define H_DABRX_KERNEL (1UL<<(63-62)) +#define H_DABRX_USER (1UL<<(63-63)) + +/* Each control block has to be on a 4K boundary */ +#define H_CB_ALIGNMENT 4096 + +/* pSeries hypervisor opcodes */ +#define H_REMOVE 0x04 +#define H_ENTER 0x08 +#define H_READ 0x0c +#define H_CLEAR_MOD 0x10 +#define H_CLEAR_REF 0x14 +#define H_PROTECT 0x18 +#define H_GET_TCE 0x1c +#define H_PUT_TCE 0x20 +#define H_SET_SPRG0 0x24 +#define H_SET_DABR 0x28 +#define H_PAGE_INIT 0x2c +#define H_SET_ASR 0x30 +#define H_ASR_ON 0x34 +#define H_ASR_OFF 0x38 +#define H_LOGICAL_CI_LOAD 0x3c +#define H_LOGICAL_CI_STORE 0x40 +#define H_LOGICAL_CACHE_LOAD 0x44 +#define H_LOGICAL_CACHE_STORE 0x48 +#define H_LOGICAL_ICBI 0x4c +#define H_LOGICAL_DCBF 0x50 +#define H_GET_TERM_CHAR 0x54 +#define H_PUT_TERM_CHAR 0x58 +#define H_REAL_TO_LOGICAL 0x5c +#define H_HYPERVISOR_DATA 0x60 +#define H_EOI 0x64 +#define H_CPPR 0x68 +#define H_IPI 0x6c +#define H_IPOLL 0x70 +#define H_XIRR 0x74 +#define H_PERFMON 0x7c +#define H_MIGRATE_DMA 0x78 +#define H_REGISTER_VPA 0xDC +#define H_CEDE 0xE0 +#define H_CONFER 0xE4 +#define H_PROD 0xE8 +#define H_GET_PPP 0xEC +#define H_SET_PPP 0xF0 +#define H_PURR 0xF4 +#define H_PIC 0xF8 +#define H_REG_CRQ 0xFC +#define H_FREE_CRQ 0x100 +#define H_VIO_SIGNAL 0x104 +#define H_SEND_CRQ 0x108 +#define H_COPY_RDMA 0x110 +#define H_REGISTER_LOGICAL_LAN 0x114 +#define H_FREE_LOGICAL_LAN 0x118 +#define H_ADD_LOGICAL_LAN_BUFFER 0x11C +#define H_SEND_LOGICAL_LAN 0x120 +#define H_BULK_REMOVE 0x124 +#define H_MULTICAST_CTRL 0x130 +#define H_SET_XDABR 0x134 +#define H_STUFF_TCE 0x138 +#define H_PUT_TCE_INDIRECT 0x13C +#define H_CHANGE_LOGICAL_LAN_MAC 0x14C +#define H_VTERM_PARTNER_INFO 0x150 +#define H_REGISTER_VTERM 0x154 +#define H_FREE_VTERM 0x158 +#define 
H_RESET_EVENTS 0x15C +#define H_ALLOC_RESOURCE 0x160 +#define H_FREE_RESOURCE 0x164 +#define H_MODIFY_QP 0x168 +#define H_QUERY_QP 0x16C +#define H_REREGISTER_PMR 0x170 +#define H_REGISTER_SMR 0x174 +#define H_QUERY_MR 0x178 +#define H_QUERY_MW 0x17C +#define H_QUERY_HCA 0x180 +#define H_QUERY_PORT 0x184 +#define H_MODIFY_PORT 0x188 +#define H_DEFINE_AQP1 0x18C +#define H_GET_TRACE_BUFFER 0x190 +#define H_DEFINE_AQP0 0x194 +#define H_RESIZE_MR 0x198 +#define H_ATTACH_MCQP 0x19C +#define H_DETACH_MCQP 0x1A0 +#define H_CREATE_RPT 0x1A4 +#define H_REMOVE_RPT 0x1A8 +#define H_REGISTER_RPAGES 0x1AC +#define H_DISABLE_AND_GETC 0x1B0 +#define H_ERROR_DATA 0x1B4 +#define H_GET_HCA_INFO 0x1B8 +#define H_GET_PERF_COUNT 0x1BC +#define H_MANAGE_TRACE 0x1C0 +#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 +#define H_QUERY_INT_STATE 0x1E4 +#define H_POLL_PENDING 0x1D8 +#define H_ILLAN_ATTRIBUTES 0x244 +#define H_MODIFY_HEA_QP 0x250 +#define H_QUERY_HEA_QP 0x254 +#define H_QUERY_HEA 0x258 +#define H_QUERY_HEA_PORT 0x25C +#define H_MODIFY_HEA_PORT 0x260 +#define H_REG_BCMC 0x264 +#define H_DEREG_BCMC 0x268 +#define H_REGISTER_HEA_RPAGES 0x26C +#define H_DISABLE_AND_GET_HEA 0x270 +#define H_GET_HEA_INFO 0x274 +#define H_ALLOC_HEA_RESOURCE 0x278 +#define H_ADD_CONN 0x284 +#define H_DEL_CONN 0x288 +#define H_JOIN 0x298 +#define H_VASI_STATE 0x2A4 +#define H_ENABLE_CRQ 0x2B0 +#define H_GET_EM_PARMS 0x2B8 +#define H_SET_MPP 0x2D0 +#define H_GET_MPP 0x2D4 +#define H_HOME_NODE_ASSOCIATIVITY 0x2EC +#define H_BEST_ENERGY 0x2F4 +#define H_GET_MPP_X 0x314 +#define MAX_HCALL_OPCODE H_GET_MPP_X + +#ifndef __ASSEMBLY__ + +/** + * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments + * @opcode: The hypervisor call to make. + * + * This call supports up to 7 arguments and only returns the status of + * the hcall. Use this version where possible, its slightly faster than + * the other plpar_hcalls. + */ +long plpar_hcall_norets(unsigned long opcode, ...); + +/** + * plpar_hcall: - Make a pseries hypervisor call + * @opcode: The hypervisor call to make. + * @retbuf: Buffer to store up to 4 return arguments in. + * + * This call supports up to 6 arguments and 4 return arguments. Use + * PLPAR_HCALL_BUFSIZE to size the return argument buffer. + * + * Used for all but the craziest of phyp interfaces (see plpar_hcall9) + */ +#define PLPAR_HCALL_BUFSIZE 4 +long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); + +/** + * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats + * @opcode: The hypervisor call to make. + * @retbuf: Buffer to store up to 4 return arguments in. + * + * This call supports up to 6 arguments and 4 return arguments. Use + * PLPAR_HCALL_BUFSIZE to size the return argument buffer. + * + * Used when phyp interface needs to be called in real mode. Similar to + * plpar_hcall, but plpar_hcall_raw works in real mode and does not + * calculate hypervisor call statistics. + */ +long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); + +/** + * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments + * @opcode: The hypervisor call to make. + * @retbuf: Buffer to store up to 9 return arguments in. + * + * This call supports up to 9 arguments and 9 return arguments. Use + * PLPAR_HCALL9_BUFSIZE to size the return argument buffer. 
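+ *
+ * A minimal usage sketch; the opcode and arguments are placeholders, and a
+ * real caller would normally delay according to the long-busy hint before
+ * retrying:
+ *
+ *	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ *	long rc;
+ *
+ *	do {
+ *		rc = plpar_hcall9(H_SOME_OPCODE, retbuf, arg1, arg2);
+ *	} while (H_IS_LONG_BUSY(rc));
+ *	if (rc == H_SUCCESS)
+ *		first_ret = retbuf[0];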
+ */ +#define PLPAR_HCALL9_BUFSIZE 9 +long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...); + +/* For hcall instrumentation. One structure per-hcall, per-CPU */ +struct hcall_stats { + unsigned long num_calls; /* number of calls (on this CPU) */ + unsigned long tb_total; /* total wall time (mftb) of calls. */ + unsigned long purr_total; /* total cpu time (PURR) of calls. */ + unsigned long tb_start; + unsigned long purr_start; +}; +#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) + +struct hvcall_mpp_data { + unsigned long entitled_mem; + unsigned long mapped_mem; + unsigned short group_num; + unsigned short pool_num; + unsigned char mem_weight; + unsigned char unallocated_mem_weight; + unsigned long unallocated_entitlement; /* value in bytes */ + unsigned long pool_size; + signed long loan_request; + unsigned long backing_mem; +}; + +int h_get_mpp(struct hvcall_mpp_data *); + +struct hvcall_mpp_x_data { + unsigned long coalesced_bytes; + unsigned long pool_coalesced_bytes; + unsigned long pool_purr_cycles; + unsigned long pool_spurr_cycles; + unsigned long reserved[3]; +}; + +int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data); + +#ifdef CONFIG_PPC_PSERIES +extern int CMO_PrPSP; +extern int CMO_SecPSP; +extern unsigned long CMO_PageSize; + +static inline int cmo_get_primary_psp(void) +{ + return CMO_PrPSP; +} + +static inline int cmo_get_secondary_psp(void) +{ + return CMO_SecPSP; +} + +static inline unsigned long cmo_get_page_size(void) +{ + return CMO_PageSize; +} +#endif /* CONFIG_PPC_PSERIES */ + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h new file mode 100644 index 00000000000..35ea69e8121 --- /dev/null +++ b/arch/powerpc/include/asm/hvconsole.h @@ -0,0 +1,41 @@ +/* + * hvconsole.h + * Copyright (C) 2004 Ryan S Arnold, IBM Corporation + * + * LPAR console support. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PPC64_HVCONSOLE_H +#define _PPC64_HVCONSOLE_H +#ifdef __KERNEL__ + +/* + * PSeries firmware will only send/recv up to 16 bytes of character data per + * hcall. + */ +#define MAX_VIO_PUT_CHARS 16 +#define SIZE_VIO_GET_CHARS 16 + +/* + * Vio firmware always attempts to fetch MAX_VIO_GET_CHARS chars. 
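+ *
+ * As an illustration, hvc console back-ends typically plug these routines
+ * straight into their ops table (the variable name below is made up, but
+ * struct hv_ops and its get_chars/put_chars hooks are what the hvc core
+ * expects):
+ *
+ *	static const struct hv_ops my_vio_hv_ops = {
+ *		.get_chars = hvc_get_chars,
+ *		.put_chars = hvc_put_chars,
+ *	};
+ *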
The 'count' + * parm is included to conform to put_chars() function pointer template + */ +extern int hvc_get_chars(uint32_t vtermno, char *buf, int count); +extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count); + +#endif /* __KERNEL__ */ +#endif /* _PPC64_HVCONSOLE_H */ diff --git a/arch/powerpc/include/asm/hvcserver.h b/arch/powerpc/include/asm/hvcserver.h new file mode 100644 index 00000000000..67d7da3a4da --- /dev/null +++ b/arch/powerpc/include/asm/hvcserver.h @@ -0,0 +1,59 @@ +/* + * hvcserver.h + * Copyright (C) 2004 Ryan S Arnold, IBM Corporation + * + * PPC64 virtual I/O console server support. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PPC64_HVCSERVER_H +#define _PPC64_HVCSERVER_H +#ifdef __KERNEL__ + +#include <linux/list.h> + +/* Converged Location Code length */ +#define HVCS_CLC_LENGTH 79 + +/** + * hvcs_partner_info - an element in a list of partner info + * @node: list_head denoting this partner_info struct's position in the list of + * partner info. + * @unit_address: The partner unit address of this entry. + * @partition_ID: The partner partition ID of this entry. + * @location_code: The converged location code of this entry + 1 char for the + * null-term. + * + * This structure outlines the format that partner info is presented to a caller + * of the hvcs partner info fetching functions. These are strung together into + * a list using linux kernel lists. + */ +struct hvcs_partner_info { + struct list_head node; + uint32_t unit_address; + uint32_t partition_ID; + char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */ +}; + +extern int hvcs_free_partner_info(struct list_head *head); +extern int hvcs_get_partner_info(uint32_t unit_address, + struct list_head *head, unsigned long *pi_buff); +extern int hvcs_register_connection(uint32_t unit_address, + uint32_t p_partition_ID, uint32_t p_unit_address); +extern int hvcs_free_connection(uint32_t unit_address); + +#endif /* __KERNEL__ */ +#endif /* _PPC64_HVCSERVER_H */ diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h new file mode 100644 index 00000000000..d3f64f36181 --- /dev/null +++ b/arch/powerpc/include/asm/hvsi.h @@ -0,0 +1,94 @@ +#ifndef _HVSI_H +#define _HVSI_H + +#define VS_DATA_PACKET_HEADER 0xff +#define VS_CONTROL_PACKET_HEADER 0xfe +#define VS_QUERY_PACKET_HEADER 0xfd +#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc + +/* control verbs */ +#define VSV_SET_MODEM_CTL 1 /* to service processor only */ +#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */ +#define VSV_CLOSE_PROTOCOL 3 + +/* query verbs */ +#define VSV_SEND_VERSION_NUMBER 1 +#define VSV_SEND_MODEM_CTL_STATUS 2 + +/* yes, these masks are not consecutive. 
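+ * They are bit values in the modem-control word carried by the
+ * VSV_SET_MODEM_CTL / VSV_MODEM_CTL_UPDATE control packets; as used by the
+ * hvsi drivers, HVSI_TSDTR drives DTR and HVSI_TSCD reports carrier detect.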
*/ +#define HVSI_TSDTR 0x01 +#define HVSI_TSCD 0x20 + +#define HVSI_MAX_OUTGOING_DATA 12 +#define HVSI_VERSION 1 + +struct hvsi_header { + uint8_t type; + uint8_t len; + uint16_t seqno; +} __attribute__((packed)); + +struct hvsi_data { + struct hvsi_header hdr; + uint8_t data[HVSI_MAX_OUTGOING_DATA]; +} __attribute__((packed)); + +struct hvsi_control { + struct hvsi_header hdr; + uint16_t verb; + /* optional depending on verb: */ + uint32_t word; + uint32_t mask; +} __attribute__((packed)); + +struct hvsi_query { + struct hvsi_header hdr; + uint16_t verb; +} __attribute__((packed)); + +struct hvsi_query_response { + struct hvsi_header hdr; + uint16_t verb; + uint16_t query_seqno; + union { + uint8_t version; + uint32_t mctrl_word; + } u; +} __attribute__((packed)); + +/* hvsi lib struct definitions */ +#define HVSI_INBUF_SIZE 255 +struct tty_struct; +struct hvsi_priv { + unsigned int inbuf_len; /* data in input buffer */ + unsigned char inbuf[HVSI_INBUF_SIZE]; + unsigned int inbuf_cur; /* Cursor in input buffer */ + unsigned int inbuf_pktlen; /* packet lenght from cursor */ + atomic_t seqno; /* packet sequence number */ + unsigned int opened:1; /* driver opened */ + unsigned int established:1; /* protocol established */ + unsigned int is_console:1; /* used as a kernel console device */ + unsigned int mctrl_update:1; /* modem control updated */ + unsigned short mctrl; /* modem control */ + struct tty_struct *tty; /* tty structure */ + int (*get_chars)(uint32_t termno, char *buf, int count); + int (*put_chars)(uint32_t termno, const char *buf, int count); + uint32_t termno; +}; + +/* hvsi lib functions */ +struct hvc_struct; +extern void hvsilib_init(struct hvsi_priv *pv, + int (*get_chars)(uint32_t termno, char *buf, int count), + int (*put_chars)(uint32_t termno, const char *buf, + int count), + int termno, int is_console); +extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp); +extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp); +extern int hvsilib_read_mctrl(struct hvsi_priv *pv); +extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr); +extern void hvsilib_establish(struct hvsi_priv *pv); +extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count); +extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count); + +#endif /* _HVSI_H */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h new file mode 100644 index 00000000000..80fd4d2b4a6 --- /dev/null +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -0,0 +1,74 @@ +/* + * PowerPC BookIII S hardware breakpoint definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright 2010, IBM Corporation. 
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com> + * + */ + +#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H +#define _PPC_BOOK3S_64_HW_BREAKPOINT_H + +#ifdef __KERNEL__ +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +struct arch_hw_breakpoint { + bool extraneous_interrupt; + u8 len; /* length of the target data symbol */ + int type; + unsigned long address; +}; + +#include <linux/kdebug.h> +#include <asm/reg.h> +#include <asm/system.h> + +struct perf_event; +struct pmu; +struct perf_sample_data; + +#define HW_BREAKPOINT_ALIGN 0x7 +/* Maximum permissible length of any HW Breakpoint */ +#define HW_BREAKPOINT_LEN 0x8 + +extern int hw_breakpoint_slots(int type); +extern int arch_bp_generic_fields(int type, int *gen_bp_type); +extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); +int arch_install_hw_breakpoint(struct perf_event *bp); +void arch_uninstall_hw_breakpoint(struct perf_event *bp); +void hw_breakpoint_pmu_read(struct perf_event *bp); +extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); + +extern struct pmu perf_ops_bp; +extern void ptrace_triggered(struct perf_event *bp, + struct perf_sample_data *data, struct pt_regs *regs); +static inline void hw_breakpoint_disable(void) +{ + set_dabr(0); +} +extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); + +#else /* CONFIG_HAVE_HW_BREAKPOINT */ +static inline void hw_breakpoint_disable(void) { } +static inline void thread_change_pc(struct task_struct *tsk, + struct pt_regs *regs) { } +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif /* __KERNEL__ */ +#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */ diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h new file mode 100644 index 00000000000..bb712c9488b --- /dev/null +++ b/arch/powerpc/include/asm/hw_irq.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_HW_IRQ_H +#define _ASM_POWERPC_HW_IRQ_H + +#ifdef __KERNEL__ + +#include <linux/errno.h> +#include <linux/compiler.h> +#include <asm/ptrace.h> +#include <asm/processor.h> + +extern void timer_interrupt(struct pt_regs *); + +#ifdef CONFIG_PPC64 +#include <asm/paca.h> + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + + asm volatile( + "lbz %0,%1(13)" + : "=r" (flags) + : "i" (offsetof(struct paca_struct, soft_enabled))); + + return flags; +} + +static inline unsigned long arch_local_irq_disable(void) +{ + unsigned long flags, zero; + + asm volatile( + "li %1,0; lbz %0,%2(13); stb %1,%2(13)" + : "=r" (flags), "=&r" (zero) + : "i" (offsetof(struct paca_struct, soft_enabled)) + : "memory"); + + return flags; +} + +extern void arch_local_irq_restore(unsigned long); +extern void iseries_handle_interrupts(void); + +static inline void arch_local_irq_enable(void) +{ + arch_local_irq_restore(1); +} + +static inline unsigned long arch_local_irq_save(void) +{ + return arch_local_irq_disable(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#ifdef CONFIG_PPC_BOOK3E +#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); +#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); +#else +#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) +#define 
__hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) +#endif + +#define hard_irq_disable() \ + do { \ + __hard_irq_disable(); \ + get_paca()->soft_enabled = 0; \ + get_paca()->hard_enabled = 0; \ + } while(0) + +#else /* CONFIG_PPC64 */ + +#define SET_MSR_EE(x) mtmsr(x) + +static inline unsigned long arch_local_save_flags(void) +{ + return mfmsr(); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ +#if defined(CONFIG_BOOKE) + asm volatile("wrtee %0" : : "r" (flags) : "memory"); +#else + mtmsr(flags); +#endif +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); +#ifdef CONFIG_BOOKE + asm volatile("wrteei 0" : : : "memory"); +#else + SET_MSR_EE(flags & ~MSR_EE); +#endif + return flags; +} + +static inline void arch_local_irq_disable(void) +{ +#ifdef CONFIG_BOOKE + asm volatile("wrteei 0" : : : "memory"); +#else + arch_local_irq_save(); +#endif +} + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_BOOKE + asm volatile("wrteei 1" : : : "memory"); +#else + unsigned long msr = mfmsr(); + SET_MSR_EE(msr | MSR_EE); +#endif +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return (flags & MSR_EE) == 0; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#define hard_irq_disable() arch_local_irq_disable() + +#endif /* CONFIG_PPC64 */ + +#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST + +/* + * interrupt-retrigger: should we handle this via lost interrupts and IPIs + * or should we not care like we do now ? --BenH. + */ +struct irq_chip; + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h new file mode 100644 index 00000000000..5b0c98bd46a --- /dev/null +++ b/arch/powerpc/include/asm/hydra.h @@ -0,0 +1,102 @@ +/* + * include/asm-ppc/hydra.h -- Mac I/O `Hydra' definitions + * + * Copyright (C) 1997 Geert Uytterhoeven + * + * This file is based on the following documentation: + * + * Macintosh Technology in the Common Hardware Reference Platform + * Apple Computer, Inc. + * + * © Copyright 1995 Apple Computer, Inc. All rights reserved. + * + * It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf + * You can obtain paper copies of this book from computer bookstores or by + * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San + * Francisco, CA 94104. Reference ISBN 1-55860-393-X. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASMPPC_HYDRA_H +#define _ASMPPC_HYDRA_H + +#ifdef __KERNEL__ + +struct Hydra { + /* DBDMA Controller Register Space */ + char Pad1[0x30]; + u_int CachePD; + u_int IDs; + u_int Feature_Control; + char Pad2[0x7fc4]; + /* DBDMA Channel Register Space */ + char SCSI_DMA[0x100]; + char Pad3[0x300]; + char SCCA_Tx_DMA[0x100]; + char SCCA_Rx_DMA[0x100]; + char SCCB_Tx_DMA[0x100]; + char SCCB_Rx_DMA[0x100]; + char Pad4[0x7800]; + /* Device Register Space */ + char SCSI[0x1000]; + char ADB[0x1000]; + char SCC_Legacy[0x1000]; + char SCC[0x1000]; + char Pad9[0x2000]; + char VIA[0x2000]; + char Pad10[0x28000]; + char OpenPIC[0x40000]; +}; + +extern volatile struct Hydra __iomem *Hydra; + + + /* + * Feature Control Register + */ + +#define HYDRA_FC_SCC_CELL_EN 0x00000001 /* Enable SCC Clock */ +#define HYDRA_FC_SCSI_CELL_EN 0x00000002 /* Enable SCSI Clock */ +#define HYDRA_FC_SCCA_ENABLE 0x00000004 /* Enable SCC A Lines */ +#define HYDRA_FC_SCCB_ENABLE 0x00000008 /* Enable SCC B Lines */ +#define HYDRA_FC_ARB_BYPASS 0x00000010 /* Bypass Internal Arbiter */ +#define HYDRA_FC_RESET_SCC 0x00000020 /* Reset SCC */ +#define HYDRA_FC_MPIC_ENABLE 0x00000040 /* Enable OpenPIC */ +#define HYDRA_FC_SLOW_SCC_PCLK 0x00000080 /* 1=15.6672, 0=25 MHz */ +#define HYDRA_FC_MPIC_IS_MASTER 0x00000100 /* OpenPIC Master Mode */ + + + /* + * OpenPIC Interrupt Sources + */ + +#define HYDRA_INT_SIO 0 +#define HYDRA_INT_SCSI_DMA 1 +#define HYDRA_INT_SCCA_TX_DMA 2 +#define HYDRA_INT_SCCA_RX_DMA 3 +#define HYDRA_INT_SCCB_TX_DMA 4 +#define HYDRA_INT_SCCB_RX_DMA 5 +#define HYDRA_INT_SCSI 6 +#define HYDRA_INT_SCCA 7 +#define HYDRA_INT_SCCB 8 +#define HYDRA_INT_VIA 9 +#define HYDRA_INT_ADB 10 +#define HYDRA_INT_ADB_NMI 11 +#define HYDRA_INT_EXT1 12 /* PCI IRQW */ +#define HYDRA_INT_EXT2 13 /* PCI IRQX */ +#define HYDRA_INT_EXT3 14 /* PCI IRQY */ +#define HYDRA_INT_EXT4 15 /* PCI IRQZ */ +#define HYDRA_INT_EXT5 16 /* IDE Primay/Secondary */ +#define HYDRA_INT_EXT6 17 /* IDE Secondary */ +#define HYDRA_INT_EXT7 18 /* Power Off Request */ +#define HYDRA_INT_SPARE 19 + +extern int hydra_init(void); +extern void macio_adb_init(void); + +#endif /* __KERNEL__ */ + +#endif /* _ASMPPC_HYDRA_H */ diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h new file mode 100644 index 00000000000..105ade297aa --- /dev/null +++ b/arch/powerpc/include/asm/i8259.h @@ -0,0 +1,12 @@ +#ifndef _ASM_POWERPC_I8259_H +#define _ASM_POWERPC_I8259_H +#ifdef __KERNEL__ + +#include <linux/irq.h> + +extern void i8259_init(struct device_node *node, unsigned long intack_addr); +extern unsigned int i8259_irq(void); +extern struct irq_host *i8259_get_host(void); + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_I8259_H */ diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h new file mode 100644 index 00000000000..1a9d9aea21f --- /dev/null +++ b/arch/powerpc/include/asm/ibmebus.h @@ -0,0 +1,60 @@ +/* + * IBM PowerPC eBus Infrastructure Support. + * + * Copyright (c) 2005 IBM Corporation + * Joachim Fenkes <fenkes@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. 
+ * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ASM_EBUS_H +#define _ASM_EBUS_H +#ifdef __KERNEL__ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/mod_devicetable.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +extern struct bus_type ibmebus_bus_type; + +int ibmebus_register_driver(struct of_platform_driver *drv); +void ibmebus_unregister_driver(struct of_platform_driver *drv); + +int ibmebus_request_irq(u32 ist, irq_handler_t handler, + unsigned long irq_flags, const char *devname, + void *dev_id); +void ibmebus_free_irq(u32 ist, void *dev_id); + +#endif /* __KERNEL__ */ +#endif /* _ASM_IBMEBUS_H */ diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h new file mode 100644 index 00000000000..da01b20aea5 --- /dev/null +++ b/arch/powerpc/include/asm/ide.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 1994-1996 Linus Torvalds & authors + * + * This file contains the powerpc architecture specific IDE code. + */ +#ifndef _ASM_POWERPC_IDE_H +#define _ASM_POWERPC_IDE_H + +#include <linux/compiler.h> +#include <asm/io.h> + +#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c)) +#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c)) +#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c)) +#define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c)) + +#endif /* _ASM_POWERPC_IDE_H */ diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h new file mode 100644 index 00000000000..7c64fda5357 --- /dev/null +++ b/arch/powerpc/include/asm/immap_cpm2.h @@ -0,0 +1,647 @@ +/* + * CPM2 Internal Memory Map + * Copyright (c) 1999 Dan Malek (dmalek@jlc.net) + * + * The Internal Memory Map for devices with CPM2 on them. This + * is the superset of all CPM2 devices (8260, 8266, 8280, 8272, + * 8560). + */ +#ifdef __KERNEL__ +#ifndef __IMMAP_CPM2__ +#define __IMMAP_CPM2__ + +#include <linux/types.h> + +/* System configuration registers. 
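+ *
+ * Registers in this map are reached through the cpm2_immr pointer declared
+ * at the bottom of this file; an illustrative read (82xx view of the union,
+ * 85xx parts would use siu_85xx instead):
+ *
+ *	u32 siumcr = in_be32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr);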
+*/ +typedef struct sys_82xx_conf { + u32 sc_siumcr; + u32 sc_sypcr; + u8 res1[6]; + u16 sc_swsr; + u8 res2[20]; + u32 sc_bcr; + u8 sc_ppc_acr; + u8 res3[3]; + u32 sc_ppc_alrh; + u32 sc_ppc_alrl; + u8 sc_lcl_acr; + u8 res4[3]; + u32 sc_lcl_alrh; + u32 sc_lcl_alrl; + u32 sc_tescr1; + u32 sc_tescr2; + u32 sc_ltescr1; + u32 sc_ltescr2; + u32 sc_pdtea; + u8 sc_pdtem; + u8 res5[3]; + u32 sc_ldtea; + u8 sc_ldtem; + u8 res6[163]; +} sysconf_82xx_cpm2_t; + +typedef struct sys_85xx_conf { + u32 sc_cear; + u16 sc_ceer; + u16 sc_cemr; + u8 res1[70]; + u32 sc_smaer; + u8 res2[4]; + u32 sc_smevr; + u32 sc_smctr; + u32 sc_lmaer; + u8 res3[4]; + u32 sc_lmevr; + u32 sc_lmctr; + u8 res4[144]; +} sysconf_85xx_cpm2_t; + +typedef union sys_conf { + sysconf_82xx_cpm2_t siu_82xx; + sysconf_85xx_cpm2_t siu_85xx; +} sysconf_cpm2_t; + + + +/* Memory controller registers. +*/ +typedef struct mem_ctlr { + u32 memc_br0; + u32 memc_or0; + u32 memc_br1; + u32 memc_or1; + u32 memc_br2; + u32 memc_or2; + u32 memc_br3; + u32 memc_or3; + u32 memc_br4; + u32 memc_or4; + u32 memc_br5; + u32 memc_or5; + u32 memc_br6; + u32 memc_or6; + u32 memc_br7; + u32 memc_or7; + u32 memc_br8; + u32 memc_or8; + u32 memc_br9; + u32 memc_or9; + u32 memc_br10; + u32 memc_or10; + u32 memc_br11; + u32 memc_or11; + u8 res1[8]; + u32 memc_mar; + u8 res2[4]; + u32 memc_mamr; + u32 memc_mbmr; + u32 memc_mcmr; + u8 res3[8]; + u16 memc_mptpr; + u8 res4[2]; + u32 memc_mdr; + u8 res5[4]; + u32 memc_psdmr; + u32 memc_lsdmr; + u8 memc_purt; + u8 res6[3]; + u8 memc_psrt; + u8 res7[3]; + u8 memc_lurt; + u8 res8[3]; + u8 memc_lsrt; + u8 res9[3]; + u32 memc_immr; + u32 memc_pcibr0; + u32 memc_pcibr1; + u8 res10[16]; + u32 memc_pcimsk0; + u32 memc_pcimsk1; + u8 res11[52]; +} memctl_cpm2_t; + +/* System Integration Timers. +*/ +typedef struct sys_int_timers { + u8 res1[32]; + u16 sit_tmcntsc; + u8 res2[2]; + u32 sit_tmcnt; + u8 res3[4]; + u32 sit_tmcntal; + u8 res4[16]; + u16 sit_piscr; + u8 res5[2]; + u32 sit_pitc; + u32 sit_pitr; + u8 res6[94]; + u8 res7[390]; +} sit_cpm2_t; + +#define PISCR_PIRQ_MASK ((u16)0xff00) +#define PISCR_PS ((u16)0x0080) +#define PISCR_PIE ((u16)0x0004) +#define PISCR_PTF ((u16)0x0002) +#define PISCR_PTE ((u16)0x0001) + +/* PCI Controller. 
+*/ +typedef struct pci_ctlr { + u32 pci_omisr; + u32 pci_omimr; + u8 res1[8]; + u32 pci_ifqpr; + u32 pci_ofqpr; + u8 res2[8]; + u32 pci_imr0; + u32 pci_imr1; + u32 pci_omr0; + u32 pci_omr1; + u32 pci_odr; + u8 res3[4]; + u32 pci_idr; + u8 res4[20]; + u32 pci_imisr; + u32 pci_imimr; + u8 res5[24]; + u32 pci_ifhpr; + u8 res6[4]; + u32 pci_iftpr; + u8 res7[4]; + u32 pci_iphpr; + u8 res8[4]; + u32 pci_iptpr; + u8 res9[4]; + u32 pci_ofhpr; + u8 res10[4]; + u32 pci_oftpr; + u8 res11[4]; + u32 pci_ophpr; + u8 res12[4]; + u32 pci_optpr; + u8 res13[8]; + u32 pci_mucr; + u8 res14[8]; + u32 pci_qbar; + u8 res15[12]; + u32 pci_dmamr0; + u32 pci_dmasr0; + u32 pci_dmacdar0; + u8 res16[4]; + u32 pci_dmasar0; + u8 res17[4]; + u32 pci_dmadar0; + u8 res18[4]; + u32 pci_dmabcr0; + u32 pci_dmandar0; + u8 res19[86]; + u32 pci_dmamr1; + u32 pci_dmasr1; + u32 pci_dmacdar1; + u8 res20[4]; + u32 pci_dmasar1; + u8 res21[4]; + u32 pci_dmadar1; + u8 res22[4]; + u32 pci_dmabcr1; + u32 pci_dmandar1; + u8 res23[88]; + u32 pci_dmamr2; + u32 pci_dmasr2; + u32 pci_dmacdar2; + u8 res24[4]; + u32 pci_dmasar2; + u8 res25[4]; + u32 pci_dmadar2; + u8 res26[4]; + u32 pci_dmabcr2; + u32 pci_dmandar2; + u8 res27[88]; + u32 pci_dmamr3; + u32 pci_dmasr3; + u32 pci_dmacdar3; + u8 res28[4]; + u32 pci_dmasar3; + u8 res29[4]; + u32 pci_dmadar3; + u8 res30[4]; + u32 pci_dmabcr3; + u32 pci_dmandar3; + u8 res31[344]; + u32 pci_potar0; + u8 res32[4]; + u32 pci_pobar0; + u8 res33[4]; + u32 pci_pocmr0; + u8 res34[4]; + u32 pci_potar1; + u8 res35[4]; + u32 pci_pobar1; + u8 res36[4]; + u32 pci_pocmr1; + u8 res37[4]; + u32 pci_potar2; + u8 res38[4]; + u32 pci_pobar2; + u8 res39[4]; + u32 pci_pocmr2; + u8 res40[50]; + u32 pci_ptcr; + u32 pci_gpcr; + u32 pci_gcr; + u32 pci_esr; + u32 pci_emr; + u32 pci_ecr; + u32 pci_eacr; + u8 res41[4]; + u32 pci_edcr; + u8 res42[4]; + u32 pci_eccr; + u8 res43[44]; + u32 pci_pitar1; + u8 res44[4]; + u32 pci_pibar1; + u8 res45[4]; + u32 pci_picmr1; + u8 res46[4]; + u32 pci_pitar0; + u8 res47[4]; + u32 pci_pibar0; + u8 res48[4]; + u32 pci_picmr0; + u8 res49[4]; + u32 pci_cfg_addr; + u32 pci_cfg_data; + u32 pci_int_ack; + u8 res50[756]; +} pci_cpm2_t; + +/* Interrupt Controller. +*/ +typedef struct interrupt_controller { + u16 ic_sicr; + u8 res1[2]; + u32 ic_sivec; + u32 ic_sipnrh; + u32 ic_sipnrl; + u32 ic_siprr; + u32 ic_scprrh; + u32 ic_scprrl; + u32 ic_simrh; + u32 ic_simrl; + u32 ic_siexr; + u8 res2[88]; +} intctl_cpm2_t; + +/* Clocks and Reset. +*/ +typedef struct clk_and_reset { + u32 car_sccr; + u8 res1[4]; + u32 car_scmr; + u8 res2[4]; + u32 car_rsr; + u32 car_rmr; + u8 res[104]; +} car_cpm2_t; + +/* Input/Output Port control/status registers. + * Names consistent with processor manual, although they are different + * from the original 8xx names....... 
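+ *
+ * Illustrative use only: to drive a port-A pin as a plain GPIO output, a
+ * board file could clear the pin in PPARA (general-purpose rather than a
+ * dedicated function), set it in PDIRA (output), then write PDATA:
+ *
+ *	clrbits32(&cpm2_immr->im_ioport.iop_ppara, pin_mask);
+ *	setbits32(&cpm2_immr->im_ioport.iop_pdira, pin_mask);
+ *	setbits32(&cpm2_immr->im_ioport.iop_pdata, pin_mask);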
+ */ +typedef struct io_port { + u32 iop_pdira; + u32 iop_ppara; + u32 iop_psora; + u32 iop_podra; + u32 iop_pdata; + u8 res1[12]; + u32 iop_pdirb; + u32 iop_pparb; + u32 iop_psorb; + u32 iop_podrb; + u32 iop_pdatb; + u8 res2[12]; + u32 iop_pdirc; + u32 iop_pparc; + u32 iop_psorc; + u32 iop_podrc; + u32 iop_pdatc; + u8 res3[12]; + u32 iop_pdird; + u32 iop_ppard; + u32 iop_psord; + u32 iop_podrd; + u32 iop_pdatd; + u8 res4[12]; +} iop_cpm2_t; + +/* Communication Processor Module Timers +*/ +typedef struct cpm_timers { + u8 cpmt_tgcr1; + u8 res1[3]; + u8 cpmt_tgcr2; + u8 res2[11]; + u16 cpmt_tmr1; + u16 cpmt_tmr2; + u16 cpmt_trr1; + u16 cpmt_trr2; + u16 cpmt_tcr1; + u16 cpmt_tcr2; + u16 cpmt_tcn1; + u16 cpmt_tcn2; + u16 cpmt_tmr3; + u16 cpmt_tmr4; + u16 cpmt_trr3; + u16 cpmt_trr4; + u16 cpmt_tcr3; + u16 cpmt_tcr4; + u16 cpmt_tcn3; + u16 cpmt_tcn4; + u16 cpmt_ter1; + u16 cpmt_ter2; + u16 cpmt_ter3; + u16 cpmt_ter4; + u8 res3[584]; +} cpmtimer_cpm2_t; + +/* DMA control/status registers. +*/ +typedef struct sdma_csr { + u8 res0[24]; + u8 sdma_sdsr; + u8 res1[3]; + u8 sdma_sdmr; + u8 res2[3]; + u8 sdma_idsr1; + u8 res3[3]; + u8 sdma_idmr1; + u8 res4[3]; + u8 sdma_idsr2; + u8 res5[3]; + u8 sdma_idmr2; + u8 res6[3]; + u8 sdma_idsr3; + u8 res7[3]; + u8 sdma_idmr3; + u8 res8[3]; + u8 sdma_idsr4; + u8 res9[3]; + u8 sdma_idmr4; + u8 res10[707]; +} sdma_cpm2_t; + +/* Fast controllers +*/ +typedef struct fcc { + u32 fcc_gfmr; + u32 fcc_fpsmr; + u16 fcc_ftodr; + u8 res1[2]; + u16 fcc_fdsr; + u8 res2[2]; + u16 fcc_fcce; + u8 res3[2]; + u16 fcc_fccm; + u8 res4[2]; + u8 fcc_fccs; + u8 res5[3]; + u8 fcc_ftirr_phy[4]; +} fcc_t; + +/* Fast controllers continued + */ +typedef struct fcc_c { + u32 fcc_firper; + u32 fcc_firer; + u32 fcc_firsr_hi; + u32 fcc_firsr_lo; + u8 fcc_gfemr; + u8 res1[15]; +} fcc_c_t; + +/* TC Layer + */ +typedef struct tclayer { + u16 tc_tcmode; + u16 tc_cdsmr; + u16 tc_tcer; + u16 tc_rcc; + u16 tc_tcmr; + u16 tc_fcc; + u16 tc_ccc; + u16 tc_icc; + u16 tc_tcc; + u16 tc_ecc; + u8 res1[12]; +} tclayer_t; + + +/* I2C +*/ +typedef struct i2c { + u8 i2c_i2mod; + u8 res1[3]; + u8 i2c_i2add; + u8 res2[3]; + u8 i2c_i2brg; + u8 res3[3]; + u8 i2c_i2com; + u8 res4[3]; + u8 i2c_i2cer; + u8 res5[3]; + u8 i2c_i2cmr; + u8 res6[331]; +} i2c_cpm2_t; + +typedef struct scc { /* Serial communication channels */ + u32 scc_gsmrl; + u32 scc_gsmrh; + u16 scc_psmr; + u8 res1[2]; + u16 scc_todr; + u16 scc_dsr; + u16 scc_scce; + u8 res2[2]; + u16 scc_sccm; + u8 res3; + u8 scc_sccs; + u8 res4[8]; +} scc_t; + +typedef struct smc { /* Serial management channels */ + u8 res1[2]; + u16 smc_smcmr; + u8 res2[2]; + u8 smc_smce; + u8 res3[3]; + u8 smc_smcm; + u8 res4[5]; +} smc_t; + +/* Serial Peripheral Interface. +*/ +typedef struct spi_ctrl { + u16 spi_spmode; + u8 res1[4]; + u8 spi_spie; + u8 res2[3]; + u8 spi_spim; + u8 res3[2]; + u8 spi_spcom; + u8 res4[82]; +} spictl_cpm2_t; + +/* CPM Mux. 
+*/ +typedef struct cpmux { + u8 cmx_si1cr; + u8 res1; + u8 cmx_si2cr; + u8 res2; + u32 cmx_fcr; + u32 cmx_scr; + u8 cmx_smr; + u8 res3; + u16 cmx_uar; + u8 res4[16]; +} cpmux_t; + +/* SIRAM control +*/ +typedef struct siram { + u16 si_amr; + u16 si_bmr; + u16 si_cmr; + u16 si_dmr; + u8 si_gmr; + u8 res1; + u8 si_cmdr; + u8 res2; + u8 si_str; + u8 res3; + u16 si_rsr; +} siramctl_t; + +typedef struct mcc { + u16 mcc_mcce; + u8 res1[2]; + u16 mcc_mccm; + u8 res2[2]; + u8 mcc_mccf; + u8 res3[7]; +} mcc_t; + +typedef struct comm_proc { + u32 cp_cpcr; + u32 cp_rccr; + u8 res1[14]; + u16 cp_rter; + u8 res2[2]; + u16 cp_rtmr; + u16 cp_rtscr; + u8 res3[2]; + u32 cp_rtsr; + u8 res4[12]; +} cpm_cpm2_t; + +/* USB Controller. +*/ +typedef struct cpm_usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + __be16 usb_usep[4]; + u8 res2[4]; + __be16 usb_usber; + u8 res3[2]; + __be16 usb_usbmr; + u8 usb_usbs; + u8 res4[7]; +} usb_cpm2_t; + +/* ...and the whole thing wrapped up.... +*/ + +typedef struct immap { + /* Some references are into the unique and known dpram spaces, + * others are from the generic base. + */ +#define im_dprambase im_dpram1 + u8 im_dpram1[16*1024]; + u8 res1[16*1024]; + u8 im_dpram2[4*1024]; + u8 res2[8*1024]; + u8 im_dpram3[4*1024]; + u8 res3[16*1024]; + + sysconf_cpm2_t im_siu_conf; /* SIU Configuration */ + memctl_cpm2_t im_memctl; /* Memory Controller */ + sit_cpm2_t im_sit; /* System Integration Timers */ + pci_cpm2_t im_pci; /* PCI Controller */ + intctl_cpm2_t im_intctl; /* Interrupt Controller */ + car_cpm2_t im_clkrst; /* Clocks and reset */ + iop_cpm2_t im_ioport; /* IO Port control/status */ + cpmtimer_cpm2_t im_cpmtimer; /* CPM timers */ + sdma_cpm2_t im_sdma; /* SDMA control/status */ + + fcc_t im_fcc[3]; /* Three FCCs */ + u8 res4z[32]; + fcc_c_t im_fcc_c[3]; /* Continued FCCs */ + + u8 res4[32]; + + tclayer_t im_tclayer[8]; /* Eight TCLayers */ + u16 tc_tcgsr; + u16 tc_tcger; + + /* First set of baud rate generators. + */ + u8 res[236]; + u32 im_brgc5; + u32 im_brgc6; + u32 im_brgc7; + u32 im_brgc8; + + u8 res5[608]; + + i2c_cpm2_t im_i2c; /* I2C control/status */ + cpm_cpm2_t im_cpm; /* Communication processor */ + + /* Second set of baud rate generators. + */ + u32 im_brgc1; + u32 im_brgc2; + u32 im_brgc3; + u32 im_brgc4; + + scc_t im_scc[4]; /* Four SCCs */ + smc_t im_smc[2]; /* Couple of SMCs */ + spictl_cpm2_t im_spi; /* A SPI */ + cpmux_t im_cpmux; /* CPM clock route mux */ + siramctl_t im_siramctl1; /* First SI RAM Control */ + mcc_t im_mcc1; /* First MCC */ + siramctl_t im_siramctl2; /* Second SI RAM Control */ + mcc_t im_mcc2; /* Second MCC */ + usb_cpm2_t im_usb; /* USB Controller */ + + u8 res6[1153]; + + u16 im_si1txram[256]; + u8 res7[512]; + u16 im_si1rxram[256]; + u8 res8[512]; + u16 im_si2txram[256]; + u8 res9[512]; + u16 im_si2rxram[256]; + u8 res10[512]; + u8 res11[4096]; +} cpm2_map_t; + +extern cpm2_map_t __iomem *cpm2_immr; + +#endif /* __IMMAP_CPM2__ */ +#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h new file mode 100644 index 00000000000..0edb6842b13 --- /dev/null +++ b/arch/powerpc/include/asm/immap_qe.h @@ -0,0 +1,489 @@ +/* + * QUICC Engine (QE) Internal Memory Map. + * The Internal Memory Map for devices with QE on them. This + * is the superset of all QE devices (8360, etc.). + + * Copyright (C) 2006. Freescale Semicondutor, Inc. All rights reserved. 
+ * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef _ASM_POWERPC_IMMAP_QE_H +#define _ASM_POWERPC_IMMAP_QE_H +#ifdef __KERNEL__ + +#include <linux/kernel.h> +#include <asm/io.h> + +#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */ + +/* QE I-RAM */ +struct qe_iram { + __be32 iadd; /* I-RAM Address Register */ + __be32 idata; /* I-RAM Data Register */ + u8 res0[0x78]; +} __attribute__ ((packed)); + +/* QE Interrupt Controller */ +struct qe_ic_regs { + __be32 qicr; + __be32 qivec; + __be32 qripnr; + __be32 qipnr; + __be32 qipxcc; + __be32 qipycc; + __be32 qipwcc; + __be32 qipzcc; + __be32 qimr; + __be32 qrimr; + __be32 qicnr; + u8 res0[0x4]; + __be32 qiprta; + __be32 qiprtb; + u8 res1[0x4]; + __be32 qricr; + u8 res2[0x20]; + __be32 qhivec; + u8 res3[0x1C]; +} __attribute__ ((packed)); + +/* Communications Processor */ +struct cp_qe { + __be32 cecr; /* QE command register */ + __be32 ceccr; /* QE controller configuration register */ + __be32 cecdr; /* QE command data register */ + u8 res0[0xA]; + __be16 ceter; /* QE timer event register */ + u8 res1[0x2]; + __be16 cetmr; /* QE timers mask register */ + __be32 cetscr; /* QE time-stamp timer control register */ + __be32 cetsr1; /* QE time-stamp register 1 */ + __be32 cetsr2; /* QE time-stamp register 2 */ + u8 res2[0x8]; + __be32 cevter; /* QE virtual tasks event register */ + __be32 cevtmr; /* QE virtual tasks mask register */ + __be16 cercr; /* QE RAM control register */ + u8 res3[0x2]; + u8 res4[0x24]; + __be16 ceexe1; /* QE external request 1 event register */ + u8 res5[0x2]; + __be16 ceexm1; /* QE external request 1 mask register */ + u8 res6[0x2]; + __be16 ceexe2; /* QE external request 2 event register */ + u8 res7[0x2]; + __be16 ceexm2; /* QE external request 2 mask register */ + u8 res8[0x2]; + __be16 ceexe3; /* QE external request 3 event register */ + u8 res9[0x2]; + __be16 ceexm3; /* QE external request 3 mask register */ + u8 res10[0x2]; + __be16 ceexe4; /* QE external request 4 event register */ + u8 res11[0x2]; + __be16 ceexm4; /* QE external request 4 mask register */ + u8 res12[0x3A]; + __be32 ceurnr; /* QE microcode revision number register */ + u8 res13[0x244]; +} __attribute__ ((packed)); + +/* QE Multiplexer */ +struct qe_mux { + __be32 cmxgcr; /* CMX general clock route register */ + __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */ + __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */ + __be32 cmxsi1syr; /* CMX SI1 SYNC route register */ + __be32 cmxucr[4]; /* CMX UCCx clock route registers */ + __be32 cmxupcr; /* CMX UPC clock route register */ + u8 res0[0x1C]; +} __attribute__ ((packed)); + +/* QE Timers */ +struct qe_timers { + u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/ + u8 res0[0x3]; + u8 gtcfr2; /* Timer 3 and timer 4 global config register*/ + u8 res1[0xB]; + __be16 gtmdr1; /* Timer 1 mode register */ + __be16 gtmdr2; /* Timer 2 mode register */ + __be16 gtrfr1; /* Timer 1 reference register */ + __be16 gtrfr2; /* Timer 2 reference register */ + __be16 gtcpr1; /* Timer 1 capture register */ + __be16 gtcpr2; /* Timer 2 capture register */ + __be16 gtcnr1; /* Timer 1 counter */ + __be16 gtcnr2; /* Timer 2 counter */ + __be16 gtmdr3; /* Timer 3 mode register */ + __be16 
gtmdr4; /* Timer 4 mode register */ + __be16 gtrfr3; /* Timer 3 reference register */ + __be16 gtrfr4; /* Timer 4 reference register */ + __be16 gtcpr3; /* Timer 3 capture register */ + __be16 gtcpr4; /* Timer 4 capture register */ + __be16 gtcnr3; /* Timer 3 counter */ + __be16 gtcnr4; /* Timer 4 counter */ + __be16 gtevr1; /* Timer 1 event register */ + __be16 gtevr2; /* Timer 2 event register */ + __be16 gtevr3; /* Timer 3 event register */ + __be16 gtevr4; /* Timer 4 event register */ + __be16 gtps; /* Timer 1 prescale register */ + u8 res2[0x46]; +} __attribute__ ((packed)); + +/* BRG */ +struct qe_brg { + __be32 brgc[16]; /* BRG configuration registers */ + u8 res0[0x40]; +} __attribute__ ((packed)); + +/* SPI */ +struct spi { + u8 res0[0x20]; + __be32 spmode; /* SPI mode register */ + u8 res1[0x2]; + u8 spie; /* SPI event register */ + u8 res2[0x1]; + u8 res3[0x2]; + u8 spim; /* SPI mask register */ + u8 res4[0x1]; + u8 res5[0x1]; + u8 spcom; /* SPI command register */ + u8 res6[0x2]; + __be32 spitd; /* SPI transmit data register (cpu mode) */ + __be32 spird; /* SPI receive data register (cpu mode) */ + u8 res7[0x8]; +} __attribute__ ((packed)); + +/* SI */ +struct si1 { + __be16 siamr1; /* SI1 TDMA mode register */ + __be16 sibmr1; /* SI1 TDMB mode register */ + __be16 sicmr1; /* SI1 TDMC mode register */ + __be16 sidmr1; /* SI1 TDMD mode register */ + u8 siglmr1_h; /* SI1 global mode register high */ + u8 res0[0x1]; + u8 sicmdr1_h; /* SI1 command register high */ + u8 res2[0x1]; + u8 sistr1_h; /* SI1 status register high */ + u8 res3[0x1]; + __be16 sirsr1_h; /* SI1 RAM shadow address register high */ + u8 sitarc1; /* SI1 RAM counter Tx TDMA */ + u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ + u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ + u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ + u8 sirarc1; /* SI1 RAM counter Rx TDMA */ + u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ + u8 sircrc1; /* SI1 RAM counter Rx TDMC */ + u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ + u8 res4[0x8]; + __be16 siemr1; /* SI1 TDME mode register 16 bits */ + __be16 sifmr1; /* SI1 TDMF mode register 16 bits */ + __be16 sigmr1; /* SI1 TDMG mode register 16 bits */ + __be16 sihmr1; /* SI1 TDMH mode register 16 bits */ + u8 siglmg1_l; /* SI1 global mode register low 8 bits */ + u8 res5[0x1]; + u8 sicmdr1_l; /* SI1 command register low 8 bits */ + u8 res6[0x1]; + u8 sistr1_l; /* SI1 status register low 8 bits */ + u8 res7[0x1]; + __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/ + u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ + u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ + u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ + u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ + u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ + u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ + u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ + u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ + u8 res8[0x8]; + __be32 siml1; /* SI1 multiframe limit register */ + u8 siedm1; /* SI1 extended diagnostic mode register */ + u8 res9[0xBB]; +} __attribute__ ((packed)); + +/* SI Routing Tables */ +struct sir { + u8 tx[0x400]; + u8 rx[0x400]; + u8 res0[0x800]; +} __attribute__ ((packed)); + +/* USB Controller */ +struct qe_usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + __be16 usb_usep[4]; + u8 res2[4]; + __be16 usb_usber; + u8 res3[2]; + __be16 usb_usbmr; + u8 res4[1]; + u8 usb_usbs; + __be16 usb_ussft; + u8 res5[2]; + __be16 usb_usfrn; + u8 res6[0x22]; +} __attribute__ ((packed)); + +/* MCC */ +struct qe_mcc 
{ + __be32 mcce; /* MCC event register */ + __be32 mccm; /* MCC mask register */ + __be32 mccf; /* MCC configuration register */ + __be32 merl; /* MCC emergency request level register */ + u8 res0[0xF0]; +} __attribute__ ((packed)); + +/* QE UCC Slow */ +struct ucc_slow { + __be32 gumr_l; /* UCCx general mode register (low) */ + __be32 gumr_h; /* UCCx general mode register (high) */ + __be16 upsmr; /* UCCx protocol-specific mode register */ + u8 res0[0x2]; + __be16 utodr; /* UCCx transmit on demand register */ + __be16 udsr; /* UCCx data synchronization register */ + __be16 ucce; /* UCCx event register */ + u8 res1[0x2]; + __be16 uccm; /* UCCx mask register */ + u8 res2[0x1]; + u8 uccs; /* UCCx status register */ + u8 res3[0x24]; + __be16 utpt; + u8 res4[0x52]; + u8 guemr; /* UCC general extended mode register */ +} __attribute__ ((packed)); + +/* QE UCC Fast */ +struct ucc_fast { + __be32 gumr; /* UCCx general mode register */ + __be32 upsmr; /* UCCx protocol-specific mode register */ + __be16 utodr; /* UCCx transmit on demand register */ + u8 res0[0x2]; + __be16 udsr; /* UCCx data synchronization register */ + u8 res1[0x2]; + __be32 ucce; /* UCCx event register */ + __be32 uccm; /* UCCx mask register */ + u8 uccs; /* UCCx status register */ + u8 res2[0x7]; + __be32 urfb; /* UCC receive FIFO base */ + __be16 urfs; /* UCC receive FIFO size */ + u8 res3[0x2]; + __be16 urfet; /* UCC receive FIFO emergency threshold */ + __be16 urfset; /* UCC receive FIFO special emergency + threshold */ + __be32 utfb; /* UCC transmit FIFO base */ + __be16 utfs; /* UCC transmit FIFO size */ + u8 res4[0x2]; + __be16 utfet; /* UCC transmit FIFO emergency threshold */ + u8 res5[0x2]; + __be16 utftt; /* UCC transmit FIFO transmit threshold */ + u8 res6[0x2]; + __be16 utpt; /* UCC transmit polling timer */ + u8 res7[0x2]; + __be32 urtry; /* UCC retry counter register */ + u8 res8[0x4C]; + u8 guemr; /* UCC general extended mode register */ +} __attribute__ ((packed)); + +struct ucc { + union { + struct ucc_slow slow; + struct ucc_fast fast; + u8 res[0x200]; /* UCC blocks are 512 bytes each */ + }; +} __attribute__ ((packed)); + +/* MultiPHY UTOPIA POS Controllers (UPC) */ +struct upc { + __be32 upgcr; /* UTOPIA/POS general configuration register */ + __be32 uplpa; /* UTOPIA/POS last PHY address */ + __be32 uphec; /* ATM HEC register */ + __be32 upuc; /* UTOPIA/POS UCC configuration */ + __be32 updc1; /* UTOPIA/POS device 1 configuration */ + __be32 updc2; /* UTOPIA/POS device 2 configuration */ + __be32 updc3; /* UTOPIA/POS device 3 configuration */ + __be32 updc4; /* UTOPIA/POS device 4 configuration */ + __be32 upstpa; /* UTOPIA/POS STPA threshold */ + u8 res0[0xC]; + __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */ + __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */ + __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */ + __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */ + __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */ + __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */ + __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */ + __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */ + __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */ + __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */ + __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */ + __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */ + __be32 upde1; /* UTOPIA/POS device 1 event */ + __be32 upde2; /* UTOPIA/POS device 2 event */ + __be32 upde3; /* UTOPIA/POS device 3 event */ + 
__be32 upde4; /* UTOPIA/POS device 4 event */ + __be16 uprp1; + __be16 uprp2; + __be16 uprp3; + __be16 uprp4; + u8 res1[0x8]; + __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */ + __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */ + __be16 uptirr1_2; /* Device 1 transmit internal rate 2 */ + __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */ + __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */ + __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */ + __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */ + __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */ + __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */ + __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */ + __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */ + __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */ + __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */ + __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */ + __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */ + __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */ + __be32 uper1; /* Device 1 port enable register */ + __be32 uper2; /* Device 2 port enable register */ + __be32 uper3; /* Device 3 port enable register */ + __be32 uper4; /* Device 4 port enable register */ + u8 res2[0x150]; +} __attribute__ ((packed)); + +/* SDMA */ +struct sdma { + __be32 sdsr; /* Serial DMA status register */ + __be32 sdmr; /* Serial DMA mode register */ + __be32 sdtr1; /* SDMA system bus threshold register */ + __be32 sdtr2; /* SDMA secondary bus threshold register */ + __be32 sdhy1; /* SDMA system bus hysteresis register */ + __be32 sdhy2; /* SDMA secondary bus hysteresis register */ + __be32 sdta1; /* SDMA system bus address register */ + __be32 sdta2; /* SDMA secondary bus address register */ + __be32 sdtm1; /* SDMA system bus MSNUM register */ + __be32 sdtm2; /* SDMA secondary bus MSNUM register */ + u8 res0[0x10]; + __be32 sdaqr; /* SDMA address bus qualify register */ + __be32 sdaqmr; /* SDMA address bus qualify mask register */ + u8 res1[0x4]; + __be32 sdebcr; /* SDMA CAM entries base register */ + u8 res2[0x38]; +} __attribute__ ((packed)); + +/* Debug Space */ +struct dbg { + __be32 bpdcr; /* Breakpoint debug command register */ + __be32 bpdsr; /* Breakpoint debug status register */ + __be32 bpdmr; /* Breakpoint debug mask register */ + __be32 bprmrr0; /* Breakpoint request mode risc register 0 */ + __be32 bprmrr1; /* Breakpoint request mode risc register 1 */ + u8 res0[0x8]; + __be32 bprmtr0; /* Breakpoint request mode trb register 0 */ + __be32 bprmtr1; /* Breakpoint request mode trb register 1 */ + u8 res1[0x8]; + __be32 bprmir; /* Breakpoint request mode immediate register */ + __be32 bprmsr; /* Breakpoint request mode serial register */ + __be32 bpemr; /* Breakpoint exit mode register */ + u8 res2[0x48]; +} __attribute__ ((packed)); + +/* + * RISC Special Registers (Trap and Breakpoint). These are described in + * the QE Developer's Handbook. 
+ */ +struct rsp { + __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */ + u8 res0[64]; + __be32 ibcr0; + __be32 ibs0; + __be32 ibcnr0; + u8 res1[4]; + __be32 ibcr1; + __be32 ibs1; + __be32 ibcnr1; + __be32 npcr; + __be32 dbcr; + __be32 dbar; + __be32 dbamr; + __be32 dbsr; + __be32 dbcnr; + u8 res2[12]; + __be32 dbdr_h; + __be32 dbdr_l; + __be32 dbdmr_h; + __be32 dbdmr_l; + __be32 bsr; + __be32 bor; + __be32 bior; + u8 res3[4]; + __be32 iatr[4]; + __be32 eccr; /* Exception control configuration register */ + __be32 eicr; + u8 res4[0x100-0xf8]; +} __attribute__ ((packed)); + +struct qe_immap { + struct qe_iram iram; /* I-RAM */ + struct qe_ic_regs ic; /* Interrupt Controller */ + struct cp_qe cp; /* Communications Processor */ + struct qe_mux qmx; /* QE Multiplexer */ + struct qe_timers qet; /* QE Timers */ + struct spi spi[0x2]; /* spi */ + struct qe_mcc mcc; /* mcc */ + struct qe_brg brg; /* brg */ + struct qe_usb_ctlr usb; /* USB */ + struct si1 si1; /* SI */ + u8 res11[0x800]; + struct sir sir; /* SI Routing Tables */ + struct ucc ucc1; /* ucc1 */ + struct ucc ucc3; /* ucc3 */ + struct ucc ucc5; /* ucc5 */ + struct ucc ucc7; /* ucc7 */ + u8 res12[0x600]; + struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/ + struct ucc ucc2; /* ucc2 */ + struct ucc ucc4; /* ucc4 */ + struct ucc ucc6; /* ucc6 */ + struct ucc ucc8; /* ucc8 */ + u8 res13[0x600]; + struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/ + struct sdma sdma; /* SDMA */ + struct dbg dbg; /* 0x104080 - 0x1040FF + Debug Space */ + struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF + RISC Special Registers + (Trap and Breakpoint) */ + u8 res14[0x300]; /* 0x104300 - 0x1045FF */ + u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */ + u8 res16[0x8000]; /* 0x108000 - 0x110000 */ + u8 muram[0xC000]; /* 0x110000 - 0x11C000 + Multi-user RAM */ + u8 res17[0x24000]; /* 0x11C000 - 0x140000 */ + u8 res18[0xC0000]; /* 0x140000 - 0x200000 */ +} __attribute__ ((packed)); + +extern struct qe_immap __iomem *qe_immr; +extern phys_addr_t get_qe_base(void); + +/* + * Returns the offset within the QE address space of the given pointer. + * + * Note that the QE does not support 36-bit physical addresses, so if + * get_qe_base() returns a number above 4GB, the caller will probably fail. + */ +static inline phys_addr_t immrbar_virt_to_phys(void *address) +{ + void *q = (void *)qe_immr; + + /* Is it a MURAM address? 
*/ + if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) + return get_qe_base() + (address - q); + + /* It's an address returned by kmalloc */ + return virt_to_phys(address); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_IMMAP_QE_H */ diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h new file mode 100644 index 00000000000..44d7927aec6 --- /dev/null +++ b/arch/powerpc/include/asm/io-defs.h @@ -0,0 +1,60 @@ +/* This file is meant to be include multiple times by other headers */ +/* last 2 argments are used by platforms/cell/io-workarounds.[ch] */ + +DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr) + +#ifdef __powerpc64__ +DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr) +DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr) +#endif /* __powerpc64__ */ + +DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port) +DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port) +DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port) +DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port) +DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port) +DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port) + +DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), + (a, b, c), mem, a) +DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), + (a, b, c), mem, a) +DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), + (a, b, c), mem, a) +DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), + (a, b, c), mem, a) +DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), + (a, b, c), mem, a) +DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c), + (a, b, c), mem, a) + +DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), + (p, b, c), pio, p) +DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), + (p, b, c), pio, p) +DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), + (p, b, c), pio, p) +DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), + (p, b, c), pio, p) +DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), + (p, b, c), pio, p) +DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), + (p, b, c), pio, p) + +DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n), + (a, c, n), mem, a) +DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n), + (d, s, n), mem, s) +DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR 
d, const void *s, unsigned long n), + (d, s, n), mem, d) diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h new file mode 100644 index 00000000000..fbae4928692 --- /dev/null +++ b/arch/powerpc/include/asm/io-workarounds.h @@ -0,0 +1,48 @@ +/* + * Support PCI IO workaround + * + * (C) Copyright 2007-2008 TOSHIBA CORPORATION + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _IO_WORKAROUNDS_H +#define _IO_WORKAROUNDS_H + +#include <linux/io.h> +#include <asm/pci-bridge.h> + +/* Bus info */ +struct iowa_bus { + struct pci_controller *phb; + struct ppc_pci_io *ops; + void *private; +}; + +void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, + int (*)(struct iowa_bus *, void *), void *); +struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); +struct iowa_bus *iowa_pio_find_bus(unsigned long); + +extern struct ppc_pci_io spiderpci_ops; +extern int spiderpci_iowa_init(struct iowa_bus *, void *); + +#define SPIDER_PCI_REG_BASE 0xd000 +#define SPIDER_PCI_REG_SIZE 0x1000 +#define SPIDER_PCI_VCI_CNTL_STAT 0x0110 +#define SPIDER_PCI_DUMMY_READ 0x0810 +#define SPIDER_PCI_DUMMY_READ_BASE 0x0814 + +#endif /* _IO_WORKAROUNDS_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h new file mode 100644 index 00000000000..a3855b81ead --- /dev/null +++ b/arch/powerpc/include/asm/io.h @@ -0,0 +1,795 @@ +#ifndef _ASM_POWERPC_IO_H +#define _ASM_POWERPC_IO_H +#ifdef __KERNEL__ + +#define ARCH_HAS_IOREMAP_WC + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* Check of existence of legacy devices */ +extern int check_legacy_ioport(unsigned long base_port); +#define I8042_DATA_REG 0x60 +#define FDC_BASE 0x3f0 +/* only relevant for PReP */ +#define _PIDXR 0x279 +#define _PNPWRP 0xa79 +#define PNPBIOS_BASE 0xf000 + +#include <linux/device.h> +#include <linux/io.h> + +#include <linux/compiler.h> +#include <asm/page.h> +#include <asm/byteorder.h> +#include <asm/synch.h> +#include <asm/delay.h> +#include <asm/mmu.h> + +#include <asm-generic/iomap.h> + +#ifdef CONFIG_PPC64 +#include <asm/paca.h> +#endif + +#define SIO_CONFIG_RA 0x398 +#define SIO_CONFIG_RD 0x399 + +#define SLOW_DOWN_IO + +/* 32 bits uses slightly different variables for the various IO + * bases. 
Most of this file only uses _IO_BASE though which we + * define properly based on the platform + */ +#ifndef CONFIG_PCI +#define _IO_BASE 0 +#define _ISA_MEM_BASE 0 +#define PCI_DRAM_OFFSET 0 +#elif defined(CONFIG_PPC32) +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET pci_dram_offset +#else +#define _IO_BASE pci_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET 0 +#endif + +extern unsigned long isa_io_base; +extern unsigned long pci_io_base; +extern unsigned long pci_dram_offset; + +extern resource_size_t isa_mem_base; + +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO) +#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits +#endif + +/* + * + * Low level MMIO accessors + * + * This provides the non-bus specific accessors to MMIO. Those are PowerPC + * specific and thus shouldn't be used in generic code. The accessors + * provided here are: + * + * in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64 + * out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64 + * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns + * + * Those operate directly on a kernel virtual address. Note that the prototype + * for the out_* accessors has the arguments in opposite order from the usual + * linux PCI accessors. Unlike those, they take the address first and the value + * next. + * + * Note: I might drop the _ns suffix on the stream operations soon as it is + * simply normal for stream operations to not swap in the first place. + * + */ + +#ifdef CONFIG_PPC64 +#define IO_SET_SYNC_FLAG() do { local_paca->io_sync = 1; } while(0) +#else +#define IO_SET_SYNC_FLAG() +#endif + +/* gcc 4.0 and older doesn't have 'Z' constraint */ +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0) +#define DEF_MMIO_IN_LE(name, size, insn) \ +static inline u##size name(const volatile u##size __iomem *addr) \ +{ \ + u##size ret; \ + __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \ + : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \ + return ret; \ +} + +#define DEF_MMIO_OUT_LE(name, size, insn) \ +static inline void name(volatile u##size __iomem *addr, u##size val) \ +{ \ + __asm__ __volatile__("sync;"#insn" %1,0,%2" \ + : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \ + IO_SET_SYNC_FLAG(); \ +} +#else /* newer gcc */ +#define DEF_MMIO_IN_LE(name, size, insn) \ +static inline u##size name(const volatile u##size __iomem *addr) \ +{ \ + u##size ret; \ + __asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync" \ + : "=r" (ret) : "Z" (*addr) : "memory"); \ + return ret; \ +} + +#define DEF_MMIO_OUT_LE(name, size, insn) \ +static inline void name(volatile u##size __iomem *addr, u##size val) \ +{ \ + __asm__ __volatile__("sync;"#insn" %1,%y0" \ + : "=Z" (*addr) : "r" (val) : "memory"); \ + IO_SET_SYNC_FLAG(); \ +} +#endif + +#define DEF_MMIO_IN_BE(name, size, insn) \ +static inline u##size name(const volatile u##size __iomem *addr) \ +{ \ + u##size ret; \ + __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ + : "=r" (ret) : "m" (*addr) : "memory"); \ + return ret; \ +} + +#define DEF_MMIO_OUT_BE(name, size, insn) \ +static inline void name(volatile u##size __iomem *addr, u##size val) \ +{ \ + __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ + : "=m" (*addr) : "r" (val) : "memory"); \ + IO_SET_SYNC_FLAG(); \ +} + + +DEF_MMIO_IN_BE(in_8, 8, lbz); +DEF_MMIO_IN_BE(in_be16, 16, lhz); +DEF_MMIO_IN_BE(in_be32, 32, lwz); +DEF_MMIO_IN_LE(in_le16, 16, lhbrx); +DEF_MMIO_IN_LE(in_le32, 32, lwbrx); 
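For orientation, the macro instantiations above expand into ordinary inline helpers, so a driver calls them like any other accessor. The short sketch below is illustrative only and assumes a hypothetical ioremap()ed register block `regs` with a 32-bit big-endian status register at offset 0x10 (the function name and the offset are invented for the example); it also shows the address-first, value-second argument order that the out_* accessors use, as noted in the comment above.

static void example_toggle_status(void __iomem *regs)
{
	u32 status;

	/* big-endian 32-bit MMIO read: sync; lwz; twi; isync under the hood */
	status = in_be32(regs + 0x10);

	/* out_* takes the address first and the value second */
	out_be32(regs + 0x10, status | 0x1);
}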
+ +DEF_MMIO_OUT_BE(out_8, 8, stb); +DEF_MMIO_OUT_BE(out_be16, 16, sth); +DEF_MMIO_OUT_BE(out_be32, 32, stw); +DEF_MMIO_OUT_LE(out_le16, 16, sthbrx); +DEF_MMIO_OUT_LE(out_le32, 32, stwbrx); + +#ifdef __powerpc64__ +DEF_MMIO_OUT_BE(out_be64, 64, std); +DEF_MMIO_IN_BE(in_be64, 64, ld); + +/* There is no asm instructions for 64 bits reverse loads and stores */ +static inline u64 in_le64(const volatile u64 __iomem *addr) +{ + return swab64(in_be64(addr)); +} + +static inline void out_le64(volatile u64 __iomem *addr, u64 val) +{ + out_be64(addr, swab64(val)); +} +#endif /* __powerpc64__ */ + +/* + * Low level IO stream instructions are defined out of line for now + */ +extern void _insb(const volatile u8 __iomem *addr, void *buf, long count); +extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count); +extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count); +extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count); +extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count); +extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count); + +/* The _ns naming is historical and will be removed. For now, just #define + * the non _ns equivalent names + */ +#define _insw _insw_ns +#define _insl _insl_ns +#define _outsw _outsw_ns +#define _outsl _outsl_ns + + +/* + * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line + */ + +extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n); +extern void _memcpy_fromio(void *dest, const volatile void __iomem *src, + unsigned long n); +extern void _memcpy_toio(volatile void __iomem *dest, const void *src, + unsigned long n); + +/* + * + * PCI and standard ISA accessors + * + * Those are globally defined linux accessors for devices on PCI or ISA + * busses. They follow the Linux defined semantics. The current implementation + * for PowerPC is as close as possible to the x86 version of these, and thus + * provides fairly heavy weight barriers for the non-raw versions + * + * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO + * allowing the platform to provide its own implementation of some or all + * of the accessors. + */ + +/* + * Include the EEH definitions when EEH is enabled only so they don't get + * in the way when building for 32 bits + */ +#ifdef CONFIG_EEH +#include <asm/eeh.h> +#endif + +/* Shortcut to the MMIO argument pointer */ +#define PCI_IO_ADDR volatile void __iomem * + +/* Indirect IO address tokens: + * + * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks + * on all IOs. (Note that this is all 64 bits only for now) + * + * To help platforms who may need to differenciate MMIO addresses in + * their hooks, a bitfield is reserved for use by the platform near the + * top of MMIO addresses (not PIO, those have to cope the hard way). + * + * This bit field is 12 bits and is at the top of the IO virtual + * addresses PCI_IO_INDIRECT_TOKEN_MASK. + * + * The kernel virtual space is thus: + * + * 0xD000000000000000 : vmalloc + * 0xD000080000000000 : PCI PHB IO space + * 0xD000080080000000 : ioremap + * 0xD0000fffffffffff : end of ioremap region + * + * Since the top 4 bits are reserved as the region ID, we use thus + * the next 12 bits and keep 4 bits available for the future if the + * virtual address space is ever to be extended. 
+ * + * The direct IO mapping operations will then mask off those bits + * before doing the actual access, though that only happen when + * CONFIG_PPC_INDIRECT_IO is set, thus be careful when you use that + * mechanism + */ + +#ifdef CONFIG_PPC_INDIRECT_IO +#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul +#define PCI_IO_IND_TOKEN_SHIFT 48 +#define PCI_FIX_ADDR(addr) \ + ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) +#define PCI_GET_ADDR_TOKEN(addr) \ + (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \ + PCI_IO_IND_TOKEN_SHIFT) +#define PCI_SET_ADDR_TOKEN(addr, token) \ +do { \ + unsigned long __a = (unsigned long)(addr); \ + __a &= ~PCI_IO_IND_TOKEN_MASK; \ + __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \ + (addr) = (void __iomem *)__a; \ +} while(0) +#else +#define PCI_FIX_ADDR(addr) (addr) +#endif + + +/* + * Non ordered and non-swapping "raw" accessors + */ + +static inline unsigned char __raw_readb(const volatile void __iomem *addr) +{ + return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr); +} +static inline unsigned short __raw_readw(const volatile void __iomem *addr) +{ + return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr); +} +static inline unsigned int __raw_readl(const volatile void __iomem *addr) +{ + return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr); +} +static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) +{ + *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v; +} +static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) +{ + *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v; +} +static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) +{ + *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v; +} + +#ifdef __powerpc64__ +static inline unsigned long __raw_readq(const volatile void __iomem *addr) +{ + return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr); +} +static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) +{ + *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; +} +#endif /* __powerpc64__ */ + +/* + * + * PCI PIO and MMIO accessors. + * + * + * On 32 bits, PIO operations have a recovery mechanism in case they trigger + * machine checks (which they occasionally do when probing non existing + * IO ports on some platforms, like PowerMac and 8xx). + * I always found it to be of dubious reliability and I am tempted to get + * rid of it one of these days. So if you think it's important to keep it, + * please voice up asap. 
We never had it for 64 bits and I do not intend + * to port it over + */ + +#ifdef CONFIG_PPC32 + +#define __do_in_asm(name, op) \ +static inline unsigned int name(unsigned int port) \ +{ \ + unsigned int x; \ + __asm__ __volatile__( \ + "sync\n" \ + "0:" op " %0,0,%1\n" \ + "1: twi 0,%0,0\n" \ + "2: isync\n" \ + "3: nop\n" \ + "4:\n" \ + ".section .fixup,\"ax\"\n" \ + "5: li %0,-1\n" \ + " b 4b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 0b,5b\n" \ + " .long 1b,5b\n" \ + " .long 2b,5b\n" \ + " .long 3b,5b\n" \ + ".previous" \ + : "=&r" (x) \ + : "r" (port + _IO_BASE) \ + : "memory"); \ + return x; \ +} + +#define __do_out_asm(name, op) \ +static inline void name(unsigned int val, unsigned int port) \ +{ \ + __asm__ __volatile__( \ + "sync\n" \ + "0:" op " %0,0,%1\n" \ + "1: sync\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 0b,2b\n" \ + " .long 1b,2b\n" \ + ".previous" \ + : : "r" (val), "r" (port + _IO_BASE) \ + : "memory"); \ +} + +__do_in_asm(_rec_inb, "lbzx") +__do_in_asm(_rec_inw, "lhbrx") +__do_in_asm(_rec_inl, "lwbrx") +__do_out_asm(_rec_outb, "stbx") +__do_out_asm(_rec_outw, "sthbrx") +__do_out_asm(_rec_outl, "stwbrx") + +#endif /* CONFIG_PPC32 */ + +/* The "__do_*" operations below provide the actual "base" implementation + * for each of the defined accessors. Some of them use the out_* functions + * directly, some of them still use EEH, though we might change that in the + * future. Those macros below provide the necessary argument swapping and + * handling of the IO base for PIO. + * + * They are themselves used by the macros that define the actual accessors + * and can be used by the hooks if any. + * + * Note that PIO operations are always defined in terms of their corresonding + * MMIO operations. That allows platforms like iSeries who want to modify the + * behaviour of both to only hook on the MMIO version and get both. 
It's also + * possible to hook directly at the toplevel PIO operation if they have to + * be handled differently + */ +#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val) +#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val) +#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val) +#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val) +#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val) +#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val) +#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val) + +#ifdef CONFIG_EEH +#define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr)) +#define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr)) +#define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr)) +#define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr)) +#define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr)) +#define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr)) +#define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr)) +#else /* CONFIG_EEH */ +#define __do_readb(addr) in_8(PCI_FIX_ADDR(addr)) +#define __do_readw(addr) in_le16(PCI_FIX_ADDR(addr)) +#define __do_readl(addr) in_le32(PCI_FIX_ADDR(addr)) +#define __do_readq(addr) in_le64(PCI_FIX_ADDR(addr)) +#define __do_readw_be(addr) in_be16(PCI_FIX_ADDR(addr)) +#define __do_readl_be(addr) in_be32(PCI_FIX_ADDR(addr)) +#define __do_readq_be(addr) in_be64(PCI_FIX_ADDR(addr)) +#endif /* !defined(CONFIG_EEH) */ + +#ifdef CONFIG_PPC32 +#define __do_outb(val, port) _rec_outb(val, port) +#define __do_outw(val, port) _rec_outw(val, port) +#define __do_outl(val, port) _rec_outl(val, port) +#define __do_inb(port) _rec_inb(port) +#define __do_inw(port) _rec_inw(port) +#define __do_inl(port) _rec_inl(port) +#else /* CONFIG_PPC32 */ +#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port); +#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port); +#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port); +#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port); +#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port); +#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port); +#endif /* !CONFIG_PPC32 */ + +#ifdef CONFIG_EEH +#define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n)) +#else /* CONFIG_EEH */ +#define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n)) +#endif /* !CONFIG_EEH */ +#define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n)) +#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n)) +#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n)) + +#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) +#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) +#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) +#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) +#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) +#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) + +#define __do_memset_io(addr, c, n) \ + _memset_io(PCI_FIX_ADDR(addr), c, n) +#define __do_memcpy_toio(dst, src, n) \ + _memcpy_toio(PCI_FIX_ADDR(dst), src, n) + +#ifdef CONFIG_EEH +#define __do_memcpy_fromio(dst, src, n) \ + eeh_memcpy_fromio(dst, 
PCI_FIX_ADDR(src), n) +#else /* CONFIG_EEH */ +#define __do_memcpy_fromio(dst, src, n) \ + _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) +#endif /* !CONFIG_EEH */ + +#ifdef CONFIG_PPC_INDIRECT_PIO +#define DEF_PCI_HOOK_pio(x) x +#else +#define DEF_PCI_HOOK_pio(x) NULL +#endif + +#ifdef CONFIG_PPC_INDIRECT_MMIO +#define DEF_PCI_HOOK_mem(x) x +#else +#define DEF_PCI_HOOK_mem(x) NULL +#endif + +/* Structure containing all the hooks */ +extern struct ppc_pci_io { + +#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) ret (*name) at; +#define DEF_PCI_AC_NORET(name, at, al, space, aa) void (*name) at; + +#include <asm/io-defs.h> + +#undef DEF_PCI_AC_RET +#undef DEF_PCI_AC_NORET + +} ppc_pci_io; + +/* The inline wrappers */ +#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ +static inline ret name at \ +{ \ + if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ + return ppc_pci_io.name al; \ + return __do_##name al; \ +} + +#define DEF_PCI_AC_NORET(name, at, al, space, aa) \ +static inline void name at \ +{ \ + if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ + ppc_pci_io.name al; \ + else \ + __do_##name al; \ +} + +#include <asm/io-defs.h> + +#undef DEF_PCI_AC_RET +#undef DEF_PCI_AC_NORET + +/* Some drivers check for the presence of readq & writeq with + * a #ifdef, so we make them happy here. + */ +#ifdef __powerpc64__ +#define readq readq +#define writeq writeq +#endif + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +/* + * We don't do relaxed operations yet, at least not with this semantic + */ +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) +#define readq_relaxed(addr) readq(addr) + +#ifdef CONFIG_PPC32 +#define mmiowb() +#else +/* + * Enforce synchronisation of stores vs. spin_unlock + * (this does it explicitly, though our implementation of spin_unlock + * does it implicitely too) + */ +static inline void mmiowb(void) +{ + unsigned long tmp; + + __asm__ __volatile__("sync; li %0,0; stb %0,%1(13)" + : "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync)) + : "memory"); +} +#endif /* !CONFIG_PPC32 */ + +static inline void iosync(void) +{ + __asm__ __volatile__ ("sync" : : : "memory"); +} + +/* Enforce in-order execution of data I/O. + * No distinction between read/write on PPC; use eieio for all three. + * Those are fairly week though. They don't provide a barrier between + * MMIO and cacheable storage nor do they provide a barrier vs. locks, + * they only provide barriers between 2 __raw MMIO operations and + * possibly break write combining. + */ +#define iobarrier_rw() eieio() +#define iobarrier_r() eieio() +#define iobarrier_w() eieio() + + +/* + * output pause versions need a delay at least for the + * w83c105 ide controller in a p610. 
+ */ +#define inb_p(port) inb(port) +#define outb_p(val, port) (udelay(1), outb((val), (port))) +#define inw_p(port) inw(port) +#define outw_p(val, port) (udelay(1), outw((val), (port))) +#define inl_p(port) inl(port) +#define outl_p(val, port) (udelay(1), outl((val), (port))) + + +#define IO_SPACE_LIMIT ~(0UL) + + +/** + * ioremap - map bus memory into CPU space + * @address: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * We provide a few variations of it: + * + * * ioremap is the standard one and provides non-cacheable guarded mappings + * and can be hooked by the platform via ppc_md + * + * * ioremap_prot allows to specify the page flags as an argument and can + * also be hooked by the platform via ppc_md. + * + * * ioremap_nocache is identical to ioremap + * + * * ioremap_wc enables write combining + * + * * iounmap undoes such a mapping and can be hooked + * + * * __ioremap_at (and the pending __iounmap_at) are low level functions to + * create hand-made mappings for use only by the PCI code and cannot + * currently be hooked. Must be page aligned. + * + * * __ioremap is the low level implementation used by ioremap and + * ioremap_prot and cannot be hooked (but can be used by a hook on one + * of the previous ones) + * + * * __ioremap_caller is the same as above but takes an explicit caller + * reference rather than using __builtin_return_address(0) + * + * * __iounmap, is the low level implementation used by iounmap and cannot + * be hooked (but can be used by a hook on iounmap) + * + */ +extern void __iomem *ioremap(phys_addr_t address, unsigned long size); +extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, + unsigned long flags); +extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); +#define ioremap_nocache(addr, size) ioremap((addr), (size)) + +extern void iounmap(volatile void __iomem *addr); + +extern void __iomem *__ioremap(phys_addr_t, unsigned long size, + unsigned long flags); +extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, + unsigned long flags, void *caller); + +extern void __iounmap(volatile void __iomem *addr); + +extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea, + unsigned long size, unsigned long flags); +extern void __iounmap_at(void *ea, unsigned long size); + +/* + * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation + * which needs some additional definitions here. They basically allow PIO + * space overall to be 1GB. 
This will work as long as we never try to use + * iomap to map MMIO below 1GB which should be fine on ppc64 + */ +#define HAVE_ARCH_PIO_SIZE 1 +#define PIO_OFFSET 0x00000000UL +#define PIO_MASK (FULL_IO_SIZE - 1) +#define PIO_RESERVED (FULL_IO_SIZE) + +#define mmio_read16be(addr) readw_be(addr) +#define mmio_read32be(addr) readl_be(addr) +#define mmio_write16be(val, addr) writew_be(val, addr) +#define mmio_write32be(val, addr) writel_be(val, addr) +#define mmio_insb(addr, dst, count) readsb(addr, dst, count) +#define mmio_insw(addr, dst, count) readsw(addr, dst, count) +#define mmio_insl(addr, dst, count) readsl(addr, dst, count) +#define mmio_outsb(addr, src, count) writesb(addr, src, count) +#define mmio_outsw(addr, src, count) writesw(addr, src, count) +#define mmio_outsl(addr, src, count) writesl(addr, src, count) + +/** + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given. It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. + * + * This function does not give bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline unsigned long virt_to_phys(volatile void * address) +{ + return __pa((unsigned long)address); +} + +/** + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline void * phys_to_virt(unsigned long address) +{ + return (void *)__va(address); +} + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) + +/* + * 32 bits still uses virt_to_bus() for its implementation of DMA + * mappings so we have to keep it defined here. We also have some old + * drivers (shame shame shame) that use bus_to_virt() and haven't been + * fixed yet so I need to define it here. + */ +#ifdef CONFIG_PPC32 + +static inline unsigned long virt_to_bus(volatile void * address) +{ + if (address == NULL) + return 0; + return __pa(address) + PCI_DRAM_OFFSET; +}
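To make the kmalloc-only caveat above concrete, here is a minimal sketch under stated assumptions (the function name, buffer size, and the includes it pulls in are invented for the example): virt_to_phys()/phys_to_virt() translate addresses in the kernel linear mapping, such as a kmalloc() buffer, and round-trip cleanly, whereas device registers must instead go through ioremap() and the MMIO accessors defined earlier in this header.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_linear_map_roundtrip(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	unsigned long pa;

	if (!buf)
		return -ENOMEM;

	/* valid: buf comes from the kernel linear mapping */
	pa = virt_to_phys(buf);

	/* the reverse translation yields the same kernel virtual address */
	WARN_ON(phys_to_virt(pa) != buf);

	kfree(buf);
	return 0;
}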