Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                        43
-rw-r--r--  arch/sh/include/asm/cpu_sh4.h          25
-rw-r--r--  arch/sh/include/asm/system.h           23
-rw-r--r--  arch/sh/include/asm/unaligned-sh4a.h  258
-rw-r--r--  arch/sh/include/asm/unaligned.h         7
5 files changed, 1 insertion, 355 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 91002a9be0..7836869c55 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -4,51 +4,14 @@ menu "SuperH architecture"
 config CPU_SH4
 	bool
 
-config CPU_SH4A
-	bool
-	select CPU_SH4
-
-config SH_32BIT
-	bool "32bit mode"
-	depends on CPU_SH4A
-	default n
-	help
-	  SH4A has 2 physical memory maps. This use 32bit mode.
-	  And this is board specific. Please check your board if you
-	  want to use this.
-
 choice
 	prompt "Target select"
 	optional
 
-config TARGET_MIGOR
-	bool "Migo-R"
-	select CPU_SH4
-
 config TARGET_R2DPLUS
 	bool "Renesas R2D-PLUS"
 	select CPU_SH4
 
-config TARGET_R7780MP
-	bool "R7780MP board"
-	select CPU_SH4A
-
-config TARGET_SH7752EVB
-	bool "SH7752EVB"
-	select CPU_SH4A
-
-config TARGET_SH7753EVB
-	bool "SH7753EVB"
-	select CPU_SH4
-
-config TARGET_SH7757LCR
-	bool "SH7757LCR"
-	select CPU_SH4A
-
-config TARGET_SH7763RDP
-	bool "SH7763RDP"
-	select CPU_SH4
-
 endchoice
 
 config SYS_ARCH
@@ -59,12 +22,6 @@ config SYS_CPU
 
 source "arch/sh/lib/Kconfig"
 
-source "board/renesas/MigoR/Kconfig"
 source "board/renesas/r2dplus/Kconfig"
-source "board/renesas/r7780mp/Kconfig"
-source "board/renesas/sh7752evb/Kconfig"
-source "board/renesas/sh7753evb/Kconfig"
-source "board/renesas/sh7757lcr/Kconfig"
-source "board/renesas/sh7763rdp/Kconfig"
 
 endmenu
diff --git a/arch/sh/include/asm/cpu_sh4.h b/arch/sh/include/asm/cpu_sh4.h
index 5fc9c962d8..ed7c243b3b 100644
--- a/arch/sh/include/asm/cpu_sh4.h
+++ b/arch/sh/include/asm/cpu_sh4.h
@@ -46,29 +46,4 @@
 # error "Unknown SH4 variant"
 #endif
 
-#if defined(CONFIG_SH_32BIT)
-#define PMB_ADDR_ARRAY	0xf6100000
-#define PMB_ADDR_ENTRY	8
-#define PMB_VPN		24
-
-#define PMB_DATA_ARRAY	0xf7100000
-#define PMB_DATA_ENTRY	8
-#define PMB_PPN		24
-#define PMB_UB		9	/* Buffered write */
-#define PMB_V		8	/* Valid */
-#define PMB_SZ1		7	/* Page size (upper bit) */
-#define PMB_SZ0		4	/* Page size (lower bit) */
-#define PMB_C		3	/* Cacheability */
-#define PMB_WT		0	/* Write-through */
-
-#define PMB_ADDR_BASE(entry)	(PMB_ADDR_ARRAY + (entry << PMB_ADDR_ENTRY))
-#define PMB_DATA_BASE(entry)	(PMB_DATA_ARRAY + (entry << PMB_DATA_ENTRY))
-#define mk_pmb_addr_val(vpn)	((vpn << PMB_VPN))
-#define mk_pmb_data_val(ppn, ub, v, sz1, sz0, c, wt)	\
-		((ppn << PMB_PPN) | (ub << PMB_UB) |	\
-		 (v << PMB_V) | (sz1 << PMB_SZ1) |	\
-		 (sz0 << PMB_SZ0) | (c << PMB_C) |	\
-		 (wt << PMB_WT))
-#endif
-
 #endif /* _ASM_CPU_SH4_H_ */
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 24b5ce8e30..ccc79b3c91 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -70,18 +70,6 @@ static inline void sched_cacheflush(void)
 {
 }
 
-#ifdef CONFIG_CPU_SH4A
-#define __icbi()			\
-{					\
-	unsigned long __addr;		\
-	__addr = 0xa8000000;		\
-	__asm__ __volatile__(		\
-		"icbi %0\n\t"		\
-		: /* no output */	\
-		: "m" (__m(__addr)));	\
-}
-#endif
-
 static inline unsigned long tas(volatile int *m)
 {
 	unsigned long retval;
@@ -100,25 +88,14 @@ static inline unsigned long tas(volatile int *m)
  * effect. On newer cores (like the sh4a and sh5) this is accomplished
  * with icbi.
  *
- * Also note that on sh4a in the icbi case we can forego a synco for the
- * write barrier, as it's not necessary for control registers.
- *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
-#ifdef CONFIG_CPU_SH4A
-#define mb()		__asm__ __volatile__ ("synco": : :"memory")
-#define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
-#define read_barrier_depends()	do { } while(0)
-#else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("": : :"memory")
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #define read_barrier_depends()	do { } while(0)
-#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
deleted file mode 100644
index 9f4dd252c9..0000000000
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ /dev/null
@@ -1,258 +0,0 @@
-#ifndef __ASM_SH_UNALIGNED_SH4A_H
-#define __ASM_SH_UNALIGNED_SH4A_H
-
-/*
- * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 64-bit accesses are done through shifting and masking
- * relative to the endianness. Unaligned stores are not supported by the
- * instruction encoding, so these continue to use the packed
- * struct.
- *
- * The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is gauranteed to be inlined, nothing else will hook in to
- * r0 and we get the return value for free.
- *
- * NOTE: Due to the fact we require r0 encoding, care should be taken to
- * avoid mixing these heavily with other r0 consumers, such as the atomic
- * ops. Failure to adhere to this can result in the compiler running out
- * of spill registers and blowing up when building at low optimization
- * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
- */
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
-{
-	unsigned long unaligned;
-
-	__asm__ __volatile__ (
-		"movua.l @%1, %0\n\t"
-		 : "=z" (unaligned)
-		 : "r" (p)
-	);
-
-	return unaligned;
-}
-
-struct __una_u16 { u16 x __attribute__((packed)); };
-struct __una_u32 { u32 x __attribute__((packed)); };
-struct __una_u64 { u64 x __attribute__((packed)); };
-
-static inline u16 __get_unaligned_cpu16(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
-	return p[0] | p[1] << 8;
-#else
-	return p[0] << 8 | p[1];
-#endif
-}
-
-/*
- * Even though movua.l supports auto-increment on the read side, it can
- * only store to r0 due to instruction encoding constraints, so just let
- * the compiler sort it out on its own.
- */
-static inline u64 __get_unaligned_cpu64(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
-	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
-		    __get_unaligned_cpu32(p);
-#else
-	return (u64)__get_unaligned_cpu32(p) << 32 |
-		    __get_unaligned_cpu32(p + 4);
-#endif
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
-	return le16_to_cpu(__get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
-	return le32_to_cpu(__get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
-	return le64_to_cpu(__get_unaligned_cpu64(p));
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
-	return be16_to_cpu(__get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
-	return be32_to_cpu(__get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
-	return be64_to_cpu(__get_unaligned_cpu64(p));
-}
-
-static inline void __put_le16_noalign(u8 *p, u16 val)
-{
-	*p++ = val;
-	*p++ = val >> 8;
-}
-
-static inline void __put_le32_noalign(u8 *p, u32 val)
-{
-	__put_le16_noalign(p, val);
-	__put_le16_noalign(p + 2, val >> 16);
-}
-
-static inline void __put_le64_noalign(u8 *p, u64 val)
-{
-	__put_le32_noalign(p, val);
-	__put_le32_noalign(p + 4, val >> 32);
-}
-
-static inline void __put_be16_noalign(u8 *p, u16 val)
-{
-	*p++ = val >> 8;
-	*p++ = val;
-}
-
-static inline void __put_be32_noalign(u8 *p, u32 val)
-{
-	__put_be16_noalign(p, val >> 16);
-	__put_be16_noalign(p + 2, val);
-}
-
-static inline void __put_be64_noalign(u8 *p, u64 val)
-{
-	__put_be32_noalign(p, val >> 32);
-	__put_be32_noalign(p + 4, val);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
-	((struct __una_u16 *)p)->x = val;
-#else
-	__put_le16_noalign(p, val);
-#endif
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
-	((struct __una_u32 *)p)->x = val;
-#else
-	__put_le32_noalign(p, val);
-#endif
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
-	((struct __una_u64 *)p)->x = val;
-#else
-	__put_le64_noalign(p, val);
-#endif
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
-#ifdef __BIG_ENDIAN
-	((struct __una_u16 *)p)->x = val;
-#else
-	__put_be16_noalign(p, val);
-#endif
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
-#ifdef __BIG_ENDIAN
-	((struct __una_u32 *)p)->x = val;
-#else
-	__put_be32_noalign(p, val);
-#endif
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
-#ifdef __BIG_ENDIAN
-	((struct __una_u64 *)p)->x = val;
-#else
-	__put_be64_noalign(p, val);
-#endif
-}
-
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern void __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
-	__bad_unaligned_access_size()))));					\
-	}))
-
-#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
-	__bad_unaligned_access_size()))));					\
-	}))
-
-#define __put_unaligned_le(val, ptr) ({			\
-	void *__gu_p = (ptr);				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-		*(u8 *)__gu_p = (__force u8)(val);	\
-		break;					\
-	case 2:						\
-		put_unaligned_le16((__force u16)(val), __gu_p);	\
-		break;					\
-	case 4:						\
-		put_unaligned_le32((__force u32)(val), __gu_p);	\
-		break;					\
-	case 8:						\
-		put_unaligned_le64((__force u64)(val), __gu_p);	\
-		break;					\
-	default:					\
-		__bad_unaligned_access_size();		\
-		break;					\
-	}						\
-	(void)0; })
-
-#define __put_unaligned_be(val, ptr) ({			\
-	void *__gu_p = (ptr);				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-		*(u8 *)__gu_p = (__force u8)(val);	\
-		break;					\
-	case 2:						\
-		put_unaligned_be16((__force u16)(val), __gu_p);	\
-		break;					\
-	case 4:						\
-		put_unaligned_be32((__force u32)(val), __gu_p);	\
-		break;					\
-	case 8:						\
-		put_unaligned_be64((__force u64)(val), __gu_p);	\
-		break;					\
-	default:					\
-		__bad_unaligned_access_size();		\
-		break;					\
-	}						\
-	(void)0; })
-
-#ifdef __LITTLE_ENDIAN
-# define get_unaligned	__get_unaligned_le
-# define put_unaligned	__put_unaligned_le
-#else
-# define get_unaligned	__get_unaligned_be
-# define put_unaligned	__put_unaligned_be
-#endif
-
-#endif /* __ASM_SH_UNALIGNED_SH4A_H */
diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h
index 06096eeac5..5acf081962 100644
--- a/arch/sh/include/asm/unaligned.h
+++ b/arch/sh/include/asm/unaligned.h
@@ -3,11 +3,7 @@
 
 /* Copy from linux-kernel. */
 
-#ifdef CONFIG_CPU_SH4A
-/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
-#include <asm/unaligned-sh4a.h>
-#else
-/* Otherwise, SH can't handle unaligned accesses. */
+/* Other than SH4A, SH can't handle unaligned accesses. */
 #include <linux/compiler.h>
 #if defined(__BIG_ENDIAN__)
 #define get_unaligned	__get_unaligned_be
@@ -20,6 +16,5 @@
 #include <linux/unaligned/le_byteshift.h>
 #include <linux/unaligned/be_byteshift.h>
 #include <linux/unaligned/generic.h>
-#endif
 
 #endif /* _ASM_SH_UNALIGNED_H */
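
For context on the unaligned.h change above: with unaligned-sh4a.h removed, every remaining SH target takes the byteshift path that asm/unaligned.h already used for non-SH4A parts. The sketch below is illustrative only; the example_* helpers are hypothetical stand-ins for the real get_unaligned_be32()/put_unaligned_le16() and friends pulled in from include/linux/unaligned/. It shows why the fallback is safe on cores without unaligned load support: each access is assembled one byte at a time, so no unaligned load or store instruction is ever emitted.

#include <stdint.h>

/*
 * Minimal sketch of the byteshift-style access that asm/unaligned.h
 * now selects unconditionally. Reads combine individual bytes with
 * shifts; writes store one byte at a time. The example_* names are
 * illustrative, not the real helper names.
 */
static inline uint32_t example_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

static inline void example_put_unaligned_le16(uint16_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val & 0xff;
	b[1] = val >> 8;
}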